frame.c
/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "channel_layout.h"
#include "avassert.h"
#include "buffer.h"
#include "common.h"
#include "dict.h"
#include "frame.h"
#include "imgutils.h"
#include "mem.h"
#include "samplefmt.h"


static AVFrameSideData *frame_new_side_data(AVFrame *frame,
                                            enum AVFrameSideDataType type,
                                            AVBufferRef *buf);

MAKE_ACCESSORS(AVFrame, frame, int64_t, best_effort_timestamp)
MAKE_ACCESSORS(AVFrame, frame, int64_t, pkt_duration)
MAKE_ACCESSORS(AVFrame, frame, int64_t, pkt_pos)
MAKE_ACCESSORS(AVFrame, frame, int64_t, channel_layout)
MAKE_ACCESSORS(AVFrame, frame, int, channels)
MAKE_ACCESSORS(AVFrame, frame, int, sample_rate)
MAKE_ACCESSORS(AVFrame, frame, AVDictionary *, metadata)
MAKE_ACCESSORS(AVFrame, frame, int, decode_error_flags)
MAKE_ACCESSORS(AVFrame, frame, int, pkt_size)
MAKE_ACCESSORS(AVFrame, frame, enum AVColorSpace, colorspace)
MAKE_ACCESSORS(AVFrame, frame, enum AVColorRange, color_range)

#define CHECK_CHANNELS_CONSISTENCY(frame) \
    av_assert2(!(frame)->channel_layout || \
               (frame)->channels == \
               av_get_channel_layout_nb_channels((frame)->channel_layout))

AVDictionary **avpriv_frame_get_metadatap(AVFrame *frame) {return &frame->metadata;};

#if FF_API_FRAME_QP
int av_frame_set_qp_table(AVFrame *f, AVBufferRef *buf, int stride, int qp_type)
{
    av_buffer_unref(&f->qp_table_buf);

    f->qp_table_buf = buf;

FF_DISABLE_DEPRECATION_WARNINGS
    f->qscale_table = buf->data;
    f->qstride      = stride;
    f->qscale_type  = qp_type;
FF_ENABLE_DEPRECATION_WARNINGS

    return 0;
}

int8_t *av_frame_get_qp_table(AVFrame *f, int *stride, int *type)
{
FF_DISABLE_DEPRECATION_WARNINGS
    *stride = f->qstride;
    *type   = f->qscale_type;
FF_ENABLE_DEPRECATION_WARNINGS

    if (!f->qp_table_buf)
        return NULL;

    return f->qp_table_buf->data;
}
#endif

const char *av_get_colorspace_name(enum AVColorSpace val)
{
    static const char * const name[] = {
        [AVCOL_SPC_RGB]       = "GBR",
        [AVCOL_SPC_BT709]     = "bt709",
        [AVCOL_SPC_FCC]       = "fcc",
        [AVCOL_SPC_BT470BG]   = "bt470bg",
        [AVCOL_SPC_SMPTE170M] = "smpte170m",
        [AVCOL_SPC_SMPTE240M] = "smpte240m",
        [AVCOL_SPC_YCOCG]     = "YCgCo",
    };
    if ((unsigned)val >= FF_ARRAY_ELEMS(name))
        return NULL;
    return name[val];
}

static void get_frame_defaults(AVFrame *frame)
{
    if (frame->extended_data != frame->data)
        av_freep(&frame->extended_data);

    memset(frame, 0, sizeof(*frame));

    frame->pts                   =
    frame->pkt_dts               = AV_NOPTS_VALUE;
#if FF_API_PKT_PTS
FF_DISABLE_DEPRECATION_WARNINGS
    frame->pkt_pts               = AV_NOPTS_VALUE;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
    frame->best_effort_timestamp = AV_NOPTS_VALUE;
    frame->pkt_duration        = 0;
    frame->pkt_pos             = -1;
    frame->pkt_size            = -1;
    frame->key_frame           = 1;
    frame->sample_aspect_ratio = (AVRational){ 0, 1 };
    frame->format              = -1; /* unknown */
    frame->extended_data       = frame->data;
    frame->color_primaries     = AVCOL_PRI_UNSPECIFIED;
    frame->color_trc           = AVCOL_TRC_UNSPECIFIED;
    frame->colorspace          = AVCOL_SPC_UNSPECIFIED;
    frame->color_range         = AVCOL_RANGE_UNSPECIFIED;
    frame->chroma_location     = AVCHROMA_LOC_UNSPECIFIED;
    frame->flags               = 0;
}

static void free_side_data(AVFrameSideData **ptr_sd)
{
    AVFrameSideData *sd = *ptr_sd;

    av_buffer_unref(&sd->buf);
    av_dict_free(&sd->metadata);
    av_freep(ptr_sd);
}

static void wipe_side_data(AVFrame *frame)
{
    int i;

    for (i = 0; i < frame->nb_side_data; i++) {
        free_side_data(&frame->side_data[i]);
    }
    frame->nb_side_data = 0;

    av_freep(&frame->side_data);
}

AVFrame *av_frame_alloc(void)
{
    AVFrame *frame = av_mallocz(sizeof(*frame));

    if (!frame)
        return NULL;

    frame->extended_data = NULL;
    get_frame_defaults(frame);

    return frame;
}

void av_frame_free(AVFrame **frame)
{
    if (!frame || !*frame)
        return;

    av_frame_unref(*frame);
    av_freep(frame);
}

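/*
 * Illustrative usage sketch (not part of the original source): the typical
 * lifecycle a caller follows with the allocation functions above. The
 * decode_one_frame()/process() calls and the decoder handle are hypothetical
 * placeholders for whatever produces and consumes frames.
 *
 *     AVFrame *frame = av_frame_alloc();
 *     if (!frame)
 *         return AVERROR(ENOMEM);
 *     while (decode_one_frame(decoder, frame) >= 0) {
 *         process(frame);
 *         av_frame_unref(frame);   // drop the data, keep the AVFrame shell
 *     }
 *     av_frame_free(&frame);       // frees the shell and any remaining refs
 */
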
static int get_video_buffer(AVFrame *frame, int align)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
    int ret, i;

    if (!desc)
        return AVERROR(EINVAL);

    if ((ret = av_image_check_size(frame->width, frame->height, 0, NULL)) < 0)
        return ret;

    if (!frame->linesize[0]) {
        for(i=1; i<=align; i+=i) {
            ret = av_image_fill_linesizes(frame->linesize, frame->format,
                                          FFALIGN(frame->width, i));
            if (ret < 0)
                return ret;
            if (!(frame->linesize[0] & (align-1)))
                break;
        }

        for (i = 0; i < 4 && frame->linesize[i]; i++)
            frame->linesize[i] = FFALIGN(frame->linesize[i], align);
    }

    for (i = 0; i < 4 && frame->linesize[i]; i++) {
        int h = FFALIGN(frame->height, 32);
        if (i == 1 || i == 2)
            h = AV_CEIL_RSHIFT(h, desc->log2_chroma_h);

        frame->buf[i] = av_buffer_alloc(frame->linesize[i] * h + 16 + 16/*STRIDE_ALIGN*/ - 1);
        if (!frame->buf[i])
            goto fail;

        frame->data[i] = frame->buf[i]->data;
    }
    if (desc->flags & AV_PIX_FMT_FLAG_PAL || desc->flags & AV_PIX_FMT_FLAG_PSEUDOPAL) {
        av_buffer_unref(&frame->buf[1]);
        frame->buf[1] = av_buffer_alloc(AVPALETTE_SIZE);
        if (!frame->buf[1])
            goto fail;
        frame->data[1] = frame->buf[1]->data;
    }

    frame->extended_data = frame->data;

    return 0;
fail:
    av_frame_unref(frame);
    return AVERROR(ENOMEM);
}

static int get_audio_buffer(AVFrame *frame, int align)
{
    int channels;
    int planar   = av_sample_fmt_is_planar(frame->format);
    int planes;
    int ret, i;

    if (!frame->channels)
        frame->channels = av_get_channel_layout_nb_channels(frame->channel_layout);

    channels = frame->channels;
    planes   = planar ? channels : 1;

    CHECK_CHANNELS_CONSISTENCY(frame);
    if (!frame->linesize[0]) {
        ret = av_samples_get_buffer_size(&frame->linesize[0], channels,
                                         frame->nb_samples, frame->format,
                                         align);
        if (ret < 0)
            return ret;
    }

    if (planes > AV_NUM_DATA_POINTERS) {
        frame->extended_data = av_mallocz_array(planes,
                                          sizeof(*frame->extended_data));
        frame->extended_buf  = av_mallocz_array((planes - AV_NUM_DATA_POINTERS),
                                          sizeof(*frame->extended_buf));
        if (!frame->extended_data || !frame->extended_buf) {
            av_freep(&frame->extended_data);
            av_freep(&frame->extended_buf);
            return AVERROR(ENOMEM);
        }
        frame->nb_extended_buf = planes - AV_NUM_DATA_POINTERS;
    } else
        frame->extended_data = frame->data;

    for (i = 0; i < FFMIN(planes, AV_NUM_DATA_POINTERS); i++) {
        frame->buf[i] = av_buffer_alloc(frame->linesize[0]);
        if (!frame->buf[i]) {
            av_frame_unref(frame);
            return AVERROR(ENOMEM);
        }
        frame->extended_data[i] = frame->data[i] = frame->buf[i]->data;
    }
    for (i = 0; i < planes - AV_NUM_DATA_POINTERS; i++) {
        frame->extended_buf[i] = av_buffer_alloc(frame->linesize[0]);
        if (!frame->extended_buf[i]) {
            av_frame_unref(frame);
            return AVERROR(ENOMEM);
        }
        frame->extended_data[i + AV_NUM_DATA_POINTERS] = frame->extended_buf[i]->data;
    }
    return 0;

}

int av_frame_get_buffer(AVFrame *frame, int align)
{
    if (frame->format < 0)
        return AVERROR(EINVAL);

    if (frame->width > 0 && frame->height > 0)
        return get_video_buffer(frame, align);
    else if (frame->nb_samples > 0 && (frame->channel_layout || frame->channels > 0))
        return get_audio_buffer(frame, align);

    return AVERROR(EINVAL);
}

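/*
 * Illustrative usage sketch (not part of the original source): allocating
 * refcounted data for a video frame. Format, width and height must be set
 * before the call; the alignment of 32 mirrors what av_frame_ref() uses
 * below when it has to duplicate non-refcounted data.
 *
 *     AVFrame *frame = av_frame_alloc();
 *     if (!frame)
 *         return AVERROR(ENOMEM);
 *     frame->format = AV_PIX_FMT_YUV420P;
 *     frame->width  = 1920;
 *     frame->height = 1080;
 *     ret = av_frame_get_buffer(frame, 32);
 *     if (ret < 0)
 *         av_frame_free(&frame);
 */
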
static int frame_copy_props(AVFrame *dst, const AVFrame *src, int force_copy)
{
    int i;

    dst->key_frame              = src->key_frame;
    dst->pict_type              = src->pict_type;
    dst->sample_aspect_ratio    = src->sample_aspect_ratio;
    dst->crop_top               = src->crop_top;
    dst->crop_bottom            = src->crop_bottom;
    dst->crop_left              = src->crop_left;
    dst->crop_right             = src->crop_right;
    dst->pts                    = src->pts;
    dst->repeat_pict            = src->repeat_pict;
    dst->interlaced_frame       = src->interlaced_frame;
    dst->top_field_first        = src->top_field_first;
    dst->palette_has_changed    = src->palette_has_changed;
    dst->sample_rate            = src->sample_rate;
    dst->opaque                 = src->opaque;
#if FF_API_PKT_PTS
FF_DISABLE_DEPRECATION_WARNINGS
    dst->pkt_pts                = src->pkt_pts;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
    dst->pkt_dts                = src->pkt_dts;
    dst->pkt_pos                = src->pkt_pos;
    dst->pkt_size               = src->pkt_size;
    dst->pkt_duration           = src->pkt_duration;
    dst->reordered_opaque       = src->reordered_opaque;
    dst->quality                = src->quality;
    dst->best_effort_timestamp  = src->best_effort_timestamp;
    dst->coded_picture_number   = src->coded_picture_number;
    dst->display_picture_number = src->display_picture_number;
    dst->flags                  = src->flags;
    dst->decode_error_flags     = src->decode_error_flags;
    dst->color_primaries        = src->color_primaries;
    dst->color_trc              = src->color_trc;
    dst->colorspace             = src->colorspace;
    dst->color_range            = src->color_range;
    dst->chroma_location        = src->chroma_location;

    av_dict_copy(&dst->metadata, src->metadata, 0);

#if FF_API_ERROR_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
    memcpy(dst->error, src->error, sizeof(dst->error));
FF_ENABLE_DEPRECATION_WARNINGS
#endif

    for (i = 0; i < src->nb_side_data; i++) {
        const AVFrameSideData *sd_src = src->side_data[i];
        AVFrameSideData *sd_dst;
        if (   sd_src->type == AV_FRAME_DATA_PANSCAN
            && (src->width != dst->width || src->height != dst->height))
            continue;
        if (force_copy) {
            sd_dst = av_frame_new_side_data(dst, sd_src->type,
                                            sd_src->size);
            if (!sd_dst) {
                wipe_side_data(dst);
                return AVERROR(ENOMEM);
            }
            memcpy(sd_dst->data, sd_src->data, sd_src->size);
        } else {
            sd_dst = frame_new_side_data(dst, sd_src->type, av_buffer_ref(sd_src->buf));
            if (!sd_dst) {
                wipe_side_data(dst);
                return AVERROR(ENOMEM);
            }
        }
        av_dict_copy(&sd_dst->metadata, sd_src->metadata, 0);
    }

#if FF_API_FRAME_QP
FF_DISABLE_DEPRECATION_WARNINGS
    dst->qscale_table = NULL;
    dst->qstride      = 0;
    dst->qscale_type  = 0;
    av_buffer_unref(&dst->qp_table_buf);
    if (src->qp_table_buf) {
        dst->qp_table_buf = av_buffer_ref(src->qp_table_buf);
        if (dst->qp_table_buf) {
            dst->qscale_table = dst->qp_table_buf->data;
            dst->qstride      = src->qstride;
            dst->qscale_type  = src->qscale_type;
        }
    }
FF_ENABLE_DEPRECATION_WARNINGS
#endif

    av_buffer_unref(&dst->opaque_ref);
    if (src->opaque_ref) {
        dst->opaque_ref = av_buffer_ref(src->opaque_ref);
        if (!dst->opaque_ref)
            return AVERROR(ENOMEM);
    }

    return 0;
}

int av_frame_ref(AVFrame *dst, const AVFrame *src)
{
    int i, ret = 0;

    av_assert1(dst->width == 0 && dst->height == 0);
    av_assert1(dst->channels == 0);

    dst->format         = src->format;
    dst->width          = src->width;
    dst->height         = src->height;
    dst->channels       = src->channels;
    dst->channel_layout = src->channel_layout;
    dst->nb_samples     = src->nb_samples;

    ret = frame_copy_props(dst, src, 0);
    if (ret < 0)
        return ret;

    /* duplicate the frame data if it's not refcounted */
    if (!src->buf[0]) {
        ret = av_frame_get_buffer(dst, 32);
        if (ret < 0)
            return ret;

        ret = av_frame_copy(dst, src);
        if (ret < 0)
            av_frame_unref(dst);

        return ret;
    }

    /* ref the buffers */
    for (i = 0; i < FF_ARRAY_ELEMS(src->buf); i++) {
        if (!src->buf[i])
            continue;
        dst->buf[i] = av_buffer_ref(src->buf[i]);
        if (!dst->buf[i]) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    if (src->extended_buf) {
        dst->extended_buf = av_mallocz_array(sizeof(*dst->extended_buf),
                                             src->nb_extended_buf);
        if (!dst->extended_buf) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        dst->nb_extended_buf = src->nb_extended_buf;

        for (i = 0; i < src->nb_extended_buf; i++) {
            dst->extended_buf[i] = av_buffer_ref(src->extended_buf[i]);
            if (!dst->extended_buf[i]) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }
        }
    }

    if (src->hw_frames_ctx) {
        dst->hw_frames_ctx = av_buffer_ref(src->hw_frames_ctx);
        if (!dst->hw_frames_ctx) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    /* duplicate extended data */
    if (src->extended_data != src->data) {
        int ch = src->channels;

        if (!ch) {
            ret = AVERROR(EINVAL);
            goto fail;
        }
        CHECK_CHANNELS_CONSISTENCY(src);

        dst->extended_data = av_malloc_array(sizeof(*dst->extended_data), ch);
        if (!dst->extended_data) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        memcpy(dst->extended_data, src->extended_data, sizeof(*src->extended_data) * ch);
    } else
        dst->extended_data = dst->data;

    memcpy(dst->data,     src->data,     sizeof(src->data));
    memcpy(dst->linesize, src->linesize, sizeof(src->linesize));

    return 0;

fail:
    av_frame_unref(dst);
    return ret;
}

AVFrame *av_frame_clone(const AVFrame *src)
{
    AVFrame *ret = av_frame_alloc();

    if (!ret)
        return NULL;

    if (av_frame_ref(ret, src) < 0)
        av_frame_free(&ret);

    return ret;
}

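/*
 * Illustrative usage sketch (not part of the original source): both calls
 * below share the underlying AVBufferRefs rather than copying the actual
 * pixel/sample data, as implemented in av_frame_ref() above.
 *
 *     AVFrame *copy = av_frame_clone(src);       // alloc + ref in one step
 *
 *     AVFrame *dst = av_frame_alloc();
 *     if (dst && av_frame_ref(dst, src) < 0)     // dst must be freshly allocated
 *         av_frame_free(&dst);
 */
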
void av_frame_unref(AVFrame *frame)
{
    int i;

    if (!frame)
        return;

    wipe_side_data(frame);

    for (i = 0; i < FF_ARRAY_ELEMS(frame->buf); i++)
        av_buffer_unref(&frame->buf[i]);
    for (i = 0; i < frame->nb_extended_buf; i++)
        av_buffer_unref(&frame->extended_buf[i]);
    av_freep(&frame->extended_buf);
    av_dict_free(&frame->metadata);
#if FF_API_FRAME_QP
    av_buffer_unref(&frame->qp_table_buf);
#endif

    av_buffer_unref(&frame->hw_frames_ctx);

    av_buffer_unref(&frame->opaque_ref);

    get_frame_defaults(frame);
}

void av_frame_move_ref(AVFrame *dst, AVFrame *src)
{
    av_assert1(dst->width == 0 && dst->height == 0);
    av_assert1(dst->channels == 0);

    *dst = *src;
    if (src->extended_data == src->data)
        dst->extended_data = dst->data;
    memset(src, 0, sizeof(*src));
    get_frame_defaults(src);
}

int av_frame_is_writable(AVFrame *frame)
{
    int i, ret = 1;

    /* assume non-refcounted frames are not writable */
    if (!frame->buf[0])
        return 0;

    for (i = 0; i < FF_ARRAY_ELEMS(frame->buf); i++)
        if (frame->buf[i])
            ret &= !!av_buffer_is_writable(frame->buf[i]);
    for (i = 0; i < frame->nb_extended_buf; i++)
        ret &= !!av_buffer_is_writable(frame->extended_buf[i]);

    return ret;
}

int av_frame_make_writable(AVFrame *frame)
{
    AVFrame tmp;
    int ret;

    if (!frame->buf[0])
        return AVERROR(EINVAL);

    if (av_frame_is_writable(frame))
        return 0;

    memset(&tmp, 0, sizeof(tmp));
    tmp.format         = frame->format;
    tmp.width          = frame->width;
    tmp.height         = frame->height;
    tmp.channels       = frame->channels;
    tmp.channel_layout = frame->channel_layout;
    tmp.nb_samples     = frame->nb_samples;
    ret = av_frame_get_buffer(&tmp, 32);
    if (ret < 0)
        return ret;

    ret = av_frame_copy(&tmp, frame);
    if (ret < 0) {
        av_frame_unref(&tmp);
        return ret;
    }

    ret = av_frame_copy_props(&tmp, frame);
    if (ret < 0) {
        av_frame_unref(&tmp);
        return ret;
    }

    av_frame_unref(frame);

    *frame = tmp;
    if (tmp.data == tmp.extended_data)
        frame->extended_data = frame->data;

    return 0;
}

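/*
 * Illustrative usage sketch (not part of the original source): the usual
 * copy-on-write pattern before modifying frame data that may be shared
 * with other references.
 *
 *     ret = av_frame_make_writable(frame);       // copies only if needed
 *     if (ret < 0)
 *         return ret;
 *     frame->data[0][0] = 0;                     // now safe to modify in place
 */
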
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
{
    return frame_copy_props(dst, src, 1);
}

AVBufferRef *av_frame_get_plane_buffer(AVFrame *frame, int plane)
{
    uint8_t *data;
    int planes, i;

    if (frame->nb_samples) {
        int channels = frame->channels;
        if (!channels)
            return NULL;
        CHECK_CHANNELS_CONSISTENCY(frame);
        planes = av_sample_fmt_is_planar(frame->format) ? channels : 1;
    } else
        planes = 4;

    if (plane < 0 || plane >= planes || !frame->extended_data[plane])
        return NULL;
    data = frame->extended_data[plane];

    for (i = 0; i < FF_ARRAY_ELEMS(frame->buf) && frame->buf[i]; i++) {
        AVBufferRef *buf = frame->buf[i];
        if (data >= buf->data && data < buf->data + buf->size)
            return buf;
    }
    for (i = 0; i < frame->nb_extended_buf; i++) {
        AVBufferRef *buf = frame->extended_buf[i];
        if (data >= buf->data && data < buf->data + buf->size)
            return buf;
    }
    return NULL;
}

static AVFrameSideData *frame_new_side_data(AVFrame *frame,
                                            enum AVFrameSideDataType type,
                                            AVBufferRef *buf)
{
    AVFrameSideData *ret, **tmp;

    if (!buf)
        return NULL;

    if (frame->nb_side_data > INT_MAX / sizeof(*frame->side_data) - 1)
        goto fail;

    tmp = av_realloc(frame->side_data,
                     (frame->nb_side_data + 1) * sizeof(*frame->side_data));
    if (!tmp)
        goto fail;
    frame->side_data = tmp;

    ret = av_mallocz(sizeof(*ret));
    if (!ret)
        goto fail;

    ret->buf = buf;
    ret->data = ret->buf->data;
    ret->size = buf->size;
    ret->type = type;

    frame->side_data[frame->nb_side_data++] = ret;

    return ret;
fail:
    av_buffer_unref(&buf);
    return NULL;
}

AVFrameSideData *av_frame_new_side_data(AVFrame *frame,
                                        enum AVFrameSideDataType type,
                                        int size)
{

    return frame_new_side_data(frame, type, av_buffer_alloc(size));
}

AVFrameSideData *av_frame_get_side_data(const AVFrame *frame,
                                        enum AVFrameSideDataType type)
{
    int i;

    for (i = 0; i < frame->nb_side_data; i++) {
        if (frame->side_data[i]->type == type)
            return frame->side_data[i];
    }
    return NULL;
}

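/*
 * Illustrative usage sketch (not part of the original source): attaching a
 * display-matrix side data entry and looking it up again later. The 'matrix'
 * buffer and use_matrix() consumer are hypothetical placeholders.
 *
 *     AVFrameSideData *sd = av_frame_new_side_data(frame,
 *                                                  AV_FRAME_DATA_DISPLAYMATRIX,
 *                                                  9 * sizeof(int32_t));
 *     if (!sd)
 *         return AVERROR(ENOMEM);
 *     memcpy(sd->data, matrix, 9 * sizeof(int32_t));
 *
 *     sd = av_frame_get_side_data(frame, AV_FRAME_DATA_DISPLAYMATRIX);
 *     if (sd)
 *         use_matrix((int32_t *)sd->data);
 */
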
static int frame_copy_video(AVFrame *dst, const AVFrame *src)
{
    const uint8_t *src_data[4];
    int i, planes;

    if (dst->width  < src->width ||
        dst->height < src->height)
        return AVERROR(EINVAL);

    planes = av_pix_fmt_count_planes(dst->format);
    for (i = 0; i < planes; i++)
        if (!dst->data[i] || !src->data[i])
            return AVERROR(EINVAL);

    memcpy(src_data, src->data, sizeof(src_data));
    av_image_copy(dst->data, dst->linesize,
                  src_data, src->linesize,
                  dst->format, src->width, src->height);

    return 0;
}

static int frame_copy_audio(AVFrame *dst, const AVFrame *src)
{
    int planar   = av_sample_fmt_is_planar(dst->format);
    int channels = dst->channels;
    int planes   = planar ? channels : 1;
    int i;

    if (dst->nb_samples     != src->nb_samples ||
        dst->channels       != src->channels ||
        dst->channel_layout != src->channel_layout)
        return AVERROR(EINVAL);

    CHECK_CHANNELS_CONSISTENCY(dst);

    for (i = 0; i < planes; i++)
        if (!dst->extended_data[i] || !src->extended_data[i])
            return AVERROR(EINVAL);

    av_samples_copy(dst->extended_data, src->extended_data, 0, 0,
                    dst->nb_samples, channels, dst->format);

    return 0;
}

int av_frame_copy(AVFrame *dst, const AVFrame *src)
{
    if (dst->format != src->format || dst->format < 0)
        return AVERROR(EINVAL);

    if (dst->width > 0 && dst->height > 0)
        return frame_copy_video(dst, src);
    else if (dst->nb_samples > 0 && dst->channels > 0)
        return frame_copy_audio(dst, src);

    return AVERROR(EINVAL);
}

void av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type)
{
    int i;

    for (i = 0; i < frame->nb_side_data; i++) {
        AVFrameSideData *sd = frame->side_data[i];
        if (sd->type == type) {
            free_side_data(&frame->side_data[i]);
            frame->side_data[i] = frame->side_data[frame->nb_side_data - 1];
            frame->nb_side_data--;
        }
    }
}

const char *av_frame_side_data_name(enum AVFrameSideDataType type)
{
    switch(type) {
    case AV_FRAME_DATA_PANSCAN:         return "AVPanScan";
    case AV_FRAME_DATA_A53_CC:          return "ATSC A53 Part 4 Closed Captions";
    case AV_FRAME_DATA_STEREO3D:        return "Stereoscopic 3d metadata";
    case AV_FRAME_DATA_MATRIXENCODING:  return "AVMatrixEncoding";
    case AV_FRAME_DATA_DOWNMIX_INFO:    return "Metadata relevant to a downmix procedure";
    case AV_FRAME_DATA_REPLAYGAIN:      return "AVReplayGain";
    case AV_FRAME_DATA_DISPLAYMATRIX:   return "3x3 displaymatrix";
    case AV_FRAME_DATA_AFD:             return "Active format description";
    case AV_FRAME_DATA_MOTION_VECTORS:  return "Motion vectors";
    case AV_FRAME_DATA_SKIP_SAMPLES:    return "Skip samples";
    case AV_FRAME_DATA_AUDIO_SERVICE_TYPE: return "Audio service type";
    case AV_FRAME_DATA_MASTERING_DISPLAY_METADATA: return "Mastering display metadata";
    case AV_FRAME_DATA_CONTENT_LIGHT_LEVEL: return "Content light level metadata";
    case AV_FRAME_DATA_GOP_TIMECODE:    return "GOP timecode";
    }
    return NULL;
}