FFmpeg
frame.c
Go to the documentation of this file.
1 /*
2  * This file is part of FFmpeg.
3  *
4  * FFmpeg is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU Lesser General Public
6  * License as published by the Free Software Foundation; either
7  * version 2.1 of the License, or (at your option) any later version.
8  *
9  * FFmpeg is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12  * Lesser General Public License for more details.
13  *
14  * You should have received a copy of the GNU Lesser General Public
15  * License along with FFmpeg; if not, write to the Free Software
16  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17  */
18 
19 #include "channel_layout.h"
20 #include "avassert.h"
21 #include "buffer.h"
22 #include "common.h"
23 #include "dict.h"
24 #include "frame.h"
25 #include "imgutils.h"
26 #include "mem.h"
27 #include "samplefmt.h"
28 #include "hwcontext.h"
29 
#if FF_API_FRAME_GET_SET
/* Deprecated av_frame_get_xxx()/av_frame_set_xxx() accessor functions,
 * kept for ABI compatibility; callers should access the fields directly. */
MAKE_ACCESSORS(AVFrame, frame, int64_t, best_effort_timestamp)
MAKE_ACCESSORS(AVFrame, frame, int64_t, pkt_duration)
MAKE_ACCESSORS(AVFrame, frame, int64_t, pkt_pos)
MAKE_ACCESSORS(AVFrame, frame, int64_t, channel_layout)
MAKE_ACCESSORS(AVFrame, frame, int, channels)
MAKE_ACCESSORS(AVFrame, frame, int, sample_rate)
MAKE_ACCESSORS(AVFrame, frame, AVDictionary *, metadata)
MAKE_ACCESSORS(AVFrame, frame, int, decode_error_flags)
MAKE_ACCESSORS(AVFrame, frame, int, pkt_size)
MAKE_ACCESSORS(AVFrame, frame, enum AVColorSpace, colorspace)
MAKE_ACCESSORS(AVFrame, frame, enum AVColorRange, color_range)
#endif
43 
/* Assert (in debug builds only, av_assert2) that an audio frame's channel
 * count agrees with its channel layout, when a layout is set at all. */
#define CHECK_CHANNELS_CONSISTENCY(frame) \
    av_assert2(!(frame)->channel_layout || \
               (frame)->channels == \
               av_get_channel_layout_nb_channels((frame)->channel_layout))
48 
49 #if FF_API_FRAME_QP
50 struct qp_properties {
51  int stride;
52  int type;
53 };
54 
55 int av_frame_set_qp_table(AVFrame *f, AVBufferRef *buf, int stride, int qp_type)
56 {
57  struct qp_properties *p;
58  AVFrameSideData *sd;
60 
63 
64  f->qp_table_buf = buf;
65  f->qscale_table = buf->data;
66  f->qstride = stride;
67  f->qscale_type = qp_type;
69 
72 
73  ref = av_buffer_ref(buf);
75  av_buffer_unref(&ref);
76  return AVERROR(ENOMEM);
77  }
78 
80  sizeof(struct qp_properties));
81  if (!sd)
82  return AVERROR(ENOMEM);
83 
84  p = (struct qp_properties *)sd->data;
85  p->stride = stride;
86  p->type = qp_type;
87 
88  return 0;
89 }
90 
91 int8_t *av_frame_get_qp_table(AVFrame *f, int *stride, int *type)
92 {
93  AVBufferRef *buf = NULL;
94 
95  *stride = 0;
96  *type = 0;
97 
99  if (f->qp_table_buf) {
100  *stride = f->qstride;
101  *type = f->qscale_type;
102  buf = f->qp_table_buf;
104  } else {
105  AVFrameSideData *sd;
106  struct qp_properties *p;
108  if (!sd)
109  return NULL;
110  p = (struct qp_properties *)sd->data;
112  if (!sd)
113  return NULL;
114  *stride = p->stride;
115  *type = p->type;
116  buf = sd->buf;
117  }
118 
119  return buf ? buf->data : NULL;
120 }
121 #endif
122 
123 const char *av_get_colorspace_name(enum AVColorSpace val)
124 {
125  static const char * const name[] = {
126  [AVCOL_SPC_RGB] = "GBR",
127  [AVCOL_SPC_BT709] = "bt709",
128  [AVCOL_SPC_FCC] = "fcc",
129  [AVCOL_SPC_BT470BG] = "bt470bg",
130  [AVCOL_SPC_SMPTE170M] = "smpte170m",
131  [AVCOL_SPC_SMPTE240M] = "smpte240m",
132  [AVCOL_SPC_YCOCG] = "YCgCo",
133  };
134  if ((unsigned)val >= FF_ARRAY_ELEMS(name))
135  return NULL;
136  return name[val];
137 }
138 
139 static void get_frame_defaults(AVFrame *frame)
140 {
141  if (frame->extended_data != frame->data)
142  av_freep(&frame->extended_data);
143 
144  memset(frame, 0, sizeof(*frame));
145 
146  frame->pts =
147  frame->pkt_dts = AV_NOPTS_VALUE;
148 #if FF_API_PKT_PTS
150  frame->pkt_pts = AV_NOPTS_VALUE;
152 #endif
154  frame->pkt_duration = 0;
155  frame->pkt_pos = -1;
156  frame->pkt_size = -1;
157  frame->key_frame = 1;
158  frame->sample_aspect_ratio = (AVRational){ 0, 1 };
159  frame->format = -1; /* unknown */
160  frame->extended_data = frame->data;
166  frame->flags = 0;
167 }
168 
169 static void free_side_data(AVFrameSideData **ptr_sd)
170 {
171  AVFrameSideData *sd = *ptr_sd;
172 
173  av_buffer_unref(&sd->buf);
174  av_dict_free(&sd->metadata);
175  av_freep(ptr_sd);
176 }
177 
178 static void wipe_side_data(AVFrame *frame)
179 {
180  int i;
181 
182  for (i = 0; i < frame->nb_side_data; i++) {
183  free_side_data(&frame->side_data[i]);
184  }
185  frame->nb_side_data = 0;
186 
187  av_freep(&frame->side_data);
188 }
189 
190 AVFrame *av_frame_alloc(void)
191 {
192  AVFrame *frame = av_mallocz(sizeof(*frame));
193 
194  if (!frame)
195  return NULL;
196 
197  frame->extended_data = NULL;
198  get_frame_defaults(frame);
199 
200  return frame;
201 }
202 
203 void av_frame_free(AVFrame **frame)
204 {
205  if (!frame || !*frame)
206  return;
207 
208  av_frame_unref(*frame);
209  av_freep(frame);
210 }
211 
212 static int get_video_buffer(AVFrame *frame, int align)
213 {
215  int ret, i, padded_height, total_size;
216  int plane_padding = FFMAX(16 + 16/*STRIDE_ALIGN*/, align);
217  ptrdiff_t linesizes[4];
218  size_t sizes[4];
219 
220  if (!desc)
221  return AVERROR(EINVAL);
222 
223  if ((ret = av_image_check_size(frame->width, frame->height, 0, NULL)) < 0)
224  return ret;
225 
226  if (!frame->linesize[0]) {
227  if (align <= 0)
228  align = 32; /* STRIDE_ALIGN. Should be av_cpu_max_align() */
229 
230  for(i=1; i<=align; i+=i) {
231  ret = av_image_fill_linesizes(frame->linesize, frame->format,
232  FFALIGN(frame->width, i));
233  if (ret < 0)
234  return ret;
235  if (!(frame->linesize[0] & (align-1)))
236  break;
237  }
238 
239  for (i = 0; i < 4 && frame->linesize[i]; i++)
240  frame->linesize[i] = FFALIGN(frame->linesize[i], align);
241  }
242 
243  for (i = 0; i < 4; i++)
244  linesizes[i] = frame->linesize[i];
245 
246  padded_height = FFALIGN(frame->height, 32);
247  if ((ret = av_image_fill_plane_sizes(sizes, frame->format,
248  padded_height, linesizes)) < 0)
249  return ret;
250 
251  total_size = 4*plane_padding;
252  for (i = 0; i < 4; i++) {
253  if (sizes[i] > INT_MAX - total_size)
254  return AVERROR(EINVAL);
255  total_size += sizes[i];
256  }
257 
258  frame->buf[0] = av_buffer_alloc(total_size);
259  if (!frame->buf[0]) {
260  ret = AVERROR(ENOMEM);
261  goto fail;
262  }
263 
264  if ((ret = av_image_fill_pointers(frame->data, frame->format, padded_height,
265  frame->buf[0]->data, frame->linesize)) < 0)
266  goto fail;
267 
268  for (i = 1; i < 4; i++) {
269  if (frame->data[i])
270  frame->data[i] += i * plane_padding;
271  }
272 
273  frame->extended_data = frame->data;
274 
275  return 0;
276 fail:
277  av_frame_unref(frame);
278  return ret;
279 }
280 
281 static int get_audio_buffer(AVFrame *frame, int align)
282 {
283  int channels;
284  int planar = av_sample_fmt_is_planar(frame->format);
285  int planes;
286  int ret, i;
287 
288  if (!frame->channels)
290 
291  channels = frame->channels;
292  planes = planar ? channels : 1;
293 
295  if (!frame->linesize[0]) {
296  ret = av_samples_get_buffer_size(&frame->linesize[0], channels,
297  frame->nb_samples, frame->format,
298  align);
299  if (ret < 0)
300  return ret;
301  }
302 
303  if (planes > AV_NUM_DATA_POINTERS) {
304  frame->extended_data = av_mallocz_array(planes,
305  sizeof(*frame->extended_data));
307  sizeof(*frame->extended_buf));
308  if (!frame->extended_data || !frame->extended_buf) {
309  av_freep(&frame->extended_data);
310  av_freep(&frame->extended_buf);
311  return AVERROR(ENOMEM);
312  }
313  frame->nb_extended_buf = planes - AV_NUM_DATA_POINTERS;
314  } else
315  frame->extended_data = frame->data;
316 
317  for (i = 0; i < FFMIN(planes, AV_NUM_DATA_POINTERS); i++) {
318  frame->buf[i] = av_buffer_alloc(frame->linesize[0]);
319  if (!frame->buf[i]) {
320  av_frame_unref(frame);
321  return AVERROR(ENOMEM);
322  }
323  frame->extended_data[i] = frame->data[i] = frame->buf[i]->data;
324  }
325  for (i = 0; i < planes - AV_NUM_DATA_POINTERS; i++) {
326  frame->extended_buf[i] = av_buffer_alloc(frame->linesize[0]);
327  if (!frame->extended_buf[i]) {
328  av_frame_unref(frame);
329  return AVERROR(ENOMEM);
330  }
331  frame->extended_data[i + AV_NUM_DATA_POINTERS] = frame->extended_buf[i]->data;
332  }
333  return 0;
334 
335 }
336 
337 int av_frame_get_buffer(AVFrame *frame, int align)
338 {
339  if (frame->format < 0)
340  return AVERROR(EINVAL);
341 
342  if (frame->width > 0 && frame->height > 0)
343  return get_video_buffer(frame, align);
344  else if (frame->nb_samples > 0 && (frame->channel_layout || frame->channels > 0))
345  return get_audio_buffer(frame, align);
346 
347  return AVERROR(EINVAL);
348 }
349 
350 static int frame_copy_props(AVFrame *dst, const AVFrame *src, int force_copy)
351 {
352  int i;
353 
354  dst->key_frame = src->key_frame;
355  dst->pict_type = src->pict_type;
357  dst->crop_top = src->crop_top;
358  dst->crop_bottom = src->crop_bottom;
359  dst->crop_left = src->crop_left;
360  dst->crop_right = src->crop_right;
361  dst->pts = src->pts;
362  dst->repeat_pict = src->repeat_pict;
364  dst->top_field_first = src->top_field_first;
366  dst->sample_rate = src->sample_rate;
367  dst->opaque = src->opaque;
368 #if FF_API_PKT_PTS
370  dst->pkt_pts = src->pkt_pts;
372 #endif
373  dst->pkt_dts = src->pkt_dts;
374  dst->pkt_pos = src->pkt_pos;
375  dst->pkt_size = src->pkt_size;
376  dst->pkt_duration = src->pkt_duration;
378  dst->quality = src->quality;
382  dst->flags = src->flags;
384  dst->color_primaries = src->color_primaries;
385  dst->color_trc = src->color_trc;
386  dst->colorspace = src->colorspace;
387  dst->color_range = src->color_range;
388  dst->chroma_location = src->chroma_location;
389 
390  av_dict_copy(&dst->metadata, src->metadata, 0);
391 
392 #if FF_API_ERROR_FRAME
394  memcpy(dst->error, src->error, sizeof(dst->error));
396 #endif
397 
398  for (i = 0; i < src->nb_side_data; i++) {
399  const AVFrameSideData *sd_src = src->side_data[i];
400  AVFrameSideData *sd_dst;
401  if ( sd_src->type == AV_FRAME_DATA_PANSCAN
402  && (src->width != dst->width || src->height != dst->height))
403  continue;
404  if (force_copy) {
405  sd_dst = av_frame_new_side_data(dst, sd_src->type,
406  sd_src->size);
407  if (!sd_dst) {
408  wipe_side_data(dst);
409  return AVERROR(ENOMEM);
410  }
411  memcpy(sd_dst->data, sd_src->data, sd_src->size);
412  } else {
413  AVBufferRef *ref = av_buffer_ref(sd_src->buf);
414  sd_dst = av_frame_new_side_data_from_buf(dst, sd_src->type, ref);
415  if (!sd_dst) {
416  av_buffer_unref(&ref);
417  wipe_side_data(dst);
418  return AVERROR(ENOMEM);
419  }
420  }
421  av_dict_copy(&sd_dst->metadata, sd_src->metadata, 0);
422  }
423 
424 #if FF_API_FRAME_QP
426  dst->qscale_table = NULL;
427  dst->qstride = 0;
428  dst->qscale_type = 0;
430  if (src->qp_table_buf) {
432  if (dst->qp_table_buf) {
433  dst->qscale_table = dst->qp_table_buf->data;
434  dst->qstride = src->qstride;
435  dst->qscale_type = src->qscale_type;
436  }
437  }
439 #endif
440 
443  if (src->opaque_ref) {
444  dst->opaque_ref = av_buffer_ref(src->opaque_ref);
445  if (!dst->opaque_ref)
446  return AVERROR(ENOMEM);
447  }
448  if (src->private_ref) {
450  if (!dst->private_ref)
451  return AVERROR(ENOMEM);
452  }
453  return 0;
454 }
455 
456 int av_frame_ref(AVFrame *dst, const AVFrame *src)
457 {
458  int i, ret = 0;
459 
460  av_assert1(dst->width == 0 && dst->height == 0);
461  av_assert1(dst->channels == 0);
462 
463  dst->format = src->format;
464  dst->width = src->width;
465  dst->height = src->height;
466  dst->channels = src->channels;
467  dst->channel_layout = src->channel_layout;
468  dst->nb_samples = src->nb_samples;
469 
470  ret = frame_copy_props(dst, src, 0);
471  if (ret < 0)
472  return ret;
473 
474  /* duplicate the frame data if it's not refcounted */
475  if (!src->buf[0]) {
476  ret = av_frame_get_buffer(dst, 0);
477  if (ret < 0)
478  return ret;
479 
480  ret = av_frame_copy(dst, src);
481  if (ret < 0)
482  av_frame_unref(dst);
483 
484  return ret;
485  }
486 
487  /* ref the buffers */
488  for (i = 0; i < FF_ARRAY_ELEMS(src->buf); i++) {
489  if (!src->buf[i])
490  continue;
491  dst->buf[i] = av_buffer_ref(src->buf[i]);
492  if (!dst->buf[i]) {
493  ret = AVERROR(ENOMEM);
494  goto fail;
495  }
496  }
497 
498  if (src->extended_buf) {
499  dst->extended_buf = av_mallocz_array(sizeof(*dst->extended_buf),
500  src->nb_extended_buf);
501  if (!dst->extended_buf) {
502  ret = AVERROR(ENOMEM);
503  goto fail;
504  }
505  dst->nb_extended_buf = src->nb_extended_buf;
506 
507  for (i = 0; i < src->nb_extended_buf; i++) {
508  dst->extended_buf[i] = av_buffer_ref(src->extended_buf[i]);
509  if (!dst->extended_buf[i]) {
510  ret = AVERROR(ENOMEM);
511  goto fail;
512  }
513  }
514  }
515 
516  if (src->hw_frames_ctx) {
518  if (!dst->hw_frames_ctx) {
519  ret = AVERROR(ENOMEM);
520  goto fail;
521  }
522  }
523 
524  /* duplicate extended data */
525  if (src->extended_data != src->data) {
526  int ch = src->channels;
527 
528  if (!ch) {
529  ret = AVERROR(EINVAL);
530  goto fail;
531  }
533 
534  dst->extended_data = av_malloc_array(sizeof(*dst->extended_data), ch);
535  if (!dst->extended_data) {
536  ret = AVERROR(ENOMEM);
537  goto fail;
538  }
539  memcpy(dst->extended_data, src->extended_data, sizeof(*src->extended_data) * ch);
540  } else
541  dst->extended_data = dst->data;
542 
543  memcpy(dst->data, src->data, sizeof(src->data));
544  memcpy(dst->linesize, src->linesize, sizeof(src->linesize));
545 
546  return 0;
547 
548 fail:
549  av_frame_unref(dst);
550  return ret;
551 }
552 
553 AVFrame *av_frame_clone(const AVFrame *src)
554 {
555  AVFrame *ret = av_frame_alloc();
556 
557  if (!ret)
558  return NULL;
559 
560  if (av_frame_ref(ret, src) < 0)
561  av_frame_free(&ret);
562 
563  return ret;
564 }
565 
566 void av_frame_unref(AVFrame *frame)
567 {
568  int i;
569 
570  if (!frame)
571  return;
572 
573  wipe_side_data(frame);
574 
575  for (i = 0; i < FF_ARRAY_ELEMS(frame->buf); i++)
576  av_buffer_unref(&frame->buf[i]);
577  for (i = 0; i < frame->nb_extended_buf; i++)
578  av_buffer_unref(&frame->extended_buf[i]);
579  av_freep(&frame->extended_buf);
580  av_dict_free(&frame->metadata);
581 #if FF_API_FRAME_QP
583  av_buffer_unref(&frame->qp_table_buf);
585 #endif
586 
588 
589  av_buffer_unref(&frame->opaque_ref);
590  av_buffer_unref(&frame->private_ref);
591 
592  get_frame_defaults(frame);
593 }
594 
595 void av_frame_move_ref(AVFrame *dst, AVFrame *src)
596 {
597  av_assert1(dst->width == 0 && dst->height == 0);
598  av_assert1(dst->channels == 0);
599 
600  *dst = *src;
601  if (src->extended_data == src->data)
602  dst->extended_data = dst->data;
603  memset(src, 0, sizeof(*src));
604  get_frame_defaults(src);
605 }
606 
607 int av_frame_is_writable(AVFrame *frame)
608 {
609  int i, ret = 1;
610 
611  /* assume non-refcounted frames are not writable */
612  if (!frame->buf[0])
613  return 0;
614 
615  for (i = 0; i < FF_ARRAY_ELEMS(frame->buf); i++)
616  if (frame->buf[i])
617  ret &= !!av_buffer_is_writable(frame->buf[i]);
618  for (i = 0; i < frame->nb_extended_buf; i++)
619  ret &= !!av_buffer_is_writable(frame->extended_buf[i]);
620 
621  return ret;
622 }
623 
624 int av_frame_make_writable(AVFrame *frame)
625 {
626  AVFrame tmp;
627  int ret;
628 
629  if (!frame->buf[0])
630  return AVERROR(EINVAL);
631 
632  if (av_frame_is_writable(frame))
633  return 0;
634 
635  memset(&tmp, 0, sizeof(tmp));
636  tmp.format = frame->format;
637  tmp.width = frame->width;
638  tmp.height = frame->height;
639  tmp.channels = frame->channels;
640  tmp.channel_layout = frame->channel_layout;
641  tmp.nb_samples = frame->nb_samples;
642 
643  if (frame->hw_frames_ctx)
644  ret = av_hwframe_get_buffer(frame->hw_frames_ctx, &tmp, 0);
645  else
646  ret = av_frame_get_buffer(&tmp, 0);
647  if (ret < 0)
648  return ret;
649 
650  ret = av_frame_copy(&tmp, frame);
651  if (ret < 0) {
652  av_frame_unref(&tmp);
653  return ret;
654  }
655 
656  ret = av_frame_copy_props(&tmp, frame);
657  if (ret < 0) {
658  av_frame_unref(&tmp);
659  return ret;
660  }
661 
662  av_frame_unref(frame);
663 
664  *frame = tmp;
665  if (tmp.data == tmp.extended_data)
666  frame->extended_data = frame->data;
667 
668  return 0;
669 }
670 
671 int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
672 {
673  return frame_copy_props(dst, src, 1);
674 }
675 
676 AVBufferRef *av_frame_get_plane_buffer(AVFrame *frame, int plane)
677 {
678  uint8_t *data;
679  int planes, i;
680 
681  if (frame->nb_samples) {
682  int channels = frame->channels;
683  if (!channels)
684  return NULL;
686  planes = av_sample_fmt_is_planar(frame->format) ? channels : 1;
687  } else
688  planes = 4;
689 
690  if (plane < 0 || plane >= planes || !frame->extended_data[plane])
691  return NULL;
692  data = frame->extended_data[plane];
693 
694  for (i = 0; i < FF_ARRAY_ELEMS(frame->buf) && frame->buf[i]; i++) {
695  AVBufferRef *buf = frame->buf[i];
696  if (data >= buf->data && data < buf->data + buf->size)
697  return buf;
698  }
699  for (i = 0; i < frame->nb_extended_buf; i++) {
700  AVBufferRef *buf = frame->extended_buf[i];
701  if (data >= buf->data && data < buf->data + buf->size)
702  return buf;
703  }
704  return NULL;
705 }
706 
709  AVBufferRef *buf)
710 {
711  AVFrameSideData *ret, **tmp;
712 
713  if (!buf)
714  return NULL;
715 
716  if (frame->nb_side_data > INT_MAX / sizeof(*frame->side_data) - 1)
717  return NULL;
718 
719  tmp = av_realloc(frame->side_data,
720  (frame->nb_side_data + 1) * sizeof(*frame->side_data));
721  if (!tmp)
722  return NULL;
723  frame->side_data = tmp;
724 
725  ret = av_mallocz(sizeof(*ret));
726  if (!ret)
727  return NULL;
728 
729  ret->buf = buf;
730  ret->data = ret->buf->data;
731  ret->size = buf->size;
732  ret->type = type;
733 
734  frame->side_data[frame->nb_side_data++] = ret;
735 
736  return ret;
737 }
738 
741  int size)
742 {
744  AVBufferRef *buf = av_buffer_alloc(size);
745  ret = av_frame_new_side_data_from_buf(frame, type, buf);
746  if (!ret)
747  av_buffer_unref(&buf);
748  return ret;
749 }
750 
753 {
754  int i;
755 
756  for (i = 0; i < frame->nb_side_data; i++) {
757  if (frame->side_data[i]->type == type)
758  return frame->side_data[i];
759  }
760  return NULL;
761 }
762 
763 static int frame_copy_video(AVFrame *dst, const AVFrame *src)
764 {
765  const uint8_t *src_data[4];
766  int i, planes;
767 
768  if (dst->width < src->width ||
769  dst->height < src->height)
770  return AVERROR(EINVAL);
771 
772  if (src->hw_frames_ctx || dst->hw_frames_ctx)
773  return av_hwframe_transfer_data(dst, src, 0);
774 
775  planes = av_pix_fmt_count_planes(dst->format);
776  for (i = 0; i < planes; i++)
777  if (!dst->data[i] || !src->data[i])
778  return AVERROR(EINVAL);
779 
780  memcpy(src_data, src->data, sizeof(src_data));
781  av_image_copy(dst->data, dst->linesize,
782  src_data, src->linesize,
783  dst->format, src->width, src->height);
784 
785  return 0;
786 }
787 
788 static int frame_copy_audio(AVFrame *dst, const AVFrame *src)
789 {
791  int channels = dst->channels;
792  int planes = planar ? channels : 1;
793  int i;
794 
795  if (dst->nb_samples != src->nb_samples ||
796  dst->channels != src->channels ||
797  dst->channel_layout != src->channel_layout)
798  return AVERROR(EINVAL);
799 
801 
802  for (i = 0; i < planes; i++)
803  if (!dst->extended_data[i] || !src->extended_data[i])
804  return AVERROR(EINVAL);
805 
807  dst->nb_samples, channels, dst->format);
808 
809  return 0;
810 }
811 
812 int av_frame_copy(AVFrame *dst, const AVFrame *src)
813 {
814  if (dst->format != src->format || dst->format < 0)
815  return AVERROR(EINVAL);
816 
817  if (dst->width > 0 && dst->height > 0)
818  return frame_copy_video(dst, src);
819  else if (dst->nb_samples > 0 && dst->channels > 0)
820  return frame_copy_audio(dst, src);
821 
822  return AVERROR(EINVAL);
823 }
824 
826 {
827  int i;
828 
829  for (i = frame->nb_side_data - 1; i >= 0; i--) {
830  AVFrameSideData *sd = frame->side_data[i];
831  if (sd->type == type) {
832  free_side_data(&frame->side_data[i]);
833  frame->side_data[i] = frame->side_data[frame->nb_side_data - 1];
834  frame->nb_side_data--;
835  }
836  }
837 }
838 
840 {
841  switch(type) {
842  case AV_FRAME_DATA_PANSCAN: return "AVPanScan";
843  case AV_FRAME_DATA_A53_CC: return "ATSC A53 Part 4 Closed Captions";
844  case AV_FRAME_DATA_STEREO3D: return "Stereo 3D";
845  case AV_FRAME_DATA_MATRIXENCODING: return "AVMatrixEncoding";
846  case AV_FRAME_DATA_DOWNMIX_INFO: return "Metadata relevant to a downmix procedure";
847  case AV_FRAME_DATA_REPLAYGAIN: return "AVReplayGain";
848  case AV_FRAME_DATA_DISPLAYMATRIX: return "3x3 displaymatrix";
849  case AV_FRAME_DATA_AFD: return "Active format description";
850  case AV_FRAME_DATA_MOTION_VECTORS: return "Motion vectors";
851  case AV_FRAME_DATA_SKIP_SAMPLES: return "Skip samples";
852  case AV_FRAME_DATA_AUDIO_SERVICE_TYPE: return "Audio service type";
853  case AV_FRAME_DATA_MASTERING_DISPLAY_METADATA: return "Mastering display metadata";
854  case AV_FRAME_DATA_CONTENT_LIGHT_LEVEL: return "Content light level metadata";
855  case AV_FRAME_DATA_GOP_TIMECODE: return "GOP timecode";
856  case AV_FRAME_DATA_S12M_TIMECODE: return "SMPTE 12-1 timecode";
857  case AV_FRAME_DATA_SPHERICAL: return "Spherical Mapping";
858  case AV_FRAME_DATA_ICC_PROFILE: return "ICC profile";
859 #if FF_API_FRAME_QP
860  case AV_FRAME_DATA_QP_TABLE_PROPERTIES: return "QP table properties";
861  case AV_FRAME_DATA_QP_TABLE_DATA: return "QP table data";
862 #endif
863  case AV_FRAME_DATA_DYNAMIC_HDR_PLUS: return "HDR Dynamic Metadata SMPTE2094-40 (HDR10+)";
864  case AV_FRAME_DATA_REGIONS_OF_INTEREST: return "Regions Of Interest";
865  case AV_FRAME_DATA_VIDEO_ENC_PARAMS: return "Video encoding parameters";
866  case AV_FRAME_DATA_SEI_UNREGISTERED: return "H.26[45] User Data Unregistered SEI message";
867  }
868  return NULL;
869 }
870 
871 static int calc_cropping_offsets(size_t offsets[4], const AVFrame *frame,
872  const AVPixFmtDescriptor *desc)
873 {
874  int i, j;
875 
876  for (i = 0; frame->data[i]; i++) {
878  int shift_x = (i == 1 || i == 2) ? desc->log2_chroma_w : 0;
879  int shift_y = (i == 1 || i == 2) ? desc->log2_chroma_h : 0;
880 
881  if (desc->flags & (AV_PIX_FMT_FLAG_PAL | FF_PSEUDOPAL) && i == 1) {
882  offsets[i] = 0;
883  break;
884  }
885 
886  /* find any component descriptor for this plane */
887  for (j = 0; j < desc->nb_components; j++) {
888  if (desc->comp[j].plane == i) {
889  comp = &desc->comp[j];
890  break;
891  }
892  }
893  if (!comp)
894  return AVERROR_BUG;
895 
896  offsets[i] = (frame->crop_top >> shift_y) * frame->linesize[i] +
897  (frame->crop_left >> shift_x) * comp->step;
898  }
899 
900  return 0;
901 }
902 
903 int av_frame_apply_cropping(AVFrame *frame, int flags)
904 {
905  const AVPixFmtDescriptor *desc;
906  size_t offsets[4];
907  int i;
908 
909  if (!(frame->width > 0 && frame->height > 0))
910  return AVERROR(EINVAL);
911 
912  if (frame->crop_left >= INT_MAX - frame->crop_right ||
913  frame->crop_top >= INT_MAX - frame->crop_bottom ||
914  (frame->crop_left + frame->crop_right) >= frame->width ||
915  (frame->crop_top + frame->crop_bottom) >= frame->height)
916  return AVERROR(ERANGE);
917 
918  desc = av_pix_fmt_desc_get(frame->format);
919  if (!desc)
920  return AVERROR_BUG;
921 
922  /* Apply just the right/bottom cropping for hwaccel formats. Bitstream
923  * formats cannot be easily handled here either (and corresponding decoders
924  * should not export any cropping anyway), so do the same for those as well.
925  * */
927  frame->width -= frame->crop_right;
928  frame->height -= frame->crop_bottom;
929  frame->crop_right = 0;
930  frame->crop_bottom = 0;
931  return 0;
932  }
933 
934  /* calculate the offsets for each plane */
935  calc_cropping_offsets(offsets, frame, desc);
936 
937  /* adjust the offsets to avoid breaking alignment */
938  if (!(flags & AV_FRAME_CROP_UNALIGNED)) {
939  int log2_crop_align = frame->crop_left ? ff_ctz(frame->crop_left) : INT_MAX;
940  int min_log2_align = INT_MAX;
941 
942  for (i = 0; frame->data[i]; i++) {
943  int log2_align = offsets[i] ? ff_ctz(offsets[i]) : INT_MAX;
944  min_log2_align = FFMIN(log2_align, min_log2_align);
945  }
946 
947  /* we assume, and it should always be true, that the data alignment is
948  * related to the cropping alignment by a constant power-of-2 factor */
949  if (log2_crop_align < min_log2_align)
950  return AVERROR_BUG;
951 
952  if (min_log2_align < 5) {
953  frame->crop_left &= ~((1 << (5 + log2_crop_align - min_log2_align)) - 1);
954  calc_cropping_offsets(offsets, frame, desc);
955  }
956  }
957 
958  for (i = 0; frame->data[i]; i++)
959  frame->data[i] += offsets[i];
960 
961  frame->width -= (frame->crop_left + frame->crop_right);
962  frame->height -= (frame->crop_top + frame->crop_bottom);
963  frame->crop_left = 0;
964  frame->crop_right = 0;
965  frame->crop_top = 0;
966  frame->crop_bottom = 0;
967 
968  return 0;
969 }
#define AV_PIX_FMT_FLAG_PAL
Pixel format has a palette in data[1], values are indexes in this palette.
Definition: pixdesc.h:132
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / SMPTE RP177 Annex B
Definition: pixfmt.h:514
int av_frame_set_qp_table(AVFrame *f, AVBufferRef *buf, int stride, int qp_type)
Definition: frame.c:55
int plane
Which of the 4 planes contains the component.
Definition: pixdesc.h:35
#define NULL
Definition: coverity.c:32
#define ff_ctz
Definition: intmath.h:106
#define AV_NUM_DATA_POINTERS
Definition: frame.h:309
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it...
Definition: buffer.c:125
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2573
This structure describes decoded (raw) audio or video data.
Definition: frame.h:308
void * av_realloc(void *ptr, size_t size)
Allocate, reallocate, or free a block of memory.
Definition: mem.c:134
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:100
This side data must be associated with an audio frame and corresponds to enum AVAudioServiceType defi...
Definition: frame.h:113
attribute_deprecated int qscale_type
Definition: frame.h:638
int64_t pkt_pos
reordered pos from the last AVPacket that has been input into the decoder
Definition: frame.h:579
const char * desc
Definition: libsvtav1.c:79
misc image utilities
int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2613
Memory handling functions.
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:499
AVDictionary * metadata
Definition: frame.h:218
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601 ...
Definition: pixfmt.h:518
void * opaque
for some private data of the user
Definition: frame.h:436
int nb_extended_buf
Number of elements in extended_buf.
Definition: frame.h:517
Content light level (based on CTA-861.3).
Definition: frame.h:136
Timecode which conforms to SMPTE ST 12-1.
Definition: frame.h:168
int repeat_pict
When decoding, this signals how much the picture must be delayed.
Definition: frame.h:450
also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC
Definition: pixfmt.h:519
int av_dict_copy(AVDictionary **dst, const AVDictionary *src, int flags)
Copy entries from one AVDictionary struct into another.
Definition: dict.c:217
GLint GLenum type
Definition: opengl_enc.c:104
Mastering display metadata associated with a video frame.
Definition: frame.h:119
attribute_deprecated AVBufferRef * qp_table_buf
Definition: frame.h:641
static void wipe_side_data(AVFrame *frame)
Definition: frame.c:178
color_range
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:595
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:237
size_t crop_bottom
Definition: frame.h:669
order of coefficients is actually GBR, also IEC 61966-2-1 (sRGB)
Definition: pixfmt.h:513
uint8_t log2_chroma_w
Amount to shift the luma width right to find the chroma width.
Definition: pixdesc.h:92
attribute_deprecated int8_t * qscale_table
QP table.
Definition: frame.h:630
functionally identical to above
Definition: pixfmt.h:520
int av_get_channel_layout_nb_channels(uint64_t channel_layout)
Return the number of channels in the channel layout.
AVFrameSideData * av_frame_get_side_data(const AVFrame *frame, enum AVFrameSideDataType type)
Definition: frame.c:751
AVBufferRef * hw_frames_ctx
For hwaccel-format frames, this should be a reference to the AVHWFramesContext describing the frame...
Definition: frame.h:647
Public dictionary API.
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:117
uint8_t
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:190
static int frame_copy_video(AVFrame *dst, const AVFrame *src)
Definition: frame.c:763
AVColorSpace
YUV colorspace type.
Definition: pixfmt.h:512
size_t crop_left
Definition: frame.h:670
#define f(width, name)
Definition: cbs_vp9.c:255
attribute_deprecated int qstride
QP store stride.
Definition: frame.h:635
AVBufferRef * private_ref
AVBufferRef for internal use by a single libav* library.
Definition: frame.h:687
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:456
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:401
AVBufferRef * buf
Definition: frame.h:219
The data is the AVPanScan struct defined in libavcodec.
Definition: frame.h:52
HDR dynamic metadata associated with a video frame.
Definition: frame.h:175
Structure to hold side data for an AVFrame.
Definition: frame.h:214
AVDictionary * metadata
metadata.
Definition: frame.h:594
int interlaced_frame
The content of the picture is interlaced.
Definition: frame.h:455
AVColorRange
MPEG vs JPEG YUV range.
Definition: pixfmt.h:535
ptrdiff_t size
Definition: opengl_enc.c:100
The data represents the AVSphericalMapping structure defined in libavutil/spherical.h.
Definition: frame.h:130
channels
Definition: aptx.h:33
Active Format Description data consisting of a single byte as specified in ETSI TS 101 154 using AVAc...
Definition: frame.h:89
Metadata relevant to a downmix procedure.
Definition: frame.h:72
int nb_side_data
Definition: frame.h:520
#define FFALIGN(x, a)
Definition: macros.h:48
attribute_deprecated uint64_t error[AV_NUM_DATA_POINTERS]
Definition: frame.h:443
AVFrameSideData ** side_data
Definition: frame.h:519
int av_sample_fmt_is_planar(enum AVSampleFormat sample_fmt)
Check if the sample format is planar.
Definition: samplefmt.c:112
Implementation-specific description of the format of AV_FRAME_QP_TABLE_DATA.
Definition: frame.h:152
#define src
Definition: vp8dsp.c:254
static const struct @323 planes[]
int width
Definition: frame.h:366
uint8_t log2_chroma_h
Amount to shift the luma height right to find the chroma height.
Definition: pixdesc.h:101
#define MAKE_ACCESSORS(str, name, type, field)
Definition: internal.h:91
static const int sizes[][2]
Definition: img2dec.c:53
static int frame_copy_props(AVFrame *dst, const AVFrame *src, int force_copy)
Definition: frame.c:350
int av_frame_apply_cropping(AVFrame *frame, int flags)
Crop the given video AVFrame according to its crop_left/crop_top/crop_right/ crop_bottom fields...
Definition: frame.c:903
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:203
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: frame.h:552
ATSC A53 Part 4 Closed Captions.
Definition: frame.h:58
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values. ...
Definition: dict.c:203
enum AVColorSpace colorspace
YUV colorspace type.
Definition: frame.h:563
#define AV_PIX_FMT_FLAG_HWACCEL
Pixel format is an HW accelerated format.
Definition: pixdesc.h:140
simple assert() macros that are a bit more flexible than ISO C assert().
The GOP timecode in 25 bit timecode format.
Definition: frame.h:124
#define FFMAX(a, b)
Definition: common.h:94
static int get_audio_buffer(AVFrame *frame, int align)
Definition: frame.c:281
int av_hwframe_get_buffer(AVBufferRef *hwframe_ref, AVFrame *frame, int flags)
Allocate a new frame attached to the given AVHWFramesContext.
Definition: hwcontext.c:502
#define fail()
Definition: checkasm.h:123
void av_image_copy(uint8_t *dst_data[4], int dst_linesizes[4], const uint8_t *src_data[4], const int src_linesizes[4], enum AVPixelFormat pix_fmt, int width, int height)
Copy image in src_data to dst_data.
Definition: imgutils.c:422
int av_frame_copy(AVFrame *dst, const AVFrame *src)
Copy the frame data from src to dst.
Definition: frame.c:812
reference-counted frame API
uint64_t channel_layout
Channel layout of the audio data.
Definition: frame.h:485
size_t crop_top
Definition: frame.h:668
int av_hwframe_transfer_data(AVFrame *dst, const AVFrame *src, int flags)
Copy data to or from a hw surface.
Definition: hwcontext.c:443
uint64_t flags
Combination of AV_PIX_FMT_FLAG_...
Definition: pixdesc.h:106
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:317
uint8_t nb_components
The number of components each pixel has, (1-4)
Definition: pixdesc.h:83
int channels
number of audio channels, only used for audio.
Definition: frame.h:614
audio channel layout utility functions
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:391
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:545
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
#define FFMIN(a, b)
Definition: common.h:96
int display_picture_number
picture number in display order
Definition: frame.h:426
AVBufferRef ** extended_buf
For planar audio which requires more than AV_NUM_DATA_POINTERS AVBufferRef pointers, this array will hold all the references which cannot fit into AVFrame.buf.
Definition: frame.h:513
AVFrameSideData * av_frame_new_side_data_from_buf(AVFrame *frame, enum AVFrameSideDataType type, AVBufferRef *buf)
Add a new side data to a frame from an existing AVBufferRef.
Definition: frame.c:707
Motion vectors exported by some codecs (on demand through the export_mvs flag set in the libavcodec A...
Definition: frame.h:96
AVBufferRef * av_frame_get_plane_buffer(AVFrame *frame, int plane)
Get the buffer reference a given data plane is stored in.
Definition: frame.c:676
AVFrameSideDataType
Definition: frame.h:48
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
int quality
quality (between 1 (good) and FF_LAMBDA_MAX (bad))
Definition: frame.h:431
The data contains an ICC profile as an opaque octet buffer following the format described by ISO 1507...
Definition: frame.h:143
int av_buffer_is_writable(const AVBufferRef *buf)
Definition: buffer.c:133
const char * av_get_colorspace_name(enum AVColorSpace val)
Get the name of a colorspace.
Definition: frame.c:123
FCC Title 47 Code of Federal Regulations 73.682 (a)(20)
Definition: pixfmt.h:517
AVFrame * av_frame_clone(const AVFrame *src)
Create a new frame that references the same data as src.
Definition: frame.c:553
#define FF_ARRAY_ELEMS(a)
static void comp(unsigned char *dst, ptrdiff_t dst_stride, unsigned char *src, ptrdiff_t src_stride, int add)
Definition: eamad.c:83
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:381
int coded_picture_number
picture number in bitstream order
Definition: frame.h:422
sample_rate
int av_image_fill_pointers(uint8_t *data[4], enum AVPixelFormat pix_fmt, int height, uint8_t *ptr, const int linesizes[4])
Fill plane data pointers for an image with pixel format pix_fmt and height height.
Definition: imgutils.c:146
int64_t pkt_duration
duration of the corresponding packet, expressed in AVStream->time_base units, 0 if unknown...
Definition: frame.h:587
int stride
Definition: frame.c:51
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: frame.h:84
int av_frame_is_writable(AVFrame *frame)
Check if the frame data is writable.
Definition: frame.c:607
AVBufferRef * av_buffer_alloc(int size)
Allocate an AVBuffer of the given size using av_malloc().
Definition: buffer.c:67
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:339
static int get_video_buffer(AVFrame *frame, int align)
Definition: frame.c:212
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
uint8_t pi<< 24) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_U8,(uint64_t)((*(const uint8_t *) pi-0x80U))<< 56) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16,(*(const int16_t *) pi >>8)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S16,(uint64_t)(*(const int16_t *) pi)<< 48) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16,*(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16,*(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32,(*(const int32_t *) pi >>24)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S32,(uint64_t)(*(const int32_t *) pi)<< 32) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32,*(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32,*(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S64,(*(const int64_t *) pi >>56)+0x80) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S64,*(const int64_t *) pi *(1.0f/(UINT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S64,*(const int64_t *) pi *(1.0/(UINT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_FLT, llrintf(*(const float *) pi *(UINT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, 
av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_DBL, llrint(*(const double *) pi *(UINT64_C(1)<< 63)))#define FMT_PAIR_FUNC(out, in) static conv_func_type *const fmt_pair_to_conv_functions[AV_SAMPLE_FMT_NB *AV_SAMPLE_FMT_NB]={FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_DBL), 
FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S64),};static void cpy1(uint8_t **dst, const uint8_t **src, int len){memcpy(*dst,*src, len);}static void cpy2(uint8_t **dst, const uint8_t **src, int len){memcpy(*dst,*src, 2 *len);}static void cpy4(uint8_t **dst, const uint8_t **src, int len){memcpy(*dst,*src, 4 *len);}static void cpy8(uint8_t **dst, const uint8_t **src, int len){memcpy(*dst,*src, 8 *len);}AudioConvert *swri_audio_convert_alloc(enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, const int *ch_map, int flags){AudioConvert *ctx;conv_func_type *f=fmt_pair_to_conv_functions[av_get_packed_sample_fmt(out_fmt)+AV_SAMPLE_FMT_NB *av_get_packed_sample_fmt(in_fmt)];if(!f) return NULL;ctx=av_mallocz(sizeof(*ctx));if(!ctx) return NULL;if(channels==1){in_fmt=av_get_planar_sample_fmt(in_fmt);out_fmt=av_get_planar_sample_fmt(out_fmt);}ctx->channels=channels;ctx->conv_f=f;ctx->ch_map=ch_map;if(in_fmt==AV_SAMPLE_FMT_U8||in_fmt==AV_SAMPLE_FMT_U8P) memset(ctx->silence, 0x80, sizeof(ctx->silence));if(out_fmt==in_fmt &&!ch_map){switch(av_get_bytes_per_sample(in_fmt)){case 1:ctx->simd_f=cpy1;break;case 2:ctx->simd_f=cpy2;break;case 4:ctx->simd_f=cpy4;break;case 8:ctx->simd_f=cpy8;break;}}if(HAVE_X86ASM &&1) swri_audio_convert_init_x86(ctx, out_fmt, in_fmt, channels);if(ARCH_ARM) swri_audio_convert_init_arm(ctx, out_fmt, in_fmt, channels);if(ARCH_AARCH64) swri_audio_convert_init_aarch64(ctx, out_fmt, in_fmt, channels);return ctx;}void swri_audio_convert_free(AudioConvert **ctx){av_freep(ctx);}int swri_audio_convert(AudioConvert *ctx, AudioData *out, AudioData *in, int len){int ch;int off=0;const int os=(out->planar?1:out->ch_count)*out->bps;unsigned 
misaligned=0;av_assert0(ctx->channels==out->ch_count);if(ctx->in_simd_align_mask){int planes=in->planar?in->ch_count:1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) in->ch[ch];misaligned|=m &ctx->in_simd_align_mask;}if(ctx->out_simd_align_mask){int planes=out->planar?out->ch_count:1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) out->ch[ch];misaligned|=m &ctx->out_simd_align_mask;}if(ctx->simd_f &&!ctx->ch_map &&!misaligned){off=len &~15;av_assert1(off >=0);av_assert1(off<=len);av_assert2(ctx->channels==SWR_CH_MAX||!in->ch[ctx->channels]);if(off >0){if(out->planar==in->planar){int planes=out->planar?out->ch_count:1;for(ch=0;ch< planes;ch++){ctx->simd_f(out-> ch const uint8_t **in ch off *out planar
Definition: audioconvert.c:56
static int calc_cropping_offsets(size_t offsets[4], const AVFrame *frame, const AVPixFmtDescriptor *desc)
Definition: frame.c:871
User data unregistered metadata associated with a video frame.
Definition: frame.h:194
uint8_t * data
The data buffer.
Definition: buffer.h:89
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:396
uint8_t * data
Definition: frame.h:216
int av_samples_copy(uint8_t **dst, uint8_t *const *src, int dst_offset, int src_offset, int nb_samples, int nb_channels, enum AVSampleFormat sample_fmt)
Copy samples from src to dst.
Definition: samplefmt.c:213
void av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type)
Remove and free all side data instances of the given type.
Definition: frame.c:825
size_t crop_right
Definition: frame.h:671
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:50
int64_t reordered_opaque
reordered opaque 64 bits (generally an integer or a double precision float PTS but can be anything)...
Definition: frame.h:475
int sample_rate
Sample rate of the audio data.
Definition: frame.h:480
int av_image_fill_linesizes(int linesizes[4], enum AVPixelFormat pix_fmt, int width)
Fill plane linesizes for an image with pixel format pix_fmt and width width.
Definition: imgutils.c:89
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, int size)
Add a new side data to a frame.
Definition: frame.c:739
int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples, enum AVSampleFormat sample_fmt, int align)
Get the required buffer size for the given audio parameters.
Definition: samplefmt.c:119
Rational number (pair of numerator and denominator).
Definition: rational.h:58
Regions Of Interest, the data is an array of AVRegionOfInterest type, the number of array element is ...
Definition: frame.h:181
int palette_has_changed
Tell user application that palette has changed from previous frame.
Definition: frame.h:465
int av_image_fill_plane_sizes(size_t sizes[4], enum AVPixelFormat pix_fmt, int height, const ptrdiff_t linesizes[4])
Fill plane sizes for an image with pixel format pix_fmt and height height.
Definition: imgutils.c:111
refcounted data buffer API
enum AVChromaLocation chroma_location
Definition: frame.h:565
int64_t best_effort_timestamp
frame timestamp estimated using various heuristics, in stream time base
Definition: frame.h:572
int decode_error_flags
decode error flags of the frame, set to a combination of FF_DECODE_ERROR_xxx flags if the decoder pro...
Definition: frame.h:603
static int frame_copy_audio(AVFrame *dst, const AVFrame *src)
Definition: frame.c:788
#define AV_PIX_FMT_FLAG_BITSTREAM
All values of a component are bit-wise packed end to end.
Definition: pixdesc.h:136
uint8_t pi<< 24) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_U8,(uint64_t)((*(const uint8_t *) pi-0x80U))<< 56) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16,(*(const int16_t *) pi >>8)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S16,(uint64_t)(*(const int16_t *) pi)<< 48) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16,*(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16,*(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32,(*(const int32_t *) pi >>24)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S32,(uint64_t)(*(const int32_t *) pi)<< 32) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32,*(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32,*(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S64,(*(const int64_t *) pi >>56)+0x80) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S64,*(const int64_t *) pi *(1.0f/(UINT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S64,*(const int64_t *) pi *(1.0/(UINT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_FLT, llrintf(*(const float *) pi *(UINT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, 
av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_DBL, llrint(*(const double *) pi *(UINT64_C(1)<< 63)))#define FMT_PAIR_FUNC(out, in) static conv_func_type *const fmt_pair_to_conv_functions[AV_SAMPLE_FMT_NB *AV_SAMPLE_FMT_NB]={FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_DBL), 
FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S64),};static void cpy1(uint8_t **dst, const uint8_t **src, int len){memcpy(*dst,*src, len);}static void cpy2(uint8_t **dst, const uint8_t **src, int len){memcpy(*dst,*src, 2 *len);}static void cpy4(uint8_t **dst, const uint8_t **src, int len){memcpy(*dst,*src, 4 *len);}static void cpy8(uint8_t **dst, const uint8_t **src, int len){memcpy(*dst,*src, 8 *len);}AudioConvert *swri_audio_convert_alloc(enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, const int *ch_map, int flags){AudioConvert *ctx;conv_func_type *f=fmt_pair_to_conv_functions[av_get_packed_sample_fmt(out_fmt)+AV_SAMPLE_FMT_NB *av_get_packed_sample_fmt(in_fmt)];if(!f) return NULL;ctx=av_mallocz(sizeof(*ctx));if(!ctx) return NULL;if(channels==1){in_fmt=av_get_planar_sample_fmt(in_fmt);out_fmt=av_get_planar_sample_fmt(out_fmt);}ctx->channels=channels;ctx->conv_f=f;ctx->ch_map=ch_map;if(in_fmt==AV_SAMPLE_FMT_U8||in_fmt==AV_SAMPLE_FMT_U8P) memset(ctx->silence, 0x80, sizeof(ctx->silence));if(out_fmt==in_fmt &&!ch_map){switch(av_get_bytes_per_sample(in_fmt)){case 1:ctx->simd_f=cpy1;break;case 2:ctx->simd_f=cpy2;break;case 4:ctx->simd_f=cpy4;break;case 8:ctx->simd_f=cpy8;break;}}if(HAVE_X86ASM &&1) swri_audio_convert_init_x86(ctx, out_fmt, in_fmt, channels);if(ARCH_ARM) swri_audio_convert_init_arm(ctx, out_fmt, in_fmt, channels);if(ARCH_AARCH64) swri_audio_convert_init_aarch64(ctx, out_fmt, in_fmt, channels);return ctx;}void swri_audio_convert_free(AudioConvert **ctx){av_freep(ctx);}int swri_audio_convert(AudioConvert *ctx, AudioData *out, AudioData *in, int len){int ch;int off=0;const int os=(out->planar?1:out->ch_count)*out->bps;unsigned 
misaligned=0;av_assert0(ctx->channels==out->ch_count);if(ctx->in_simd_align_mask){int planes=in->planar?in->ch_count:1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) in->ch[ch];misaligned|=m &ctx->in_simd_align_mask;}if(ctx->out_simd_align_mask){int planes=out->planar?out->ch_count:1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) out->ch[ch];misaligned|=m &ctx->out_simd_align_mask;}if(ctx->simd_f &&!ctx->ch_map &&!misaligned){off=len &~15;av_assert1(off >=0);av_assert1(off<=len);av_assert2(ctx->channels==SWR_CH_MAX||!in->ch[ctx->channels]);if(off >0){if(out->planar==in->planar){int planes=out->planar?out->ch_count:1;for(ch=0;ch< planes;ch++){ctx->simd_f(out-> ch ch
Definition: audioconvert.c:56
const char * av_frame_side_data_name(enum AVFrameSideDataType type)
Definition: frame.c:839
int size
Size of data in bytes.
Definition: buffer.h:93
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:337
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:566
enum AVFrameSideDataType type
Definition: frame.h:215
int av_frame_make_writable(AVFrame *frame)
Ensure that the frame data is writable, avoiding data copy if possible.
Definition: frame.c:624
#define flags(name, subs,...)
Definition: cbs_av1.c:560
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:322
attribute_deprecated int64_t pkt_pts
PTS copied from the AVPacket that was decoded to produce this frame.
Definition: frame.h:409
int64_t pkt_dts
DTS copied from the AVPacket that triggered returning this frame.
Definition: frame.h:417
A reference to a data buffer.
Definition: buffer.h:81
GLint GLenum GLboolean GLsizei stride
Definition: opengl_enc.c:104
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:84
common internal and external API header
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:107
#define CHECK_CHANNELS_CONSISTENCY(frame)
Definition: frame.c:44
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:93
#define FF_PSEUDOPAL
Definition: internal.h:335
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:85
int top_field_first
If the content is interlaced, is top field displayed first.
Definition: frame.h:460
AVBufferRef * opaque_ref
AVBufferRef for free use by the API user.
Definition: frame.h:658
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:386
enum AVColorPrimaries color_primaries
Definition: frame.h:554
Encoding parameters for a video frame, as described by AVVideoEncParams.
Definition: frame.h:186
int height
Definition: frame.h:366
#define av_freep(p)
int type
Definition: frame.c:52
static void free_side_data(AVFrameSideData **ptr_sd)
Definition: frame.c:169
enum AVColorTransferCharacteristic color_trc
Definition: frame.h:556
Recommmends skipping the specified number of samples.
Definition: frame.h:108
#define av_malloc_array(a, b)
ReplayGain information in the form of the AVReplayGain struct.
Definition: frame.h:76
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Frame references ownership and permissions
Apply the maximum possible cropping, even if it requires setting the AVFrame.data[] entries to unalig...
Definition: frame.h:953
uint8_t ** extended_data
pointers to the data planes/channels.
Definition: frame.h:355
static void get_frame_defaults(AVFrame *frame)
Definition: frame.c:139
int8_t * av_frame_get_qp_table(AVFrame *f, int *stride, int *type)
Definition: frame.c:91
int pkt_size
size of the corresponding packet containing the compressed frame.
Definition: frame.h:623
Stereoscopic 3d metadata.
Definition: frame.h:63
static double val(void *priv, double ch)
Definition: aeval.c:76
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:374
The data is the AVMatrixEncoding enum defined in libavutil/channel_layout.h.
Definition: frame.h:67
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:671
int i
Definition: input.c:407
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
int step
Number of elements between 2 horizontally consecutive pixels.
Definition: pixdesc.h:41
Raw QP table data.
Definition: frame.h:159
void * av_mallocz_array(size_t nmemb, size_t size)
Definition: mem.c:190
const char * name
Definition: opengl_enc.c:102
static uint8_t tmp[11]
Definition: aes_ctr.c:26