FFmpeg
frame.c
1 /*
2  * This file is part of FFmpeg.
3  *
4  * FFmpeg is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU Lesser General Public
6  * License as published by the Free Software Foundation; either
7  * version 2.1 of the License, or (at your option) any later version.
8  *
9  * FFmpeg is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12  * Lesser General Public License for more details.
13  *
14  * You should have received a copy of the GNU Lesser General Public
15  * License along with FFmpeg; if not, write to the Free Software
16  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17  */
18 
19 #include "channel_layout.h"
20 #include "avassert.h"
21 #include "buffer.h"
22 #include "common.h"
23 #include "dict.h"
24 #include "frame.h"
25 #include "imgutils.h"
26 #include "mem.h"
27 #include "samplefmt.h"
28 #include "hwcontext.h"
29 
30 #if FF_API_FRAME_GET_SET
31 MAKE_ACCESSORS(AVFrame, frame, int64_t, best_effort_timestamp)
32 MAKE_ACCESSORS(AVFrame, frame, int64_t, pkt_duration)
33 MAKE_ACCESSORS(AVFrame, frame, int64_t, pkt_pos)
34 MAKE_ACCESSORS(AVFrame, frame, int64_t, channel_layout)
 35 MAKE_ACCESSORS(AVFrame, frame, int,     channels)
 36 MAKE_ACCESSORS(AVFrame, frame, int,     sample_rate)
 37 MAKE_ACCESSORS(AVFrame, frame, AVDictionary *, metadata)
 38 MAKE_ACCESSORS(AVFrame, frame, int,     decode_error_flags)
 39 MAKE_ACCESSORS(AVFrame, frame, int,     pkt_size)
 40 MAKE_ACCESSORS(AVFrame, frame, enum AVColorSpace, colorspace)
 41 MAKE_ACCESSORS(AVFrame, frame, enum AVColorRange, color_range)
 42 #endif
43 
44 #define CHECK_CHANNELS_CONSISTENCY(frame) \
45  av_assert2(!(frame)->channel_layout || \
46  (frame)->channels == \
47  av_get_channel_layout_nb_channels((frame)->channel_layout))
48 
49 #if FF_API_FRAME_QP
50 struct qp_properties {
51  int stride;
52  int type;
53 };
54 
55 int av_frame_set_qp_table(AVFrame *f, AVBufferRef *buf, int stride, int qp_type)
56 {
57  struct qp_properties *p;
 58  AVFrameSideData *sd;
 59  AVBufferRef *ref;
 60 
 61 FF_DISABLE_DEPRECATION_WARNINGS
 62  av_buffer_unref(&f->qp_table_buf);
 63 
 64  f->qp_table_buf = buf;
 65  f->qscale_table = buf->data;
 66  f->qstride = stride;
 67  f->qscale_type = qp_type;
 68 FF_ENABLE_DEPRECATION_WARNINGS
 69 
 70  av_frame_remove_side_data(f, AV_FRAME_DATA_QP_TABLE_PROPERTIES);
 71  av_frame_remove_side_data(f, AV_FRAME_DATA_QP_TABLE_DATA);
 72 
 73  ref = av_buffer_ref(buf);
 74  if (!av_frame_new_side_data_from_buf(f, AV_FRAME_DATA_QP_TABLE_DATA, ref)) {
 75  av_buffer_unref(&ref);
 76  return AVERROR(ENOMEM);
 77  }
 78 
 79  sd = av_frame_new_side_data(f, AV_FRAME_DATA_QP_TABLE_PROPERTIES,
 80  sizeof(struct qp_properties));
81  if (!sd)
82  return AVERROR(ENOMEM);
83 
84  p = (struct qp_properties *)sd->data;
85  p->stride = stride;
86  p->type = qp_type;
87 
88  return 0;
89 }
90 
91 int8_t *av_frame_get_qp_table(AVFrame *f, int *stride, int *type)
92 {
93  AVBufferRef *buf = NULL;
94 
95  *stride = 0;
96  *type = 0;
 97 
 98 FF_DISABLE_DEPRECATION_WARNINGS
 99  if (f->qp_table_buf) {
 100  *stride = f->qstride;
 101  *type = f->qscale_type;
 102  buf = f->qp_table_buf;
 103 FF_ENABLE_DEPRECATION_WARNINGS
 104  } else {
 105  AVFrameSideData *sd;
 106  struct qp_properties *p;
 107  sd = av_frame_get_side_data(f, AV_FRAME_DATA_QP_TABLE_PROPERTIES);
 108  if (!sd)
 109  return NULL;
 110  p = (struct qp_properties *)sd->data;
 111  sd = av_frame_get_side_data(f, AV_FRAME_DATA_QP_TABLE_DATA);
 112  if (!sd)
 113  return NULL;
114  *stride = p->stride;
115  *type = p->type;
116  buf = sd->buf;
117  }
118 
119  return buf ? buf->data : NULL;
120 }
121 #endif
122 
 123 const char *av_get_colorspace_name(enum AVColorSpace val)
124 {
125  static const char * const name[] = {
126  [AVCOL_SPC_RGB] = "GBR",
127  [AVCOL_SPC_BT709] = "bt709",
128  [AVCOL_SPC_FCC] = "fcc",
129  [AVCOL_SPC_BT470BG] = "bt470bg",
130  [AVCOL_SPC_SMPTE170M] = "smpte170m",
131  [AVCOL_SPC_SMPTE240M] = "smpte240m",
132  [AVCOL_SPC_YCOCG] = "YCgCo",
133  };
134  if ((unsigned)val >= FF_ARRAY_ELEMS(name))
135  return NULL;
136  return name[val];
137 }
138 
 139 static void get_frame_defaults(AVFrame *frame)
140 {
141  if (frame->extended_data != frame->data)
142  av_freep(&frame->extended_data);
143 
144  memset(frame, 0, sizeof(*frame));
145 
146  frame->pts =
147  frame->pkt_dts = AV_NOPTS_VALUE;
148 #if FF_API_PKT_PTS
 149 FF_DISABLE_DEPRECATION_WARNINGS
 150  frame->pkt_pts = AV_NOPTS_VALUE;
 151 FF_ENABLE_DEPRECATION_WARNINGS
152 #endif
153  frame->best_effort_timestamp = AV_NOPTS_VALUE;
154  frame->pkt_duration = 0;
155  frame->pkt_pos = -1;
156  frame->pkt_size = -1;
157  frame->key_frame = 1;
158  frame->sample_aspect_ratio = (AVRational){ 0, 1 };
159  frame->format = -1; /* unknown */
160  frame->extended_data = frame->data;
161  frame->color_primaries = AVCOL_PRI_UNSPECIFIED;
162  frame->color_trc = AVCOL_TRC_UNSPECIFIED;
163  frame->colorspace = AVCOL_SPC_UNSPECIFIED;
164  frame->color_range = AVCOL_RANGE_UNSPECIFIED;
165  frame->chroma_location = AVCHROMA_LOC_UNSPECIFIED;
166  frame->flags = 0;
167 }
168 
169 static void free_side_data(AVFrameSideData **ptr_sd)
170 {
171  AVFrameSideData *sd = *ptr_sd;
172 
173  av_buffer_unref(&sd->buf);
174  av_dict_free(&sd->metadata);
175  av_freep(ptr_sd);
176 }
177 
 178 static void wipe_side_data(AVFrame *frame)
179 {
180  int i;
181 
182  for (i = 0; i < frame->nb_side_data; i++) {
183  free_side_data(&frame->side_data[i]);
184  }
185  frame->nb_side_data = 0;
186 
187  av_freep(&frame->side_data);
188 }
189 
 190 AVFrame *av_frame_alloc(void)
191 {
192  AVFrame *frame = av_mallocz(sizeof(*frame));
193 
194  if (!frame)
195  return NULL;
196 
197  frame->extended_data = NULL;
 198  get_frame_defaults(frame);
199 
200  return frame;
201 }
202 
 203 void av_frame_free(AVFrame **frame)
204 {
205  if (!frame || !*frame)
206  return;
207 
 208  av_frame_unref(*frame);
209  av_freep(frame);
210 }
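
A brief caller-side sketch of the alloc/free pairing provided by av_frame_alloc() and av_frame_free() (illustrative only, not part of frame.c):

    AVFrame *frame = av_frame_alloc();
    if (!frame)
        return AVERROR(ENOMEM);
    /* ... fill and use the frame ... */
    av_frame_free(&frame);   /* frees everything and resets the caller's pointer to NULL */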
211 
212 static int get_video_buffer(AVFrame *frame, int align)
213 {
 214  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
215  int ret, i, padded_height, total_size;
216  int plane_padding = FFMAX(16 + 16/*STRIDE_ALIGN*/, align);
217  ptrdiff_t linesizes[4];
218  size_t sizes[4];
219 
220  if (!desc)
221  return AVERROR(EINVAL);
222 
223  if ((ret = av_image_check_size(frame->width, frame->height, 0, NULL)) < 0)
224  return ret;
225 
226  if (!frame->linesize[0]) {
227  if (align <= 0)
228  align = 32; /* STRIDE_ALIGN. Should be av_cpu_max_align() */
229 
230  for(i=1; i<=align; i+=i) {
231  ret = av_image_fill_linesizes(frame->linesize, frame->format,
232  FFALIGN(frame->width, i));
233  if (ret < 0)
234  return ret;
235  if (!(frame->linesize[0] & (align-1)))
236  break;
237  }
238 
239  for (i = 0; i < 4 && frame->linesize[i]; i++)
240  frame->linesize[i] = FFALIGN(frame->linesize[i], align);
241  }
242 
243  for (i = 0; i < 4; i++)
244  linesizes[i] = frame->linesize[i];
245 
246  padded_height = FFALIGN(frame->height, 32);
247  if ((ret = av_image_fill_plane_sizes(sizes, frame->format,
248  padded_height, linesizes)) < 0)
249  return ret;
250 
251  total_size = 4*plane_padding;
252  for (i = 0; i < 4; i++) {
253  if (sizes[i] > INT_MAX - total_size)
254  return AVERROR(EINVAL);
255  total_size += sizes[i];
256  }
257 
258  frame->buf[0] = av_buffer_alloc(total_size);
259  if (!frame->buf[0]) {
260  ret = AVERROR(ENOMEM);
261  goto fail;
262  }
263 
264  if ((ret = av_image_fill_pointers(frame->data, frame->format, padded_height,
265  frame->buf[0]->data, frame->linesize)) < 0)
266  goto fail;
267 
268  for (i = 1; i < 4; i++) {
269  if (frame->data[i])
270  frame->data[i] += i * plane_padding;
271  }
272 
273  frame->extended_data = frame->data;
274 
275  return 0;
276 fail:
 277  av_frame_unref(frame);
278  return ret;
279 }
280 
281 static int get_audio_buffer(AVFrame *frame, int align)
282 {
283  int channels;
284  int planar = av_sample_fmt_is_planar(frame->format);
285  int planes;
286  int ret, i;
287 
288  if (!frame->channels)
289  frame->channels = av_get_channel_layout_nb_channels(frame->channel_layout);
290 
291  channels = frame->channels;
292  planes = planar ? channels : 1;
293 
 294  CHECK_CHANNELS_CONSISTENCY(frame);
295  if (!frame->linesize[0]) {
296  ret = av_samples_get_buffer_size(&frame->linesize[0], channels,
297  frame->nb_samples, frame->format,
298  align);
299  if (ret < 0)
300  return ret;
301  }
 302 
 303  if (planes > AV_NUM_DATA_POINTERS) {
 304  frame->extended_data = av_mallocz_array(planes,
 305  sizeof(*frame->extended_data));
 306  frame->extended_buf = av_mallocz_array((planes - AV_NUM_DATA_POINTERS),
 307  sizeof(*frame->extended_buf));
308  if (!frame->extended_data || !frame->extended_buf) {
309  av_freep(&frame->extended_data);
310  av_freep(&frame->extended_buf);
311  return AVERROR(ENOMEM);
312  }
313  frame->nb_extended_buf = planes - AV_NUM_DATA_POINTERS;
314  } else
315  frame->extended_data = frame->data;
316 
317  for (i = 0; i < FFMIN(planes, AV_NUM_DATA_POINTERS); i++) {
318  frame->buf[i] = av_buffer_alloc(frame->linesize[0]);
319  if (!frame->buf[i]) {
 320  av_frame_unref(frame);
321  return AVERROR(ENOMEM);
322  }
323  frame->extended_data[i] = frame->data[i] = frame->buf[i]->data;
324  }
325  for (i = 0; i < planes - AV_NUM_DATA_POINTERS; i++) {
326  frame->extended_buf[i] = av_buffer_alloc(frame->linesize[0]);
327  if (!frame->extended_buf[i]) {
 328  av_frame_unref(frame);
329  return AVERROR(ENOMEM);
330  }
331  frame->extended_data[i + AV_NUM_DATA_POINTERS] = frame->extended_buf[i]->data;
332  }
333  return 0;
334 
335 }
336 
 337 int av_frame_get_buffer(AVFrame *frame, int align)
338 {
339  if (frame->format < 0)
340  return AVERROR(EINVAL);
341 
342  if (frame->width > 0 && frame->height > 0)
343  return get_video_buffer(frame, align);
344  else if (frame->nb_samples > 0 && (frame->channel_layout || frame->channels > 0))
345  return get_audio_buffer(frame, align);
346 
347  return AVERROR(EINVAL);
348 }
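
A minimal sketch of how a caller typically drives av_frame_get_buffer() for video; the pixel format and dimensions below are illustrative assumptions:

    AVFrame *frame = av_frame_alloc();
    int ret;
    if (!frame)
        return AVERROR(ENOMEM);
    frame->format = AV_PIX_FMT_YUV420P;   /* any valid enum AVPixelFormat */
    frame->width  = 1280;
    frame->height = 720;
    ret = av_frame_get_buffer(frame, 0);  /* align 0: default alignment (32 in this version) */
    if (ret < 0)
        av_frame_free(&frame);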
349 
350 static int frame_copy_props(AVFrame *dst, const AVFrame *src, int force_copy)
351 {
352  int ret, i;
353 
354  dst->key_frame = src->key_frame;
355  dst->pict_type = src->pict_type;
356  dst->sample_aspect_ratio = src->sample_aspect_ratio;
357  dst->crop_top = src->crop_top;
358  dst->crop_bottom = src->crop_bottom;
359  dst->crop_left = src->crop_left;
360  dst->crop_right = src->crop_right;
361  dst->pts = src->pts;
362  dst->repeat_pict = src->repeat_pict;
363  dst->interlaced_frame = src->interlaced_frame;
364  dst->top_field_first = src->top_field_first;
365  dst->palette_has_changed = src->palette_has_changed;
366  dst->sample_rate = src->sample_rate;
367  dst->opaque = src->opaque;
368 #if FF_API_PKT_PTS
 369 FF_DISABLE_DEPRECATION_WARNINGS
 370  dst->pkt_pts = src->pkt_pts;
 371 FF_ENABLE_DEPRECATION_WARNINGS
372 #endif
373  dst->pkt_dts = src->pkt_dts;
374  dst->pkt_pos = src->pkt_pos;
375  dst->pkt_size = src->pkt_size;
376  dst->pkt_duration = src->pkt_duration;
377  dst->reordered_opaque = src->reordered_opaque;
378  dst->quality = src->quality;
379  dst->best_effort_timestamp = src->best_effort_timestamp;
380  dst->coded_picture_number = src->coded_picture_number;
381  dst->display_picture_number = src->display_picture_number;
382  dst->flags = src->flags;
383  dst->decode_error_flags = src->decode_error_flags;
384  dst->color_primaries = src->color_primaries;
385  dst->color_trc = src->color_trc;
386  dst->colorspace = src->colorspace;
387  dst->color_range = src->color_range;
388  dst->chroma_location = src->chroma_location;
389 
390  av_dict_copy(&dst->metadata, src->metadata, 0);
391 
392 #if FF_API_ERROR_FRAME
 393 FF_DISABLE_DEPRECATION_WARNINGS
 394  memcpy(dst->error, src->error, sizeof(dst->error));
 395 FF_ENABLE_DEPRECATION_WARNINGS
396 #endif
397 
398  for (i = 0; i < src->nb_side_data; i++) {
399  const AVFrameSideData *sd_src = src->side_data[i];
400  AVFrameSideData *sd_dst;
401  if ( sd_src->type == AV_FRAME_DATA_PANSCAN
402  && (src->width != dst->width || src->height != dst->height))
403  continue;
404  if (force_copy) {
405  sd_dst = av_frame_new_side_data(dst, sd_src->type,
406  sd_src->size);
407  if (!sd_dst) {
408  wipe_side_data(dst);
409  return AVERROR(ENOMEM);
410  }
411  memcpy(sd_dst->data, sd_src->data, sd_src->size);
412  } else {
413  AVBufferRef *ref = av_buffer_ref(sd_src->buf);
414  sd_dst = av_frame_new_side_data_from_buf(dst, sd_src->type, ref);
415  if (!sd_dst) {
 416  av_buffer_unref(&ref);
417  wipe_side_data(dst);
418  return AVERROR(ENOMEM);
419  }
420  }
421  av_dict_copy(&sd_dst->metadata, sd_src->metadata, 0);
422  }
423 
424 #if FF_API_FRAME_QP
 425 FF_DISABLE_DEPRECATION_WARNINGS
426  dst->qscale_table = NULL;
427  dst->qstride = 0;
428  dst->qscale_type = 0;
429  av_buffer_replace(&dst->qp_table_buf, src->qp_table_buf);
430  if (dst->qp_table_buf) {
431  dst->qscale_table = dst->qp_table_buf->data;
432  dst->qstride = src->qstride;
433  dst->qscale_type = src->qscale_type;
434  }
 435 FF_ENABLE_DEPRECATION_WARNINGS
436 #endif
437 
438  ret = av_buffer_replace(&dst->opaque_ref, src->opaque_ref);
439  ret |= av_buffer_replace(&dst->private_ref, src->private_ref);
440  return ret;
441 }
442 
443 int av_frame_ref(AVFrame *dst, const AVFrame *src)
444 {
445  int i, ret = 0;
446 
447  av_assert1(dst->width == 0 && dst->height == 0);
448  av_assert1(dst->channels == 0);
449 
450  dst->format = src->format;
451  dst->width = src->width;
452  dst->height = src->height;
453  dst->channels = src->channels;
454  dst->channel_layout = src->channel_layout;
455  dst->nb_samples = src->nb_samples;
456 
457  ret = frame_copy_props(dst, src, 0);
458  if (ret < 0)
459  goto fail;
460 
461  /* duplicate the frame data if it's not refcounted */
462  if (!src->buf[0]) {
463  ret = av_frame_get_buffer(dst, 0);
464  if (ret < 0)
465  goto fail;
466 
467  ret = av_frame_copy(dst, src);
468  if (ret < 0)
469  goto fail;
470 
471  return ret;
472  }
473 
474  /* ref the buffers */
475  for (i = 0; i < FF_ARRAY_ELEMS(src->buf); i++) {
476  if (!src->buf[i])
477  continue;
478  dst->buf[i] = av_buffer_ref(src->buf[i]);
479  if (!dst->buf[i]) {
480  ret = AVERROR(ENOMEM);
481  goto fail;
482  }
483  }
484 
485  if (src->extended_buf) {
486  dst->extended_buf = av_mallocz_array(sizeof(*dst->extended_buf),
487  src->nb_extended_buf);
488  if (!dst->extended_buf) {
489  ret = AVERROR(ENOMEM);
490  goto fail;
491  }
492  dst->nb_extended_buf = src->nb_extended_buf;
493 
494  for (i = 0; i < src->nb_extended_buf; i++) {
495  dst->extended_buf[i] = av_buffer_ref(src->extended_buf[i]);
496  if (!dst->extended_buf[i]) {
497  ret = AVERROR(ENOMEM);
498  goto fail;
499  }
500  }
501  }
502 
503  if (src->hw_frames_ctx) {
504  dst->hw_frames_ctx = av_buffer_ref(src->hw_frames_ctx);
505  if (!dst->hw_frames_ctx) {
506  ret = AVERROR(ENOMEM);
507  goto fail;
508  }
509  }
510 
511  /* duplicate extended data */
512  if (src->extended_data != src->data) {
513  int ch = src->channels;
514 
515  if (!ch) {
516  ret = AVERROR(EINVAL);
517  goto fail;
518  }
 519  CHECK_CHANNELS_CONSISTENCY(src);
520 
521  dst->extended_data = av_malloc_array(sizeof(*dst->extended_data), ch);
522  if (!dst->extended_data) {
523  ret = AVERROR(ENOMEM);
524  goto fail;
525  }
526  memcpy(dst->extended_data, src->extended_data, sizeof(*src->extended_data) * ch);
527  } else
528  dst->extended_data = dst->data;
529 
530  memcpy(dst->data, src->data, sizeof(src->data));
531  memcpy(dst->linesize, src->linesize, sizeof(src->linesize));
532 
533  return 0;
534 
535 fail:
536  av_frame_unref(dst);
537  return ret;
538 }
539 
 540 AVFrame *av_frame_clone(const AVFrame *src)
 541 {
 542  AVFrame *ret = av_frame_alloc();
 543 
544  if (!ret)
545  return NULL;
546 
547  if (av_frame_ref(ret, src) < 0)
548  av_frame_free(&ret);
549 
550  return ret;
551 }
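
av_frame_clone() is the shorthand for av_frame_alloc() followed by av_frame_ref(); a hedged usage sketch, assuming src is a valid refcounted AVFrame:

    AVFrame *copy = av_frame_clone(src);   /* shares src's buffers via new references */
    if (!copy)
        return AVERROR(ENOMEM);
    /* ... read from copy ... */
    av_frame_free(&copy);                  /* drops only the extra references */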
552 
 553 void av_frame_unref(AVFrame *frame)
554 {
555  int i;
556 
557  if (!frame)
558  return;
559 
 560  wipe_side_data(frame);
561 
562  for (i = 0; i < FF_ARRAY_ELEMS(frame->buf); i++)
563  av_buffer_unref(&frame->buf[i]);
564  for (i = 0; i < frame->nb_extended_buf; i++)
565  av_buffer_unref(&frame->extended_buf[i]);
566  av_freep(&frame->extended_buf);
567  av_dict_free(&frame->metadata);
568 #if FF_API_FRAME_QP
 569 FF_DISABLE_DEPRECATION_WARNINGS
 570  av_buffer_unref(&frame->qp_table_buf);
 571 FF_ENABLE_DEPRECATION_WARNINGS
572 #endif
573 
574  av_buffer_unref(&frame->hw_frames_ctx);
575 
576  av_buffer_unref(&frame->opaque_ref);
577  av_buffer_unref(&frame->private_ref);
578 
 579  get_frame_defaults(frame);
580 }
581 
 582 void av_frame_move_ref(AVFrame *dst, AVFrame *src)
583 {
584  av_assert1(dst->width == 0 && dst->height == 0);
585  av_assert1(dst->channels == 0);
586 
587  *dst = *src;
588  if (src->extended_data == src->data)
589  dst->extended_data = dst->data;
590  memset(src, 0, sizeof(*src));
 591  get_frame_defaults(src);
592 }
593 
 594 int av_frame_is_writable(AVFrame *frame)
595 {
596  int i, ret = 1;
597 
598  /* assume non-refcounted frames are not writable */
599  if (!frame->buf[0])
600  return 0;
601 
602  for (i = 0; i < FF_ARRAY_ELEMS(frame->buf); i++)
603  if (frame->buf[i])
604  ret &= !!av_buffer_is_writable(frame->buf[i]);
605  for (i = 0; i < frame->nb_extended_buf; i++)
606  ret &= !!av_buffer_is_writable(frame->extended_buf[i]);
607 
608  return ret;
609 }
610 
 611 int av_frame_make_writable(AVFrame *frame)
612 {
613  AVFrame tmp;
614  int ret;
615 
616  if (!frame->buf[0])
617  return AVERROR(EINVAL);
618 
 619  if (av_frame_is_writable(frame))
620  return 0;
621 
622  memset(&tmp, 0, sizeof(tmp));
623  tmp.format = frame->format;
624  tmp.width = frame->width;
625  tmp.height = frame->height;
626  tmp.channels = frame->channels;
627  tmp.channel_layout = frame->channel_layout;
628  tmp.nb_samples = frame->nb_samples;
629 
630  if (frame->hw_frames_ctx)
631  ret = av_hwframe_get_buffer(frame->hw_frames_ctx, &tmp, 0);
632  else
633  ret = av_frame_get_buffer(&tmp, 0);
634  if (ret < 0)
635  return ret;
636 
637  ret = av_frame_copy(&tmp, frame);
638  if (ret < 0) {
 639  av_frame_unref(&tmp);
640  return ret;
641  }
642 
 643  ret = av_frame_copy_props(&tmp, frame);
 644  if (ret < 0) {
 645  av_frame_unref(&tmp);
 646  return ret;
647  }
648 
 649  av_frame_unref(frame);
650 
651  *frame = tmp;
652  if (tmp.data == tmp.extended_data)
653  frame->extended_data = frame->data;
654 
655  return 0;
656 }
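
The copy-on-write pattern this enables, as a sketch (frame is assumed to hold refcounted data, e.g. returned by a decoder):

    int ret = av_frame_make_writable(frame);
    if (ret < 0)
        return ret;
    frame->data[0][0] = 0;   /* safe now: the buffers are no longer shared */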
657 
 658 int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
659 {
660  return frame_copy_props(dst, src, 1);
661 }
662 
 663 AVBufferRef *av_frame_get_plane_buffer(AVFrame *frame, int plane)
664 {
665  uint8_t *data;
666  int planes, i;
667 
668  if (frame->nb_samples) {
669  int channels = frame->channels;
670  if (!channels)
671  return NULL;
 672  CHECK_CHANNELS_CONSISTENCY(frame);
673  planes = av_sample_fmt_is_planar(frame->format) ? channels : 1;
674  } else
675  planes = 4;
676 
677  if (plane < 0 || plane >= planes || !frame->extended_data[plane])
678  return NULL;
679  data = frame->extended_data[plane];
680 
681  for (i = 0; i < FF_ARRAY_ELEMS(frame->buf) && frame->buf[i]; i++) {
682  AVBufferRef *buf = frame->buf[i];
683  if (data >= buf->data && data < buf->data + buf->size)
684  return buf;
685  }
686  for (i = 0; i < frame->nb_extended_buf; i++) {
687  AVBufferRef *buf = frame->extended_buf[i];
688  if (data >= buf->data && data < buf->data + buf->size)
689  return buf;
690  }
691  return NULL;
692 }
693 
 694 AVFrameSideData *av_frame_new_side_data_from_buf(AVFrame *frame,
 695  enum AVFrameSideDataType type,
696  AVBufferRef *buf)
697 {
698  AVFrameSideData *ret, **tmp;
699 
700  if (!buf)
701  return NULL;
702 
703  if (frame->nb_side_data > INT_MAX / sizeof(*frame->side_data) - 1)
704  return NULL;
705 
706  tmp = av_realloc(frame->side_data,
707  (frame->nb_side_data + 1) * sizeof(*frame->side_data));
708  if (!tmp)
709  return NULL;
710  frame->side_data = tmp;
711 
712  ret = av_mallocz(sizeof(*ret));
713  if (!ret)
714  return NULL;
715 
716  ret->buf = buf;
717  ret->data = ret->buf->data;
718  ret->size = buf->size;
719  ret->type = type;
720 
721  frame->side_data[frame->nb_side_data++] = ret;
722 
723  return ret;
724 }
725 
 726 AVFrameSideData *av_frame_new_side_data(AVFrame *frame,
 727  enum AVFrameSideDataType type,
 728  buffer_size_t size)
 729 {
 730  AVFrameSideData *ret;
 731  AVBufferRef *buf = av_buffer_alloc(size);
 732  ret = av_frame_new_side_data_from_buf(frame, type, buf);
 733  if (!ret)
734  av_buffer_unref(&buf);
735  return ret;
736 }
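
A short sketch of attaching side data with av_frame_new_side_data(); the display-matrix example assumes libavutil/display.h is included:

    AVFrameSideData *sd = av_frame_new_side_data(frame, AV_FRAME_DATA_DISPLAYMATRIX,
                                                 9 * sizeof(int32_t));
    if (!sd)
        return AVERROR(ENOMEM);
    av_display_rotation_set((int32_t *)sd->data, 90.0);   /* declared in display.h */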
737 
 738 AVFrameSideData *av_frame_get_side_data(const AVFrame *frame,
 739  enum AVFrameSideDataType type)
740 {
741  int i;
742 
743  for (i = 0; i < frame->nb_side_data; i++) {
744  if (frame->side_data[i]->type == type)
745  return frame->side_data[i];
746  }
747  return NULL;
748 }
749 
750 static int frame_copy_video(AVFrame *dst, const AVFrame *src)
751 {
752  const uint8_t *src_data[4];
753  int i, planes;
754 
755  if (dst->width < src->width ||
756  dst->height < src->height)
757  return AVERROR(EINVAL);
758 
759  if (src->hw_frames_ctx || dst->hw_frames_ctx)
760  return av_hwframe_transfer_data(dst, src, 0);
761 
 762  planes = av_pix_fmt_count_planes(dst->format);
763  for (i = 0; i < planes; i++)
764  if (!dst->data[i] || !src->data[i])
765  return AVERROR(EINVAL);
766 
767  memcpy(src_data, src->data, sizeof(src_data));
768  av_image_copy(dst->data, dst->linesize,
769  src_data, src->linesize,
770  dst->format, src->width, src->height);
771 
772  return 0;
773 }
774 
775 static int frame_copy_audio(AVFrame *dst, const AVFrame *src)
776 {
 777  int planar = av_sample_fmt_is_planar(dst->format);
778  int channels = dst->channels;
779  int planes = planar ? channels : 1;
780  int i;
781 
782  if (dst->nb_samples != src->nb_samples ||
783  dst->channels != src->channels ||
784  dst->channel_layout != src->channel_layout)
785  return AVERROR(EINVAL);
786 
 787  CHECK_CHANNELS_CONSISTENCY(dst);
788 
789  for (i = 0; i < planes; i++)
790  if (!dst->extended_data[i] || !src->extended_data[i])
791  return AVERROR(EINVAL);
792 
793  av_samples_copy(dst->extended_data, src->extended_data, 0, 0,
794  dst->nb_samples, channels, dst->format);
795 
796  return 0;
797 }
798 
799 int av_frame_copy(AVFrame *dst, const AVFrame *src)
800 {
801  if (dst->format != src->format || dst->format < 0)
802  return AVERROR(EINVAL);
803 
804  if (dst->width > 0 && dst->height > 0)
805  return frame_copy_video(dst, src);
806  else if (dst->nb_samples > 0 && dst->channels > 0)
807  return frame_copy_audio(dst, src);
808 
809  return AVERROR(EINVAL);
810 }
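
Combined with av_frame_get_buffer() and av_frame_copy_props(), this is how a full deep copy is usually assembled (a sketch for the video case, error handling abbreviated):

    AVFrame *dst = av_frame_alloc();
    int ret = dst ? 0 : AVERROR(ENOMEM);
    if (ret >= 0) {
        dst->format = src->format;
        dst->width  = src->width;
        dst->height = src->height;
        ret = av_frame_get_buffer(dst, 0);
    }
    if (ret >= 0)
        ret = av_frame_copy(dst, src);        /* copies the pixel data */
    if (ret >= 0)
        ret = av_frame_copy_props(dst, src);  /* copies metadata and side data */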
811 
 812 void av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type)
813 {
814  int i;
815 
816  for (i = frame->nb_side_data - 1; i >= 0; i--) {
817  AVFrameSideData *sd = frame->side_data[i];
818  if (sd->type == type) {
819  free_side_data(&frame->side_data[i]);
820  frame->side_data[i] = frame->side_data[frame->nb_side_data - 1];
821  frame->nb_side_data--;
822  }
823  }
824 }
825 
 826 const char *av_frame_side_data_name(enum AVFrameSideDataType type)
827 {
828  switch(type) {
829  case AV_FRAME_DATA_PANSCAN: return "AVPanScan";
830  case AV_FRAME_DATA_A53_CC: return "ATSC A53 Part 4 Closed Captions";
831  case AV_FRAME_DATA_STEREO3D: return "Stereo 3D";
832  case AV_FRAME_DATA_MATRIXENCODING: return "AVMatrixEncoding";
833  case AV_FRAME_DATA_DOWNMIX_INFO: return "Metadata relevant to a downmix procedure";
834  case AV_FRAME_DATA_REPLAYGAIN: return "AVReplayGain";
835  case AV_FRAME_DATA_DISPLAYMATRIX: return "3x3 displaymatrix";
836  case AV_FRAME_DATA_AFD: return "Active format description";
837  case AV_FRAME_DATA_MOTION_VECTORS: return "Motion vectors";
838  case AV_FRAME_DATA_SKIP_SAMPLES: return "Skip samples";
839  case AV_FRAME_DATA_AUDIO_SERVICE_TYPE: return "Audio service type";
840  case AV_FRAME_DATA_MASTERING_DISPLAY_METADATA: return "Mastering display metadata";
841  case AV_FRAME_DATA_CONTENT_LIGHT_LEVEL: return "Content light level metadata";
842  case AV_FRAME_DATA_GOP_TIMECODE: return "GOP timecode";
843  case AV_FRAME_DATA_S12M_TIMECODE: return "SMPTE 12-1 timecode";
844  case AV_FRAME_DATA_SPHERICAL: return "Spherical Mapping";
845  case AV_FRAME_DATA_ICC_PROFILE: return "ICC profile";
846 #if FF_API_FRAME_QP
847  case AV_FRAME_DATA_QP_TABLE_PROPERTIES: return "QP table properties";
848  case AV_FRAME_DATA_QP_TABLE_DATA: return "QP table data";
849 #endif
850  case AV_FRAME_DATA_DYNAMIC_HDR_PLUS: return "HDR Dynamic Metadata SMPTE2094-40 (HDR10+)";
851  case AV_FRAME_DATA_REGIONS_OF_INTEREST: return "Regions Of Interest";
852  case AV_FRAME_DATA_VIDEO_ENC_PARAMS: return "Video encoding parameters";
853  case AV_FRAME_DATA_SEI_UNREGISTERED: return "H.26[45] User Data Unregistered SEI message";
854  case AV_FRAME_DATA_FILM_GRAIN_PARAMS: return "Film grain parameters";
855  }
856  return NULL;
857 }
858 
859 static int calc_cropping_offsets(size_t offsets[4], const AVFrame *frame,
860  const AVPixFmtDescriptor *desc)
861 {
862  int i, j;
863 
864  for (i = 0; frame->data[i]; i++) {
 865  const AVComponentDescriptor *comp = NULL;
866  int shift_x = (i == 1 || i == 2) ? desc->log2_chroma_w : 0;
867  int shift_y = (i == 1 || i == 2) ? desc->log2_chroma_h : 0;
868 
869  if (desc->flags & (AV_PIX_FMT_FLAG_PAL | FF_PSEUDOPAL) && i == 1) {
870  offsets[i] = 0;
871  break;
872  }
873 
874  /* find any component descriptor for this plane */
875  for (j = 0; j < desc->nb_components; j++) {
876  if (desc->comp[j].plane == i) {
877  comp = &desc->comp[j];
878  break;
879  }
880  }
881  if (!comp)
882  return AVERROR_BUG;
883 
884  offsets[i] = (frame->crop_top >> shift_y) * frame->linesize[i] +
885  (frame->crop_left >> shift_x) * comp->step;
886  }
887 
888  return 0;
889 }
890 
 891 int av_frame_apply_cropping(AVFrame *frame, int flags)
892 {
893  const AVPixFmtDescriptor *desc;
894  size_t offsets[4];
895  int i;
896 
897  if (!(frame->width > 0 && frame->height > 0))
898  return AVERROR(EINVAL);
899 
900  if (frame->crop_left >= INT_MAX - frame->crop_right ||
901  frame->crop_top >= INT_MAX - frame->crop_bottom ||
902  (frame->crop_left + frame->crop_right) >= frame->width ||
903  (frame->crop_top + frame->crop_bottom) >= frame->height)
904  return AVERROR(ERANGE);
905 
906  desc = av_pix_fmt_desc_get(frame->format);
907  if (!desc)
908  return AVERROR_BUG;
909 
910  /* Apply just the right/bottom cropping for hwaccel formats. Bitstream
911  * formats cannot be easily handled here either (and corresponding decoders
912  * should not export any cropping anyway), so do the same for those as well.
913  * */
 914  if (desc->flags & (AV_PIX_FMT_FLAG_HWACCEL | AV_PIX_FMT_FLAG_BITSTREAM)) {
915  frame->width -= frame->crop_right;
916  frame->height -= frame->crop_bottom;
917  frame->crop_right = 0;
918  frame->crop_bottom = 0;
919  return 0;
920  }
921 
922  /* calculate the offsets for each plane */
 923  calc_cropping_offsets(offsets, frame, desc);
924 
925  /* adjust the offsets to avoid breaking alignment */
926  if (!(flags & AV_FRAME_CROP_UNALIGNED)) {
927  int log2_crop_align = frame->crop_left ? ff_ctz(frame->crop_left) : INT_MAX;
928  int min_log2_align = INT_MAX;
929 
930  for (i = 0; frame->data[i]; i++) {
931  int log2_align = offsets[i] ? ff_ctz(offsets[i]) : INT_MAX;
932  min_log2_align = FFMIN(log2_align, min_log2_align);
933  }
934 
935  /* we assume, and it should always be true, that the data alignment is
936  * related to the cropping alignment by a constant power-of-2 factor */
937  if (log2_crop_align < min_log2_align)
938  return AVERROR_BUG;
939 
940  if (min_log2_align < 5) {
941  frame->crop_left &= ~((1 << (5 + log2_crop_align - min_log2_align)) - 1);
 942  calc_cropping_offsets(offsets, frame, desc);
943  }
944  }
945 
946  for (i = 0; frame->data[i]; i++)
947  frame->data[i] += offsets[i];
948 
949  frame->width -= (frame->crop_left + frame->crop_right);
950  frame->height -= (frame->crop_top + frame->crop_bottom);
951  frame->crop_left = 0;
952  frame->crop_right = 0;
953  frame->crop_top = 0;
954  frame->crop_bottom = 0;
955 
956  return 0;
957 }
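
A typical call-site sketch: after a decoder returns a frame carrying crop_* fields, the caller applies them before further processing (dec_ctx and frame are assumed to exist; error handling abbreviated):

    int ret = avcodec_receive_frame(dec_ctx, frame);   /* libavcodec decode step */
    if (ret >= 0)
        ret = av_frame_apply_cropping(frame, 0);       /* flags 0: keep data pointers aligned */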