FFmpeg
frame.c
1 /*
2  * This file is part of FFmpeg.
3  *
4  * FFmpeg is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU Lesser General Public
6  * License as published by the Free Software Foundation; either
7  * version 2.1 of the License, or (at your option) any later version.
8  *
9  * FFmpeg is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12  * Lesser General Public License for more details.
13  *
14  * You should have received a copy of the GNU Lesser General Public
15  * License along with FFmpeg; if not, write to the Free Software
16  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17  */
18 
19 #include "channel_layout.h"
20 #include "avassert.h"
21 #include "buffer.h"
22 #include "common.h"
23 #include "dict.h"
24 #include "frame.h"
25 #include "imgutils.h"
26 #include "mem.h"
27 #include "samplefmt.h"
28 
29 #if FF_API_FRAME_GET_SET
30 MAKE_ACCESSORS(AVFrame, frame, int64_t, best_effort_timestamp)
31 MAKE_ACCESSORS(AVFrame, frame, int64_t, pkt_duration)
32 MAKE_ACCESSORS(AVFrame, frame, int64_t, pkt_pos)
33 MAKE_ACCESSORS(AVFrame, frame, int64_t, channel_layout)
34 MAKE_ACCESSORS(AVFrame, frame, int, channels)
35 MAKE_ACCESSORS(AVFrame, frame, int, sample_rate)
36 MAKE_ACCESSORS(AVFrame, frame, AVDictionary *, metadata)
37 MAKE_ACCESSORS(AVFrame, frame, int, decode_error_flags)
38 MAKE_ACCESSORS(AVFrame, frame, int, pkt_size)
39 MAKE_ACCESSORS(AVFrame, frame, enum AVColorSpace, colorspace)
40 MAKE_ACCESSORS(AVFrame, frame, enum AVColorRange, color_range)
41 #endif
42 
43 #define CHECK_CHANNELS_CONSISTENCY(frame) \
44  av_assert2(!(frame)->channel_layout || \
45  (frame)->channels == \
46  av_get_channel_layout_nb_channels((frame)->channel_layout))
47 
48 #if FF_API_FRAME_QP
49 struct qp_properties {
50  int stride;
51  int type;
52 };
53 
54 int av_frame_set_qp_table(AVFrame *f, AVBufferRef *buf, int stride, int qp_type)
55 {
56  struct qp_properties *p;
57  AVFrameSideData *sd;
58  AVBufferRef *ref;
59 
60 FF_DISABLE_DEPRECATION_WARNINGS
61  av_buffer_unref(&f->qp_table_buf);
62 
63  f->qp_table_buf = buf;
64  f->qscale_table = buf->data;
65  f->qstride = stride;
66  f->qscale_type = qp_type;
67 FF_ENABLE_DEPRECATION_WARNINGS
68 
69  av_frame_remove_side_data(f, AV_FRAME_DATA_QP_TABLE_PROPERTIES);
70  av_frame_remove_side_data(f, AV_FRAME_DATA_QP_TABLE_DATA);
71 
72  ref = av_buffer_ref(buf);
73  if (!av_frame_new_side_data_from_buf(f, AV_FRAME_DATA_QP_TABLE_DATA, ref)) {
74  av_buffer_unref(&ref);
75  return AVERROR(ENOMEM);
76  }
77 
78  sd = av_frame_new_side_data(f, AV_FRAME_DATA_QP_TABLE_PROPERTIES,
79  sizeof(struct qp_properties));
80  if (!sd)
81  return AVERROR(ENOMEM);
82 
83  p = (struct qp_properties *)sd->data;
84  p->stride = stride;
85  p->type = qp_type;
86 
87  return 0;
88 }
89 
90 int8_t *av_frame_get_qp_table(AVFrame *f, int *stride, int *type)
91 {
92  AVBufferRef *buf = NULL;
93 
94  *stride = 0;
95  *type = 0;
96 
97 FF_DISABLE_DEPRECATION_WARNINGS
98  if (f->qp_table_buf) {
99  *stride = f->qstride;
100  *type = f->qscale_type;
101  buf = f->qp_table_buf;
102 FF_ENABLE_DEPRECATION_WARNINGS
103  } else {
104  AVFrameSideData *sd;
105  struct qp_properties *p;
106  sd = av_frame_get_side_data(f, AV_FRAME_DATA_QP_TABLE_PROPERTIES);
107  if (!sd)
108  return NULL;
109  p = (struct qp_properties *)sd->data;
110  sd = av_frame_get_side_data(f, AV_FRAME_DATA_QP_TABLE_DATA);
111  if (!sd)
112  return NULL;
113  *stride = p->stride;
114  *type = p->type;
115  buf = sd->buf;
116  }
117 
118  return buf ? buf->data : NULL;
119 }
120 #endif
121 
122 const char *av_get_colorspace_name(enum AVColorSpace val)
123 {
124  static const char * const name[] = {
125  [AVCOL_SPC_RGB] = "GBR",
126  [AVCOL_SPC_BT709] = "bt709",
127  [AVCOL_SPC_FCC] = "fcc",
128  [AVCOL_SPC_BT470BG] = "bt470bg",
129  [AVCOL_SPC_SMPTE170M] = "smpte170m",
130  [AVCOL_SPC_SMPTE240M] = "smpte240m",
131  [AVCOL_SPC_YCOCG] = "YCgCo",
132  };
133  if ((unsigned)val >= FF_ARRAY_ELEMS(name))
134  return NULL;
135  return name[val];
136 }
137 
138 static void get_frame_defaults(AVFrame *frame)
139 {
140  if (frame->extended_data != frame->data)
141  av_freep(&frame->extended_data);
142 
143  memset(frame, 0, sizeof(*frame));
144 
145  frame->pts =
146  frame->pkt_dts = AV_NOPTS_VALUE;
147 #if FF_API_PKT_PTS
148 FF_DISABLE_DEPRECATION_WARNINGS
149  frame->pkt_pts = AV_NOPTS_VALUE;
150 FF_ENABLE_DEPRECATION_WARNINGS
151 #endif
152  frame->best_effort_timestamp = AV_NOPTS_VALUE;
153  frame->pkt_duration = 0;
154  frame->pkt_pos = -1;
155  frame->pkt_size = -1;
156  frame->key_frame = 1;
157  frame->sample_aspect_ratio = (AVRational){ 0, 1 };
158  frame->format = -1; /* unknown */
159  frame->extended_data = frame->data;
160  frame->color_primaries = AVCOL_PRI_UNSPECIFIED;
161  frame->color_trc = AVCOL_TRC_UNSPECIFIED;
162  frame->colorspace = AVCOL_SPC_UNSPECIFIED;
163  frame->color_range = AVCOL_RANGE_UNSPECIFIED;
164  frame->chroma_location = AVCHROMA_LOC_UNSPECIFIED;
165  frame->flags = 0;
166 }
167 
168 static void free_side_data(AVFrameSideData **ptr_sd)
169 {
170  AVFrameSideData *sd = *ptr_sd;
171 
172  av_buffer_unref(&sd->buf);
173  av_dict_free(&sd->metadata);
174  av_freep(ptr_sd);
175 }
176 
177 static void wipe_side_data(AVFrame *frame)
178 {
179  int i;
180 
181  for (i = 0; i < frame->nb_side_data; i++) {
182  free_side_data(&frame->side_data[i]);
183  }
184  frame->nb_side_data = 0;
185 
186  av_freep(&frame->side_data);
187 }
188 
189 AVFrame *av_frame_alloc(void)
190 {
191  AVFrame *frame = av_mallocz(sizeof(*frame));
192 
193  if (!frame)
194  return NULL;
195 
196  frame->extended_data = NULL;
197  get_frame_defaults(frame);
198 
199  return frame;
200 }
201 
202 void av_frame_free(AVFrame **frame)
203 {
204  if (!frame || !*frame)
205  return;
206 
207  av_frame_unref(*frame);
208  av_freep(frame);
209 }
210 
211 static int get_video_buffer(AVFrame *frame, int align)
212 {
213  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
214  int ret, i, padded_height;
215  int plane_padding = FFMAX(16 + 16/*STRIDE_ALIGN*/, align);
216 
217  if (!desc)
218  return AVERROR(EINVAL);
219 
220  if ((ret = av_image_check_size(frame->width, frame->height, 0, NULL)) < 0)
221  return ret;
222 
223  if (!frame->linesize[0]) {
224  if (align <= 0)
225  align = 32; /* STRIDE_ALIGN. Should be av_cpu_max_align() */
226 
227  for(i=1; i<=align; i+=i) {
228  ret = av_image_fill_linesizes(frame->linesize, frame->format,
229  FFALIGN(frame->width, i));
230  if (ret < 0)
231  return ret;
232  if (!(frame->linesize[0] & (align-1)))
233  break;
234  }
235 
236  for (i = 0; i < 4 && frame->linesize[i]; i++)
237  frame->linesize[i] = FFALIGN(frame->linesize[i], align);
238  }
239 
240  padded_height = FFALIGN(frame->height, 32);
241  if ((ret = av_image_fill_pointers(frame->data, frame->format, padded_height,
242  NULL, frame->linesize)) < 0)
243  return ret;
244 
245  frame->buf[0] = av_buffer_alloc(ret + 4*plane_padding);
246  if (!frame->buf[0]) {
247  ret = AVERROR(ENOMEM);
248  goto fail;
249  }
250 
251  if ((ret = av_image_fill_pointers(frame->data, frame->format, padded_height,
252  frame->buf[0]->data, frame->linesize)) < 0)
253  goto fail;
254 
255  for (i = 1; i < 4; i++) {
256  if (frame->data[i])
257  frame->data[i] += i * plane_padding;
258  }
259 
260  frame->extended_data = frame->data;
261 
262  return 0;
263 fail:
264  av_frame_unref(frame);
265  return ret;
266 }
267 
268 static int get_audio_buffer(AVFrame *frame, int align)
269 {
270  int channels;
271  int planar = av_sample_fmt_is_planar(frame->format);
272  int planes;
273  int ret, i;
274 
275  if (!frame->channels)
276  frame->channels = av_get_channel_layout_nb_channels(frame->channel_layout);
277 
278  channels = frame->channels;
279  planes = planar ? channels : 1;
280 
282  if (!frame->linesize[0]) {
283  ret = av_samples_get_buffer_size(&frame->linesize[0], channels,
284  frame->nb_samples, frame->format,
285  align);
286  if (ret < 0)
287  return ret;
288  }
289 
290  if (planes > AV_NUM_DATA_POINTERS) {
291  frame->extended_data = av_mallocz_array(planes,
292  sizeof(*frame->extended_data));
293  frame->extended_buf = av_mallocz_array((planes - AV_NUM_DATA_POINTERS),
294  sizeof(*frame->extended_buf));
295  if (!frame->extended_data || !frame->extended_buf) {
296  av_freep(&frame->extended_data);
297  av_freep(&frame->extended_buf);
298  return AVERROR(ENOMEM);
299  }
300  frame->nb_extended_buf = planes - AV_NUM_DATA_POINTERS;
301  } else
302  frame->extended_data = frame->data;
303 
304  for (i = 0; i < FFMIN(planes, AV_NUM_DATA_POINTERS); i++) {
305  frame->buf[i] = av_buffer_alloc(frame->linesize[0]);
306  if (!frame->buf[i]) {
307  av_frame_unref(frame);
308  return AVERROR(ENOMEM);
309  }
310  frame->extended_data[i] = frame->data[i] = frame->buf[i]->data;
311  }
312  for (i = 0; i < planes - AV_NUM_DATA_POINTERS; i++) {
313  frame->extended_buf[i] = av_buffer_alloc(frame->linesize[0]);
314  if (!frame->extended_buf[i]) {
315  av_frame_unref(frame);
316  return AVERROR(ENOMEM);
317  }
318  frame->extended_data[i + AV_NUM_DATA_POINTERS] = frame->extended_buf[i]->data;
319  }
320  return 0;
321 
322 }
323 
324 int av_frame_get_buffer(AVFrame *frame, int align)
325 {
326  if (frame->format < 0)
327  return AVERROR(EINVAL);
328 
329  if (frame->width > 0 && frame->height > 0)
330  return get_video_buffer(frame, align);
331  else if (frame->nb_samples > 0 && (frame->channel_layout || frame->channels > 0))
332  return get_audio_buffer(frame, align);
333 
334  return AVERROR(EINVAL);
335 }
336 
337 static int frame_copy_props(AVFrame *dst, const AVFrame *src, int force_copy)
338 {
339  int i;
340 
341  dst->key_frame = src->key_frame;
342  dst->pict_type = src->pict_type;
343  dst->sample_aspect_ratio = src->sample_aspect_ratio;
344  dst->crop_top = src->crop_top;
345  dst->crop_bottom = src->crop_bottom;
346  dst->crop_left = src->crop_left;
347  dst->crop_right = src->crop_right;
348  dst->pts = src->pts;
349  dst->repeat_pict = src->repeat_pict;
350  dst->interlaced_frame = src->interlaced_frame;
351  dst->top_field_first = src->top_field_first;
352  dst->palette_has_changed = src->palette_has_changed;
353  dst->sample_rate = src->sample_rate;
354  dst->opaque = src->opaque;
355 #if FF_API_PKT_PTS
356 FF_DISABLE_DEPRECATION_WARNINGS
357  dst->pkt_pts = src->pkt_pts;
358 FF_ENABLE_DEPRECATION_WARNINGS
359 #endif
360  dst->pkt_dts = src->pkt_dts;
361  dst->pkt_pos = src->pkt_pos;
362  dst->pkt_size = src->pkt_size;
363  dst->pkt_duration = src->pkt_duration;
364  dst->reordered_opaque = src->reordered_opaque;
365  dst->quality = src->quality;
366  dst->best_effort_timestamp = src->best_effort_timestamp;
367  dst->coded_picture_number = src->coded_picture_number;
368  dst->display_picture_number = src->display_picture_number;
369  dst->flags = src->flags;
370  dst->decode_error_flags = src->decode_error_flags;
371  dst->color_primaries = src->color_primaries;
372  dst->color_trc = src->color_trc;
373  dst->colorspace = src->colorspace;
374  dst->color_range = src->color_range;
375  dst->chroma_location = src->chroma_location;
376 
377  av_dict_copy(&dst->metadata, src->metadata, 0);
378 
379 #if FF_API_ERROR_FRAME
380 FF_DISABLE_DEPRECATION_WARNINGS
381  memcpy(dst->error, src->error, sizeof(dst->error));
382 FF_ENABLE_DEPRECATION_WARNINGS
383 #endif
384 
385  for (i = 0; i < src->nb_side_data; i++) {
386  const AVFrameSideData *sd_src = src->side_data[i];
387  AVFrameSideData *sd_dst;
388  if ( sd_src->type == AV_FRAME_DATA_PANSCAN
389  && (src->width != dst->width || src->height != dst->height))
390  continue;
391  if (force_copy) {
392  sd_dst = av_frame_new_side_data(dst, sd_src->type,
393  sd_src->size);
394  if (!sd_dst) {
395  wipe_side_data(dst);
396  return AVERROR(ENOMEM);
397  }
398  memcpy(sd_dst->data, sd_src->data, sd_src->size);
399  } else {
400  AVBufferRef *ref = av_buffer_ref(sd_src->buf);
401  sd_dst = av_frame_new_side_data_from_buf(dst, sd_src->type, ref);
402  if (!sd_dst) {
403  av_buffer_unref(&ref);
404  wipe_side_data(dst);
405  return AVERROR(ENOMEM);
406  }
407  }
408  av_dict_copy(&sd_dst->metadata, sd_src->metadata, 0);
409  }
410 
411 #if FF_API_FRAME_QP
412 FF_DISABLE_DEPRECATION_WARNINGS
413  dst->qscale_table = NULL;
414  dst->qstride = 0;
415  dst->qscale_type = 0;
416  av_buffer_unref(&dst->qp_table_buf);
417  if (src->qp_table_buf) {
418  dst->qp_table_buf = av_buffer_ref(src->qp_table_buf);
419  if (dst->qp_table_buf) {
420  dst->qscale_table = dst->qp_table_buf->data;
421  dst->qstride = src->qstride;
422  dst->qscale_type = src->qscale_type;
423  }
424  }
425 FF_ENABLE_DEPRECATION_WARNINGS
426 #endif
427 
428  av_buffer_unref(&dst->opaque_ref);
429  av_buffer_unref(&dst->private_ref);
430  if (src->opaque_ref) {
431  dst->opaque_ref = av_buffer_ref(src->opaque_ref);
432  if (!dst->opaque_ref)
433  return AVERROR(ENOMEM);
434  }
435  if (src->private_ref) {
436  dst->private_ref = av_buffer_ref(src->private_ref);
437  if (!dst->private_ref)
438  return AVERROR(ENOMEM);
439  }
440  return 0;
441 }
442 
443 int av_frame_ref(AVFrame *dst, const AVFrame *src)
444 {
445  int i, ret = 0;
446 
447  av_assert1(dst->width == 0 && dst->height == 0);
448  av_assert1(dst->channels == 0);
449 
450  dst->format = src->format;
451  dst->width = src->width;
452  dst->height = src->height;
453  dst->channels = src->channels;
454  dst->channel_layout = src->channel_layout;
455  dst->nb_samples = src->nb_samples;
456 
457  ret = frame_copy_props(dst, src, 0);
458  if (ret < 0)
459  return ret;
460 
461  /* duplicate the frame data if it's not refcounted */
462  if (!src->buf[0]) {
463  ret = av_frame_get_buffer(dst, 32);
464  if (ret < 0)
465  return ret;
466 
467  ret = av_frame_copy(dst, src);
468  if (ret < 0)
469  av_frame_unref(dst);
470 
471  return ret;
472  }
473 
474  /* ref the buffers */
475  for (i = 0; i < FF_ARRAY_ELEMS(src->buf); i++) {
476  if (!src->buf[i])
477  continue;
478  dst->buf[i] = av_buffer_ref(src->buf[i]);
479  if (!dst->buf[i]) {
480  ret = AVERROR(ENOMEM);
481  goto fail;
482  }
483  }
484 
485  if (src->extended_buf) {
486  dst->extended_buf = av_mallocz_array(sizeof(*dst->extended_buf),
487  src->nb_extended_buf);
488  if (!dst->extended_buf) {
489  ret = AVERROR(ENOMEM);
490  goto fail;
491  }
492  dst->nb_extended_buf = src->nb_extended_buf;
493 
494  for (i = 0; i < src->nb_extended_buf; i++) {
495  dst->extended_buf[i] = av_buffer_ref(src->extended_buf[i]);
496  if (!dst->extended_buf[i]) {
497  ret = AVERROR(ENOMEM);
498  goto fail;
499  }
500  }
501  }
502 
503  if (src->hw_frames_ctx) {
504  dst->hw_frames_ctx = av_buffer_ref(src->hw_frames_ctx);
505  if (!dst->hw_frames_ctx) {
506  ret = AVERROR(ENOMEM);
507  goto fail;
508  }
509  }
510 
511  /* duplicate extended data */
512  if (src->extended_data != src->data) {
513  int ch = src->channels;
514 
515  if (!ch) {
516  ret = AVERROR(EINVAL);
517  goto fail;
518  }
519  CHECK_CHANNELS_CONSISTENCY(src);
520 
521  dst->extended_data = av_malloc_array(sizeof(*dst->extended_data), ch);
522  if (!dst->extended_data) {
523  ret = AVERROR(ENOMEM);
524  goto fail;
525  }
526  memcpy(dst->extended_data, src->extended_data, sizeof(*src->extended_data) * ch);
527  } else
528  dst->extended_data = dst->data;
529 
530  memcpy(dst->data, src->data, sizeof(src->data));
531  memcpy(dst->linesize, src->linesize, sizeof(src->linesize));
532 
533  return 0;
534 
535 fail:
536  av_frame_unref(dst);
537  return ret;
538 }
539 
540 AVFrame *av_frame_clone(const AVFrame *src)
541 {
542  AVFrame *ret = av_frame_alloc();
543 
544  if (!ret)
545  return NULL;
546 
547  if (av_frame_ref(ret, src) < 0)
548  av_frame_free(&ret);
549 
550  return ret;
551 }
552 
553 void av_frame_unref(AVFrame *frame)
554 {
555  int i;
556 
557  if (!frame)
558  return;
559 
560  wipe_side_data(frame);
561 
562  for (i = 0; i < FF_ARRAY_ELEMS(frame->buf); i++)
563  av_buffer_unref(&frame->buf[i]);
564  for (i = 0; i < frame->nb_extended_buf; i++)
565  av_buffer_unref(&frame->extended_buf[i]);
566  av_freep(&frame->extended_buf);
567  av_dict_free(&frame->metadata);
568 #if FF_API_FRAME_QP
569 FF_DISABLE_DEPRECATION_WARNINGS
570  av_buffer_unref(&frame->qp_table_buf);
571 FF_ENABLE_DEPRECATION_WARNINGS
572 #endif
573 
574  av_buffer_unref(&frame->hw_frames_ctx);
575 
576  av_buffer_unref(&frame->opaque_ref);
577  av_buffer_unref(&frame->private_ref);
578 
579  get_frame_defaults(frame);
580 }
581 
582 void av_frame_move_ref(AVFrame *dst, AVFrame *src)
583 {
584  av_assert1(dst->width == 0 && dst->height == 0);
585  av_assert1(dst->channels == 0);
586 
587  *dst = *src;
588  if (src->extended_data == src->data)
589  dst->extended_data = dst->data;
590  memset(src, 0, sizeof(*src));
591  get_frame_defaults(src);
592 }
593 
594 int av_frame_is_writable(AVFrame *frame)
595 {
596  int i, ret = 1;
597 
598  /* assume non-refcounted frames are not writable */
599  if (!frame->buf[0])
600  return 0;
601 
602  for (i = 0; i < FF_ARRAY_ELEMS(frame->buf); i++)
603  if (frame->buf[i])
604  ret &= !!av_buffer_is_writable(frame->buf[i]);
605  for (i = 0; i < frame->nb_extended_buf; i++)
606  ret &= !!av_buffer_is_writable(frame->extended_buf[i]);
607 
608  return ret;
609 }
610 
611 int av_frame_make_writable(AVFrame *frame)
612 {
613  AVFrame tmp;
614  int ret;
615 
616  if (!frame->buf[0])
617  return AVERROR(EINVAL);
618 
619  if (av_frame_is_writable(frame))
620  return 0;
621 
622  memset(&tmp, 0, sizeof(tmp));
623  tmp.format = frame->format;
624  tmp.width = frame->width;
625  tmp.height = frame->height;
626  tmp.channels = frame->channels;
627  tmp.channel_layout = frame->channel_layout;
628  tmp.nb_samples = frame->nb_samples;
629  ret = av_frame_get_buffer(&tmp, 32);
630  if (ret < 0)
631  return ret;
632 
633  ret = av_frame_copy(&tmp, frame);
634  if (ret < 0) {
635  av_frame_unref(&tmp);
636  return ret;
637  }
638 
639  ret = av_frame_copy_props(&tmp, frame);
640  if (ret < 0) {
641  av_frame_unref(&tmp);
642  return ret;
643  }
644 
645  av_frame_unref(frame);
646 
647  *frame = tmp;
648  if (tmp.data == tmp.extended_data)
649  frame->extended_data = frame->data;
650 
651  return 0;
652 }
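/* Editor's note (not part of frame.c): av_frame_make_writable() is the
 * copy-on-write entry point of this API. A caller that wants to modify a
 * frame it received would typically do, for example:
 *
 *     int ret = av_frame_make_writable(frame);
 *     if (ret < 0)
 *         return ret;            // allocation failed, frame left untouched
 *     frame->data[0][0] = 0xFF;  // safe: no other reference shares the data
 */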
653 
654 int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
655 {
656  return frame_copy_props(dst, src, 1);
657 }
658 
659 AVBufferRef *av_frame_get_plane_buffer(AVFrame *frame, int plane)
660 {
661  uint8_t *data;
662  int planes, i;
663 
664  if (frame->nb_samples) {
665  int channels = frame->channels;
666  if (!channels)
667  return NULL;
668  CHECK_CHANNELS_CONSISTENCY(frame);
669  planes = av_sample_fmt_is_planar(frame->format) ? channels : 1;
670  } else
671  planes = 4;
672 
673  if (plane < 0 || plane >= planes || !frame->extended_data[plane])
674  return NULL;
675  data = frame->extended_data[plane];
676 
677  for (i = 0; i < FF_ARRAY_ELEMS(frame->buf) && frame->buf[i]; i++) {
678  AVBufferRef *buf = frame->buf[i];
679  if (data >= buf->data && data < buf->data + buf->size)
680  return buf;
681  }
682  for (i = 0; i < frame->nb_extended_buf; i++) {
683  AVBufferRef *buf = frame->extended_buf[i];
684  if (data >= buf->data && data < buf->data + buf->size)
685  return buf;
686  }
687  return NULL;
688 }
689 
690 AVFrameSideData *av_frame_new_side_data_from_buf(AVFrame *frame,
691  enum AVFrameSideDataType type,
692  AVBufferRef *buf)
693 {
694  AVFrameSideData *ret, **tmp;
695 
696  if (!buf)
697  return NULL;
698 
699  if (frame->nb_side_data > INT_MAX / sizeof(*frame->side_data) - 1)
700  return NULL;
701 
702  tmp = av_realloc(frame->side_data,
703  (frame->nb_side_data + 1) * sizeof(*frame->side_data));
704  if (!tmp)
705  return NULL;
706  frame->side_data = tmp;
707 
708  ret = av_mallocz(sizeof(*ret));
709  if (!ret)
710  return NULL;
711 
712  ret->buf = buf;
713  ret->data = ret->buf->data;
714  ret->size = buf->size;
715  ret->type = type;
716 
717  frame->side_data[frame->nb_side_data++] = ret;
718 
719  return ret;
720 }
721 
722 AVFrameSideData *av_frame_new_side_data(AVFrame *frame,
723  enum AVFrameSideDataType type,
724  int size)
725 {
726  AVFrameSideData *ret;
727  AVBufferRef *buf = av_buffer_alloc(size);
728  ret = av_frame_new_side_data_from_buf(frame, type, buf);
729  if (!ret)
730  av_buffer_unref(&buf);
731  return ret;
732 }
733 
734 AVFrameSideData *av_frame_get_side_data(const AVFrame *frame,
735  enum AVFrameSideDataType type)
736 {
737  int i;
738 
739  for (i = 0; i < frame->nb_side_data; i++) {
740  if (frame->side_data[i]->type == type)
741  return frame->side_data[i];
742  }
743  return NULL;
744 }
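/* Editor's note (not part of frame.c): a rough sketch of how the side-data
 * helpers above (and av_frame_remove_side_data() further below) are commonly
 * combined; the side-data type and payload size here are only an example.
 *
 *     AVFrameSideData *sd = av_frame_new_side_data(frame,
 *                                                  AV_FRAME_DATA_DISPLAYMATRIX,
 *                                                  9 * sizeof(int32_t));
 *     if (!sd)
 *         return AVERROR(ENOMEM);
 *     memset(sd->data, 0, sd->size);                                   // fill payload
 *
 *     sd = av_frame_get_side_data(frame, AV_FRAME_DATA_DISPLAYMATRIX); // look it up
 *     av_frame_remove_side_data(frame, AV_FRAME_DATA_DISPLAYMATRIX);   // drop it
 */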
745 
746 static int frame_copy_video(AVFrame *dst, const AVFrame *src)
747 {
748  const uint8_t *src_data[4];
749  int i, planes;
750 
751  if (dst->width < src->width ||
752  dst->height < src->height)
753  return AVERROR(EINVAL);
754 
755  planes = av_pix_fmt_count_planes(dst->format);
756  for (i = 0; i < planes; i++)
757  if (!dst->data[i] || !src->data[i])
758  return AVERROR(EINVAL);
759 
760  memcpy(src_data, src->data, sizeof(src_data));
761  av_image_copy(dst->data, dst->linesize,
762  src_data, src->linesize,
763  dst->format, src->width, src->height);
764 
765  return 0;
766 }
767 
768 static int frame_copy_audio(AVFrame *dst, const AVFrame *src)
769 {
771  int channels = dst->channels;
772  int planes = planar ? channels : 1;
773  int i;
774 
775  if (dst->nb_samples != src->nb_samples ||
776  dst->channels != src->channels ||
777  dst->channel_layout != src->channel_layout)
778  return AVERROR(EINVAL);
779 
780  CHECK_CHANNELS_CONSISTENCY(src);
781 
782  for (i = 0; i < planes; i++)
783  if (!dst->extended_data[i] || !src->extended_data[i])
784  return AVERROR(EINVAL);
785 
786  av_samples_copy(dst->extended_data, src->extended_data, 0, 0,
787  dst->nb_samples, channels, dst->format);
788 
789  return 0;
790 }
791 
792 int av_frame_copy(AVFrame *dst, const AVFrame *src)
793 {
794  if (dst->format != src->format || dst->format < 0)
795  return AVERROR(EINVAL);
796 
797  if (dst->width > 0 && dst->height > 0)
798  return frame_copy_video(dst, src);
799  else if (dst->nb_samples > 0 && dst->channels > 0)
800  return frame_copy_audio(dst, src);
801 
802  return AVERROR(EINVAL);
803 }
804 
805 void av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type)
806 {
807  int i;
808 
809  for (i = 0; i < frame->nb_side_data; i++) {
810  AVFrameSideData *sd = frame->side_data[i];
811  if (sd->type == type) {
812  free_side_data(&frame->side_data[i]);
813  frame->side_data[i] = frame->side_data[frame->nb_side_data - 1];
814  frame->nb_side_data--;
815  }
816  }
817 }
818 
819 const char *av_frame_side_data_name(enum AVFrameSideDataType type)
820 {
821  switch(type) {
822  case AV_FRAME_DATA_PANSCAN: return "AVPanScan";
823  case AV_FRAME_DATA_A53_CC: return "ATSC A53 Part 4 Closed Captions";
824  case AV_FRAME_DATA_STEREO3D: return "Stereo 3D";
825  case AV_FRAME_DATA_MATRIXENCODING: return "AVMatrixEncoding";
826  case AV_FRAME_DATA_DOWNMIX_INFO: return "Metadata relevant to a downmix procedure";
827  case AV_FRAME_DATA_REPLAYGAIN: return "AVReplayGain";
828  case AV_FRAME_DATA_DISPLAYMATRIX: return "3x3 displaymatrix";
829  case AV_FRAME_DATA_AFD: return "Active format description";
830  case AV_FRAME_DATA_MOTION_VECTORS: return "Motion vectors";
831  case AV_FRAME_DATA_SKIP_SAMPLES: return "Skip samples";
832  case AV_FRAME_DATA_AUDIO_SERVICE_TYPE: return "Audio service type";
833  case AV_FRAME_DATA_MASTERING_DISPLAY_METADATA: return "Mastering display metadata";
834  case AV_FRAME_DATA_CONTENT_LIGHT_LEVEL: return "Content light level metadata";
835  case AV_FRAME_DATA_GOP_TIMECODE: return "GOP timecode";
836  case AV_FRAME_DATA_S12M_TIMECODE: return "SMPTE 12-1 timecode";
837  case AV_FRAME_DATA_SPHERICAL: return "Spherical Mapping";
838  case AV_FRAME_DATA_ICC_PROFILE: return "ICC profile";
839 #if FF_API_FRAME_QP
840  case AV_FRAME_DATA_QP_TABLE_PROPERTIES: return "QP table properties";
841  case AV_FRAME_DATA_QP_TABLE_DATA: return "QP table data";
842 #endif
843  case AV_FRAME_DATA_DYNAMIC_HDR_PLUS: return "HDR Dynamic Metadata SMPTE2094-40 (HDR10+)";
844  case AV_FRAME_DATA_REGIONS_OF_INTEREST: return "Regions Of Interest";
845  }
846  return NULL;
847 }
848 
849 static int calc_cropping_offsets(size_t offsets[4], const AVFrame *frame,
850  const AVPixFmtDescriptor *desc)
851 {
852  int i, j;
853 
854  for (i = 0; frame->data[i]; i++) {
855  const AVComponentDescriptor *comp = NULL;
856  int shift_x = (i == 1 || i == 2) ? desc->log2_chroma_w : 0;
857  int shift_y = (i == 1 || i == 2) ? desc->log2_chroma_h : 0;
858 
859  if (desc->flags & (AV_PIX_FMT_FLAG_PAL | FF_PSEUDOPAL) && i == 1) {
860  offsets[i] = 0;
861  break;
862  }
863 
864  /* find any component descriptor for this plane */
865  for (j = 0; j < desc->nb_components; j++) {
866  if (desc->comp[j].plane == i) {
867  comp = &desc->comp[j];
868  break;
869  }
870  }
871  if (!comp)
872  return AVERROR_BUG;
873 
874  offsets[i] = (frame->crop_top >> shift_y) * frame->linesize[i] +
875  (frame->crop_left >> shift_x) * comp->step;
876  }
877 
878  return 0;
879 }
880 
881 int av_frame_apply_cropping(AVFrame *frame, int flags)
882 {
883  const AVPixFmtDescriptor *desc;
884  size_t offsets[4];
885  int i;
886 
887  if (!(frame->width > 0 && frame->height > 0))
888  return AVERROR(EINVAL);
889 
890  if (frame->crop_left >= INT_MAX - frame->crop_right ||
891  frame->crop_top >= INT_MAX - frame->crop_bottom ||
892  (frame->crop_left + frame->crop_right) >= frame->width ||
893  (frame->crop_top + frame->crop_bottom) >= frame->height)
894  return AVERROR(ERANGE);
895 
896  desc = av_pix_fmt_desc_get(frame->format);
897  if (!desc)
898  return AVERROR_BUG;
899 
900  /* Apply just the right/bottom cropping for hwaccel formats. Bitstream
901  * formats cannot be easily handled here either (and corresponding decoders
902  * should not export any cropping anyway), so do the same for those as well.
903  * */
904  if (desc->flags & (AV_PIX_FMT_FLAG_BITSTREAM | AV_PIX_FMT_FLAG_HWACCEL)) {
905  frame->width -= frame->crop_right;
906  frame->height -= frame->crop_bottom;
907  frame->crop_right = 0;
908  frame->crop_bottom = 0;
909  return 0;
910  }
911 
912  /* calculate the offsets for each plane */
913  calc_cropping_offsets(offsets, frame, desc);
914 
915  /* adjust the offsets to avoid breaking alignment */
916  if (!(flags & AV_FRAME_CROP_UNALIGNED)) {
917  int log2_crop_align = frame->crop_left ? ff_ctz(frame->crop_left) : INT_MAX;
918  int min_log2_align = INT_MAX;
919 
920  for (i = 0; frame->data[i]; i++) {
921  int log2_align = offsets[i] ? ff_ctz(offsets[i]) : INT_MAX;
922  min_log2_align = FFMIN(log2_align, min_log2_align);
923  }
924 
925  /* we assume, and it should always be true, that the data alignment is
926  * related to the cropping alignment by a constant power-of-2 factor */
927  if (log2_crop_align < min_log2_align)
928  return AVERROR_BUG;
929 
930  if (min_log2_align < 5) {
931  frame->crop_left &= ~((1 << (5 + log2_crop_align - min_log2_align)) - 1);
932  calc_cropping_offsets(offsets, frame, desc);
933  }
934  }
935 
936  for (i = 0; frame->data[i]; i++)
937  frame->data[i] += offsets[i];
938 
939  frame->width -= (frame->crop_left + frame->crop_right);
940  frame->height -= (frame->crop_top + frame->crop_bottom);
941  frame->crop_left = 0;
942  frame->crop_right = 0;
943  frame->crop_top = 0;
944  frame->crop_bottom = 0;
945 
946  return 0;
947 }
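
Usage sketch (illustrative only, not part of frame.c): the snippet below shows one common way the allocation and reference-counting helpers defined above fit together. The pixel format, dimensions, and the helper name frame_lifecycle_sketch are arbitrary choices for the example, and error handling is abbreviated.

#include <libavutil/frame.h>
#include <libavutil/error.h>

static int frame_lifecycle_sketch(void)
{
    AVFrame *src = av_frame_alloc();   /* av_frame_alloc(): fields set to defaults, no data yet */
    AVFrame *dst = av_frame_alloc();
    int ret;

    if (!src || !dst) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    /* Describe the picture, then let av_frame_get_buffer() allocate
     * refcounted buffers (align <= 0 selects a default alignment). */
    src->format = AV_PIX_FMT_YUV420P;
    src->width  = 640;
    src->height = 480;
    if ((ret = av_frame_get_buffer(src, 0)) < 0)
        goto end;

    /* av_frame_ref(): dst shares src's buffers via new AVBufferRef references
     * and copies the metadata fields handled by frame_copy_props(). */
    if ((ret = av_frame_ref(dst, src)) < 0)
        goto end;

    /* av_frame_make_writable(): copies the data only if the buffers are shared. */
    ret = av_frame_make_writable(dst);

end:
    av_frame_free(&src);   /* av_frame_free() unrefs the data and frees the struct */
    av_frame_free(&dst);
    return ret;
}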