frame.c
/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "channel_layout.h"
#include "avassert.h"
#include "buffer.h"
#include "common.h"
#include "cpu.h"
#include "dict.h"
#include "frame.h"
#include "imgutils.h"
#include "mem.h"
#include "samplefmt.h"
#include "hwcontext.h"

#define CHECK_CHANNELS_CONSISTENCY(frame) \
    av_assert2(!(frame)->channel_layout || \
               (frame)->channels == \
               av_get_channel_layout_nb_channels((frame)->channel_layout))

#if FF_API_COLORSPACE_NAME
const char *av_get_colorspace_name(enum AVColorSpace val)
{
    static const char * const name[] = {
        [AVCOL_SPC_RGB]       = "GBR",
        [AVCOL_SPC_BT709]     = "bt709",
        [AVCOL_SPC_FCC]       = "fcc",
        [AVCOL_SPC_BT470BG]   = "bt470bg",
        [AVCOL_SPC_SMPTE170M] = "smpte170m",
        [AVCOL_SPC_SMPTE240M] = "smpte240m",
        [AVCOL_SPC_YCOCG]     = "YCgCo",
    };
    if ((unsigned)val >= FF_ARRAY_ELEMS(name))
        return NULL;
    return name[val];
}
#endif
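
/* Reset all fields of a frame to their default values. This only clears the
 * struct itself; any buffers previously referenced by the frame must be
 * released by the caller (see av_frame_unref()) before reuse. */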
static void get_frame_defaults(AVFrame *frame)
{
    memset(frame, 0, sizeof(*frame));

    frame->pts                   =
    frame->pkt_dts               = AV_NOPTS_VALUE;
    frame->best_effort_timestamp = AV_NOPTS_VALUE;
    frame->pkt_duration        = 0;
    frame->pkt_pos             = -1;
    frame->pkt_size            = -1;
    frame->time_base           = (AVRational){ 0, 1 };
    frame->key_frame           = 1;
    frame->sample_aspect_ratio = (AVRational){ 0, 1 };
    frame->format              = -1; /* unknown */
    frame->extended_data       = frame->data;
    frame->color_primaries     = AVCOL_PRI_UNSPECIFIED;
    frame->color_trc           = AVCOL_TRC_UNSPECIFIED;
    frame->colorspace          = AVCOL_SPC_UNSPECIFIED;
    frame->color_range         = AVCOL_RANGE_UNSPECIFIED;
    frame->chroma_location     = AVCHROMA_LOC_UNSPECIFIED;
    frame->flags               = 0;
}

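/* Free a single side-data entry: drop its buffer reference, free its
 * metadata dictionary, then free the entry itself. */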
static void free_side_data(AVFrameSideData **ptr_sd)
{
    AVFrameSideData *sd = *ptr_sd;

    av_buffer_unref(&sd->buf);
    av_dict_free(&sd->metadata);
    av_freep(ptr_sd);
}

static void wipe_side_data(AVFrame *frame)
{
    int i;

    for (i = 0; i < frame->nb_side_data; i++) {
        free_side_data(&frame->side_data[i]);
    }
    frame->nb_side_data = 0;

    av_freep(&frame->side_data);
}

AVFrame *av_frame_alloc(void)
{
    AVFrame *frame = av_malloc(sizeof(*frame));

    if (!frame)
        return NULL;

    get_frame_defaults(frame);

    return frame;
}

void av_frame_free(AVFrame **frame)
{
    if (!frame || !*frame)
        return;

    av_frame_unref(*frame);
    av_freep(frame);
}

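/* Allocate a single refcounted buffer covering all picture planes, with the
 * requested (or default) linesize alignment and per-plane padding, and point
 * frame->data[] into it. */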
static int get_video_buffer(AVFrame *frame, int align)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
    int ret, i, padded_height, total_size;
    int plane_padding = FFMAX(16 + 16/*STRIDE_ALIGN*/, align);
    ptrdiff_t linesizes[4];
    size_t sizes[4];

    if (!desc)
        return AVERROR(EINVAL);

    if ((ret = av_image_check_size(frame->width, frame->height, 0, NULL)) < 0)
        return ret;

    if (!frame->linesize[0]) {
        if (align <= 0)
            align = 32; /* STRIDE_ALIGN. Should be av_cpu_max_align() */

        for (i = 1; i <= align; i += i) {
            ret = av_image_fill_linesizes(frame->linesize, frame->format,
                                          FFALIGN(frame->width, i));
            if (ret < 0)
                return ret;
            if (!(frame->linesize[0] & (align - 1)))
                break;
        }

        for (i = 0; i < 4 && frame->linesize[i]; i++)
            frame->linesize[i] = FFALIGN(frame->linesize[i], align);
    }

    for (i = 0; i < 4; i++)
        linesizes[i] = frame->linesize[i];

    padded_height = FFALIGN(frame->height, 32);
    if ((ret = av_image_fill_plane_sizes(sizes, frame->format,
                                         padded_height, linesizes)) < 0)
        return ret;

    total_size = 4 * plane_padding;
    for (i = 0; i < 4; i++) {
        if (sizes[i] > INT_MAX - total_size)
            return AVERROR(EINVAL);
        total_size += sizes[i];
    }

    frame->buf[0] = av_buffer_alloc(total_size);
    if (!frame->buf[0]) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    if ((ret = av_image_fill_pointers(frame->data, frame->format, padded_height,
                                      frame->buf[0]->data, frame->linesize)) < 0)
        goto fail;

    for (i = 1; i < 4; i++) {
        if (frame->data[i])
            frame->data[i] += i * plane_padding;
    }

    frame->extended_data = frame->data;

    return 0;
fail:
    av_frame_unref(frame);
    return ret;
}

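/* Allocate one refcounted buffer per audio plane; planes beyond
 * AV_NUM_DATA_POINTERS are carried in extended_buf/extended_data. */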
static int get_audio_buffer(AVFrame *frame, int align)
{
    int channels;
    int planar   = av_sample_fmt_is_planar(frame->format);
    int planes;
    int ret, i;

    if (!frame->channels)
        frame->channels = av_get_channel_layout_nb_channels(frame->channel_layout);

    channels = frame->channels;
    planes   = planar ? channels : 1;

    CHECK_CHANNELS_CONSISTENCY(frame);
    if (!frame->linesize[0]) {
        ret = av_samples_get_buffer_size(&frame->linesize[0], channels,
                                         frame->nb_samples, frame->format,
                                         align);
        if (ret < 0)
            return ret;
    }

    if (planes > AV_NUM_DATA_POINTERS) {
        frame->extended_data = av_calloc(planes,
                                         sizeof(*frame->extended_data));
        frame->extended_buf  = av_calloc(planes - AV_NUM_DATA_POINTERS,
                                         sizeof(*frame->extended_buf));
        if (!frame->extended_data || !frame->extended_buf) {
            av_freep(&frame->extended_data);
            av_freep(&frame->extended_buf);
            return AVERROR(ENOMEM);
        }
        frame->nb_extended_buf = planes - AV_NUM_DATA_POINTERS;
    } else
        frame->extended_data = frame->data;

    for (i = 0; i < FFMIN(planes, AV_NUM_DATA_POINTERS); i++) {
        frame->buf[i] = av_buffer_alloc(frame->linesize[0]);
        if (!frame->buf[i]) {
            av_frame_unref(frame);
            return AVERROR(ENOMEM);
        }
        frame->extended_data[i] = frame->data[i] = frame->buf[i]->data;
    }
    for (i = 0; i < planes - AV_NUM_DATA_POINTERS; i++) {
        frame->extended_buf[i] = av_buffer_alloc(frame->linesize[0]);
        if (!frame->extended_buf[i]) {
            av_frame_unref(frame);
            return AVERROR(ENOMEM);
        }
        frame->extended_data[i + AV_NUM_DATA_POINTERS] = frame->extended_buf[i]->data;
    }
    return 0;

}

int av_frame_get_buffer(AVFrame *frame, int align)
{
    if (frame->format < 0)
        return AVERROR(EINVAL);

    if (frame->width > 0 && frame->height > 0)
        return get_video_buffer(frame, align);
    else if (frame->nb_samples > 0 && (frame->channel_layout || frame->channels > 0))
        return get_audio_buffer(frame, align);

    return AVERROR(EINVAL);
}

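/* Copy metadata, timing and side data from src to dst. With force_copy set,
 * side-data payloads are duplicated; otherwise only new buffer references
 * are created. */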
static int frame_copy_props(AVFrame *dst, const AVFrame *src, int force_copy)
{
    int ret, i;

    dst->key_frame              = src->key_frame;
    dst->pict_type              = src->pict_type;
    dst->sample_aspect_ratio    = src->sample_aspect_ratio;
    dst->crop_top               = src->crop_top;
    dst->crop_bottom            = src->crop_bottom;
    dst->crop_left              = src->crop_left;
    dst->crop_right             = src->crop_right;
    dst->pts                    = src->pts;
    dst->repeat_pict            = src->repeat_pict;
    dst->interlaced_frame       = src->interlaced_frame;
    dst->top_field_first        = src->top_field_first;
    dst->palette_has_changed    = src->palette_has_changed;
    dst->sample_rate            = src->sample_rate;
    dst->opaque                 = src->opaque;
    dst->pkt_dts                = src->pkt_dts;
    dst->pkt_pos                = src->pkt_pos;
    dst->pkt_size               = src->pkt_size;
    dst->pkt_duration           = src->pkt_duration;
    dst->time_base              = src->time_base;
    dst->reordered_opaque       = src->reordered_opaque;
    dst->quality                = src->quality;
    dst->best_effort_timestamp  = src->best_effort_timestamp;
    dst->coded_picture_number   = src->coded_picture_number;
    dst->display_picture_number = src->display_picture_number;
    dst->flags                  = src->flags;
    dst->decode_error_flags     = src->decode_error_flags;
    dst->color_primaries        = src->color_primaries;
    dst->color_trc              = src->color_trc;
    dst->colorspace             = src->colorspace;
    dst->color_range            = src->color_range;
    dst->chroma_location        = src->chroma_location;

    av_dict_copy(&dst->metadata, src->metadata, 0);

    for (i = 0; i < src->nb_side_data; i++) {
        const AVFrameSideData *sd_src = src->side_data[i];
        AVFrameSideData *sd_dst;
        if (   sd_src->type == AV_FRAME_DATA_PANSCAN
            && (src->width != dst->width || src->height != dst->height))
            continue;
        if (force_copy) {
            sd_dst = av_frame_new_side_data(dst, sd_src->type,
                                            sd_src->size);
            if (!sd_dst) {
                wipe_side_data(dst);
                return AVERROR(ENOMEM);
            }
            memcpy(sd_dst->data, sd_src->data, sd_src->size);
        } else {
            AVBufferRef *ref = av_buffer_ref(sd_src->buf);
            sd_dst = av_frame_new_side_data_from_buf(dst, sd_src->type, ref);
            if (!sd_dst) {
                av_buffer_unref(&ref);
                wipe_side_data(dst);
                return AVERROR(ENOMEM);
            }
        }
        av_dict_copy(&sd_dst->metadata, sd_src->metadata, 0);
    }

    ret  = av_buffer_replace(&dst->opaque_ref, src->opaque_ref);
    ret |= av_buffer_replace(&dst->private_ref, src->private_ref);
    return ret;
}

int av_frame_ref(AVFrame *dst, const AVFrame *src)
{
    int i, ret = 0;

    av_assert1(dst->width == 0 && dst->height == 0);
    av_assert1(dst->channels == 0);

    dst->format         = src->format;
    dst->width          = src->width;
    dst->height         = src->height;
    dst->channels       = src->channels;
    dst->channel_layout = src->channel_layout;
    dst->nb_samples     = src->nb_samples;

    ret = frame_copy_props(dst, src, 0);
    if (ret < 0)
        goto fail;

    /* duplicate the frame data if it's not refcounted */
    if (!src->buf[0]) {
        ret = av_frame_get_buffer(dst, 0);
        if (ret < 0)
            goto fail;

        ret = av_frame_copy(dst, src);
        if (ret < 0)
            goto fail;

        return 0;
    }

    /* ref the buffers */
    for (i = 0; i < FF_ARRAY_ELEMS(src->buf); i++) {
        if (!src->buf[i])
            continue;
        dst->buf[i] = av_buffer_ref(src->buf[i]);
        if (!dst->buf[i]) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    if (src->extended_buf) {
        dst->extended_buf = av_calloc(src->nb_extended_buf,
                                      sizeof(*dst->extended_buf));
        if (!dst->extended_buf) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        dst->nb_extended_buf = src->nb_extended_buf;

        for (i = 0; i < src->nb_extended_buf; i++) {
            dst->extended_buf[i] = av_buffer_ref(src->extended_buf[i]);
            if (!dst->extended_buf[i]) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }
        }
    }

    if (src->hw_frames_ctx) {
        dst->hw_frames_ctx = av_buffer_ref(src->hw_frames_ctx);
        if (!dst->hw_frames_ctx) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    /* duplicate extended data */
    if (src->extended_data != src->data) {
        int ch = src->channels;

        if (!ch) {
            ret = AVERROR(EINVAL);
            goto fail;
        }
        CHECK_CHANNELS_CONSISTENCY(src);

        dst->extended_data = av_malloc_array(sizeof(*dst->extended_data), ch);
        if (!dst->extended_data) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        memcpy(dst->extended_data, src->extended_data, sizeof(*src->extended_data) * ch);
    } else
        dst->extended_data = dst->data;

    memcpy(dst->data,     src->data,     sizeof(src->data));
    memcpy(dst->linesize, src->linesize, sizeof(src->linesize));

    return 0;

fail:
    av_frame_unref(dst);
    return ret;
}

AVFrame *av_frame_clone(const AVFrame *src)
{
    AVFrame *ret = av_frame_alloc();

    if (!ret)
        return NULL;

    if (av_frame_ref(ret, src) < 0)
        av_frame_free(&ret);

    return ret;
}

void av_frame_unref(AVFrame *frame)
{
    int i;

    if (!frame)
        return;

    wipe_side_data(frame);

    for (i = 0; i < FF_ARRAY_ELEMS(frame->buf); i++)
        av_buffer_unref(&frame->buf[i]);
    for (i = 0; i < frame->nb_extended_buf; i++)
        av_buffer_unref(&frame->extended_buf[i]);
    av_freep(&frame->extended_buf);
    av_dict_free(&frame->metadata);

    av_buffer_unref(&frame->hw_frames_ctx);

    av_buffer_unref(&frame->opaque_ref);
    av_buffer_unref(&frame->private_ref);

    if (frame->extended_data != frame->data)
        av_freep(&frame->extended_data);

    get_frame_defaults(frame);
}

void av_frame_move_ref(AVFrame *dst, AVFrame *src)
{
    av_assert1(dst->width == 0 && dst->height == 0);
    av_assert1(dst->channels == 0);

    *dst = *src;
    if (src->extended_data == src->data)
        dst->extended_data = dst->data;
    get_frame_defaults(src);
}

int av_frame_is_writable(AVFrame *frame)
{
    int i, ret = 1;

    /* assume non-refcounted frames are not writable */
    if (!frame->buf[0])
        return 0;

    for (i = 0; i < FF_ARRAY_ELEMS(frame->buf); i++)
        if (frame->buf[i])
            ret &= !!av_buffer_is_writable(frame->buf[i]);
    for (i = 0; i < frame->nb_extended_buf; i++)
        ret &= !!av_buffer_is_writable(frame->extended_buf[i]);

    return ret;
}

int av_frame_make_writable(AVFrame *frame)
{
    AVFrame tmp;
    int ret;

    if (!frame->buf[0])
        return AVERROR(EINVAL);

    if (av_frame_is_writable(frame))
        return 0;

    memset(&tmp, 0, sizeof(tmp));
    tmp.format         = frame->format;
    tmp.width          = frame->width;
    tmp.height         = frame->height;
    tmp.channels       = frame->channels;
    tmp.channel_layout = frame->channel_layout;
    tmp.nb_samples     = frame->nb_samples;

    if (frame->hw_frames_ctx)
        ret = av_hwframe_get_buffer(frame->hw_frames_ctx, &tmp, 0);
    else
        ret = av_frame_get_buffer(&tmp, 0);
    if (ret < 0)
        return ret;

    ret = av_frame_copy(&tmp, frame);
    if (ret < 0) {
        av_frame_unref(&tmp);
        return ret;
    }

    ret = av_frame_copy_props(&tmp, frame);
    if (ret < 0) {
        av_frame_unref(&tmp);
        return ret;
    }

    av_frame_unref(frame);

    *frame = tmp;
    if (tmp.data == tmp.extended_data)
        frame->extended_data = frame->data;

    return 0;
}

int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
{
    return frame_copy_props(dst, src, 1);
}

AVBufferRef *av_frame_get_plane_buffer(AVFrame *frame, int plane)
{
    uint8_t *data;
    int planes, i;

    if (frame->nb_samples) {
        int channels = frame->channels;
        if (!channels)
            return NULL;
        CHECK_CHANNELS_CONSISTENCY(frame);
        planes = av_sample_fmt_is_planar(frame->format) ? channels : 1;
    } else
        planes = 4;

    if (plane < 0 || plane >= planes || !frame->extended_data[plane])
        return NULL;
    data = frame->extended_data[plane];

    for (i = 0; i < FF_ARRAY_ELEMS(frame->buf) && frame->buf[i]; i++) {
        AVBufferRef *buf = frame->buf[i];
        if (data >= buf->data && data < buf->data + buf->size)
            return buf;
    }
    for (i = 0; i < frame->nb_extended_buf; i++) {
        AVBufferRef *buf = frame->extended_buf[i];
        if (data >= buf->data && data < buf->data + buf->size)
            return buf;
    }
    return NULL;
}

AVFrameSideData *av_frame_new_side_data_from_buf(AVFrame *frame,
                                                 enum AVFrameSideDataType type,
                                                 AVBufferRef *buf)
{
    AVFrameSideData *ret, **tmp;

    if (!buf)
        return NULL;

    if (frame->nb_side_data > INT_MAX / sizeof(*frame->side_data) - 1)
        return NULL;

    tmp = av_realloc(frame->side_data,
                     (frame->nb_side_data + 1) * sizeof(*frame->side_data));
    if (!tmp)
        return NULL;
    frame->side_data = tmp;

    ret = av_mallocz(sizeof(*ret));
    if (!ret)
        return NULL;

    ret->buf = buf;
    ret->data = ret->buf->data;
    ret->size = buf->size;
    ret->type = type;

    frame->side_data[frame->nb_side_data++] = ret;

    return ret;
}

AVFrameSideData *av_frame_new_side_data(AVFrame *frame,
                                        enum AVFrameSideDataType type,
                                        size_t size)
{
    AVFrameSideData *ret;
    AVBufferRef *buf = av_buffer_alloc(size);
    ret = av_frame_new_side_data_from_buf(frame, type, buf);
    if (!ret)
        av_buffer_unref(&buf);
    return ret;
}

AVFrameSideData *av_frame_get_side_data(const AVFrame *frame,
                                        enum AVFrameSideDataType type)
{
    int i;

    for (i = 0; i < frame->nb_side_data; i++) {
        if (frame->side_data[i]->type == type)
            return frame->side_data[i];
    }
    return NULL;
}

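/* Plane-by-plane pixel copy between frames of the same format; hardware
 * frames are delegated to av_hwframe_transfer_data(). */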
static int frame_copy_video(AVFrame *dst, const AVFrame *src)
{
    const uint8_t *src_data[4];
    int i, planes;

    if (dst->width  < src->width ||
        dst->height < src->height)
        return AVERROR(EINVAL);

    if (src->hw_frames_ctx || dst->hw_frames_ctx)
        return av_hwframe_transfer_data(dst, src, 0);

    planes = av_pix_fmt_count_planes(dst->format);
    for (i = 0; i < planes; i++)
        if (!dst->data[i] || !src->data[i])
            return AVERROR(EINVAL);

    memcpy(src_data, src->data, sizeof(src_data));
    av_image_copy(dst->data, dst->linesize,
                  src_data, src->linesize,
                  dst->format, src->width, src->height);

    return 0;
}

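/* Copy all samples between audio frames that have identical sample count,
 * channel count and channel layout. */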
static int frame_copy_audio(AVFrame *dst, const AVFrame *src)
{
    int planar   = av_sample_fmt_is_planar(dst->format);
    int channels = dst->channels;
    int planes   = planar ? channels : 1;
    int i;

    if (dst->nb_samples     != src->nb_samples ||
        dst->channels       != src->channels ||
        dst->channel_layout != src->channel_layout)
        return AVERROR(EINVAL);

    CHECK_CHANNELS_CONSISTENCY(dst);

    for (i = 0; i < planes; i++)
        if (!dst->extended_data[i] || !src->extended_data[i])
            return AVERROR(EINVAL);

    av_samples_copy(dst->extended_data, src->extended_data, 0, 0,
                    dst->nb_samples, channels, dst->format);

    return 0;
}

int av_frame_copy(AVFrame *dst, const AVFrame *src)
{
    if (dst->format != src->format || dst->format < 0)
        return AVERROR(EINVAL);

    if (dst->width > 0 && dst->height > 0)
        return frame_copy_video(dst, src);
    else if (dst->nb_samples > 0 && dst->channels > 0)
        return frame_copy_audio(dst, src);

    return AVERROR(EINVAL);
}

void av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type)
{
    int i;

    for (i = frame->nb_side_data - 1; i >= 0; i--) {
        AVFrameSideData *sd = frame->side_data[i];
        if (sd->type == type) {
            free_side_data(&frame->side_data[i]);
            frame->side_data[i] = frame->side_data[frame->nb_side_data - 1];
            frame->nb_side_data--;
        }
    }
}

const char *av_frame_side_data_name(enum AVFrameSideDataType type)
{
    switch(type) {
    case AV_FRAME_DATA_PANSCAN:         return "AVPanScan";
    case AV_FRAME_DATA_A53_CC:          return "ATSC A53 Part 4 Closed Captions";
    case AV_FRAME_DATA_STEREO3D:        return "Stereo 3D";
    case AV_FRAME_DATA_MATRIXENCODING:  return "AVMatrixEncoding";
    case AV_FRAME_DATA_DOWNMIX_INFO:    return "Metadata relevant to a downmix procedure";
    case AV_FRAME_DATA_REPLAYGAIN:      return "AVReplayGain";
    case AV_FRAME_DATA_DISPLAYMATRIX:   return "3x3 displaymatrix";
    case AV_FRAME_DATA_AFD:             return "Active format description";
    case AV_FRAME_DATA_MOTION_VECTORS:  return "Motion vectors";
    case AV_FRAME_DATA_SKIP_SAMPLES:    return "Skip samples";
    case AV_FRAME_DATA_AUDIO_SERVICE_TYPE:          return "Audio service type";
    case AV_FRAME_DATA_MASTERING_DISPLAY_METADATA:  return "Mastering display metadata";
    case AV_FRAME_DATA_CONTENT_LIGHT_LEVEL:         return "Content light level metadata";
    case AV_FRAME_DATA_GOP_TIMECODE:                return "GOP timecode";
    case AV_FRAME_DATA_S12M_TIMECODE:               return "SMPTE 12-1 timecode";
    case AV_FRAME_DATA_SPHERICAL:                   return "Spherical Mapping";
    case AV_FRAME_DATA_ICC_PROFILE:                 return "ICC profile";
    case AV_FRAME_DATA_DYNAMIC_HDR_PLUS:            return "HDR Dynamic Metadata SMPTE2094-40 (HDR10+)";
    case AV_FRAME_DATA_REGIONS_OF_INTEREST:         return "Regions Of Interest";
    case AV_FRAME_DATA_VIDEO_ENC_PARAMS:            return "Video encoding parameters";
    case AV_FRAME_DATA_SEI_UNREGISTERED:            return "H.26[45] User Data Unregistered SEI message";
    case AV_FRAME_DATA_FILM_GRAIN_PARAMS:           return "Film grain parameters";
    case AV_FRAME_DATA_DETECTION_BBOXES:            return "Bounding boxes for object detection and classification";
    case AV_FRAME_DATA_DOVI_RPU_BUFFER:             return "Dolby Vision RPU Data";
    case AV_FRAME_DATA_DOVI_METADATA:               return "Dolby Vision Metadata";
    }
    return NULL;
}

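/* For each plane, compute the byte offset that corresponds to the frame's
 * crop_top/crop_left values, taking chroma subsampling into account. */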
static int calc_cropping_offsets(size_t offsets[4], const AVFrame *frame,
                                 const AVPixFmtDescriptor *desc)
{
    int i, j;

    for (i = 0; frame->data[i]; i++) {
        const AVComponentDescriptor *comp = NULL;
        int shift_x = (i == 1 || i == 2) ? desc->log2_chroma_w : 0;
        int shift_y = (i == 1 || i == 2) ? desc->log2_chroma_h : 0;

        if (desc->flags & AV_PIX_FMT_FLAG_PAL && i == 1) {
            offsets[i] = 0;
            break;
        }

        /* find any component descriptor for this plane */
        for (j = 0; j < desc->nb_components; j++) {
            if (desc->comp[j].plane == i) {
                comp = &desc->comp[j];
                break;
            }
        }
        if (!comp)
            return AVERROR_BUG;

        offsets[i] = (frame->crop_top >> shift_y) * frame->linesize[i] +
                     (frame->crop_left >> shift_x) * comp->step;
    }

    return 0;
}

int av_frame_apply_cropping(AVFrame *frame, int flags)
{
    const AVPixFmtDescriptor *desc;
    size_t offsets[4];
    int i;

    if (!(frame->width > 0 && frame->height > 0))
        return AVERROR(EINVAL);

    if (frame->crop_left >= INT_MAX - frame->crop_right        ||
        frame->crop_top  >= INT_MAX - frame->crop_bottom       ||
        (frame->crop_left + frame->crop_right) >= frame->width ||
        (frame->crop_top + frame->crop_bottom) >= frame->height)
        return AVERROR(ERANGE);

    desc = av_pix_fmt_desc_get(frame->format);
    if (!desc)
        return AVERROR_BUG;

    /* Apply just the right/bottom cropping for hwaccel formats. Bitstream
     * formats cannot be easily handled here either (and corresponding decoders
     * should not export any cropping anyway), so do the same for those as well.
     * */
    if (desc->flags & (AV_PIX_FMT_FLAG_BITSTREAM | AV_PIX_FMT_FLAG_HWACCEL)) {
        frame->width      -= frame->crop_right;
        frame->height     -= frame->crop_bottom;
        frame->crop_right  = 0;
        frame->crop_bottom = 0;
        return 0;
    }

    /* calculate the offsets for each plane */
    calc_cropping_offsets(offsets, frame, desc);

    /* adjust the offsets to avoid breaking alignment */
    if (!(flags & AV_FRAME_CROP_UNALIGNED)) {
        int log2_crop_align = frame->crop_left ? ff_ctz(frame->crop_left) : INT_MAX;
        int min_log2_align = INT_MAX;

        for (i = 0; frame->data[i]; i++) {
            int log2_align = offsets[i] ? ff_ctz(offsets[i]) : INT_MAX;
            min_log2_align = FFMIN(log2_align, min_log2_align);
        }

        /* we assume, and it should always be true, that the data alignment is
         * related to the cropping alignment by a constant power-of-2 factor */
        if (log2_crop_align < min_log2_align)
            return AVERROR_BUG;

        if (min_log2_align < 5) {
            frame->crop_left &= ~((1 << (5 + log2_crop_align - min_log2_align)) - 1);
            calc_cropping_offsets(offsets, frame, desc);
        }
    }

    for (i = 0; frame->data[i]; i++)
        frame->data[i] += offsets[i];

    frame->width  -= (frame->crop_left + frame->crop_right);
    frame->height -= (frame->crop_top  + frame->crop_bottom);
    frame->crop_left   = 0;
    frame->crop_right  = 0;
    frame->crop_top    = 0;
    frame->crop_bottom = 0;

    return 0;
}