FFmpeg
frame.c
1 /*
2  * This file is part of FFmpeg.
3  *
4  * FFmpeg is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU Lesser General Public
6  * License as published by the Free Software Foundation; either
7  * version 2.1 of the License, or (at your option) any later version.
8  *
9  * FFmpeg is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12  * Lesser General Public License for more details.
13  *
14  * You should have received a copy of the GNU Lesser General Public
15  * License along with FFmpeg; if not, write to the Free Software
16  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17  */
18 
19 #include "channel_layout.h"
20 #include "avassert.h"
21 #include "buffer.h"
22 #include "common.h"
23 #include "dict.h"
24 #include "frame.h"
25 #include "imgutils.h"
26 #include "mem.h"
27 #include "samplefmt.h"
28 #include "hwcontext.h"
29 
30 #define CHECK_CHANNELS_CONSISTENCY(frame) \
31  av_assert2(!(frame)->channel_layout || \
32  (frame)->channels == \
33  av_get_channel_layout_nb_channels((frame)->channel_layout))
34 
35 #if FF_API_COLORSPACE_NAME
36 const char *av_get_colorspace_name(enum AVColorSpace val)
37 {
38  static const char * const name[] = {
39  [AVCOL_SPC_RGB] = "GBR",
40  [AVCOL_SPC_BT709] = "bt709",
41  [AVCOL_SPC_FCC] = "fcc",
42  [AVCOL_SPC_BT470BG] = "bt470bg",
43  [AVCOL_SPC_SMPTE170M] = "smpte170m",
44  [AVCOL_SPC_SMPTE240M] = "smpte240m",
45  [AVCOL_SPC_YCOCG] = "YCgCo",
46  };
47  if ((unsigned)val >= FF_ARRAY_ELEMS(name))
48  return NULL;
49  return name[val];
50 }
51 #endif
52 static void get_frame_defaults(AVFrame *frame)
53 {
54  if (frame->extended_data != frame->data)
55  av_freep(&frame->extended_data);
56 
57  memset(frame, 0, sizeof(*frame));
58 
59  frame->pts =
60  frame->pkt_dts = AV_NOPTS_VALUE;
61  frame->best_effort_timestamp = AV_NOPTS_VALUE;
62  frame->pkt_duration = 0;
63  frame->pkt_pos = -1;
64  frame->pkt_size = -1;
65  frame->key_frame = 1;
66  frame->sample_aspect_ratio = (AVRational){ 0, 1 };
67  frame->format = -1; /* unknown */
68  frame->extended_data = frame->data;
69  frame->color_primaries = AVCOL_PRI_UNSPECIFIED;
70  frame->color_trc = AVCOL_TRC_UNSPECIFIED;
71  frame->colorspace = AVCOL_SPC_UNSPECIFIED;
72  frame->color_range = AVCOL_RANGE_UNSPECIFIED;
73  frame->chroma_location = AVCHROMA_LOC_UNSPECIFIED;
74  frame->flags = 0;
75 }
76 
77 static void free_side_data(AVFrameSideData **ptr_sd)
78 {
79  AVFrameSideData *sd = *ptr_sd;
80 
81  av_buffer_unref(&sd->buf);
82  av_dict_free(&sd->metadata);
83  av_freep(ptr_sd);
84 }
85 
86 static void wipe_side_data(AVFrame *frame)
87 {
88  int i;
89 
90  for (i = 0; i < frame->nb_side_data; i++) {
91  free_side_data(&frame->side_data[i]);
92  }
93  frame->nb_side_data = 0;
94 
95  av_freep(&frame->side_data);
96 }
97 
98 AVFrame *av_frame_alloc(void)
99 {
100  AVFrame *frame = av_mallocz(sizeof(*frame));
101 
102  if (!frame)
103  return NULL;
104 
105  frame->extended_data = NULL;
106  get_frame_defaults(frame);
107 
108  return frame;
109 }
110 
111 void av_frame_free(AVFrame **frame)
112 {
113  if (!frame || !*frame)
114  return;
115 
116  av_frame_unref(*frame);
117  av_freep(frame);
118 }
119 
120 static int get_video_buffer(AVFrame *frame, int align)
121 {
122  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
123  int ret, i, padded_height, total_size;
124  int plane_padding = FFMAX(16 + 16/*STRIDE_ALIGN*/, align);
125  ptrdiff_t linesizes[4];
126  size_t sizes[4];
127 
128  if (!desc)
129  return AVERROR(EINVAL);
130 
131  if ((ret = av_image_check_size(frame->width, frame->height, 0, NULL)) < 0)
132  return ret;
133 
134  if (!frame->linesize[0]) {
135  if (align <= 0)
136  align = 32; /* STRIDE_ALIGN. Should be av_cpu_max_align() */
137 
138  for(i=1; i<=align; i+=i) {
139  ret = av_image_fill_linesizes(frame->linesize, frame->format,
140  FFALIGN(frame->width, i));
141  if (ret < 0)
142  return ret;
143  if (!(frame->linesize[0] & (align-1)))
144  break;
145  }
146 
147  for (i = 0; i < 4 && frame->linesize[i]; i++)
148  frame->linesize[i] = FFALIGN(frame->linesize[i], align);
149  }
150 
151  for (i = 0; i < 4; i++)
152  linesizes[i] = frame->linesize[i];
153 
154  padded_height = FFALIGN(frame->height, 32);
155  if ((ret = av_image_fill_plane_sizes(sizes, frame->format,
156  padded_height, linesizes)) < 0)
157  return ret;
158 
159  total_size = 4*plane_padding;
160  for (i = 0; i < 4; i++) {
161  if (sizes[i] > INT_MAX - total_size)
162  return AVERROR(EINVAL);
163  total_size += sizes[i];
164  }
165 
166  frame->buf[0] = av_buffer_alloc(total_size);
167  if (!frame->buf[0]) {
168  ret = AVERROR(ENOMEM);
169  goto fail;
170  }
171 
172  if ((ret = av_image_fill_pointers(frame->data, frame->format, padded_height,
173  frame->buf[0]->data, frame->linesize)) < 0)
174  goto fail;
175 
176  for (i = 1; i < 4; i++) {
177  if (frame->data[i])
178  frame->data[i] += i * plane_padding;
179  }
180 
181  frame->extended_data = frame->data;
182 
183  return 0;
184 fail:
185  av_frame_unref(frame);
186  return ret;
187 }
188 
189 static int get_audio_buffer(AVFrame *frame, int align)
190 {
191  int channels;
192  int planar = av_sample_fmt_is_planar(frame->format);
193  int planes;
194  int ret, i;
195 
196  if (!frame->channels)
197  frame->channels = av_get_channel_layout_nb_channels(frame->channel_layout);
198 
199  channels = frame->channels;
200  planes = planar ? channels : 1;
201 
202  CHECK_CHANNELS_CONSISTENCY(frame);
203  if (!frame->linesize[0]) {
204  ret = av_samples_get_buffer_size(&frame->linesize[0], channels,
205  frame->nb_samples, frame->format,
206  align);
207  if (ret < 0)
208  return ret;
209  }
210 
211  if (planes > AV_NUM_DATA_POINTERS) {
212  frame->extended_data = av_mallocz_array(planes,
213  sizeof(*frame->extended_data));
214  frame->extended_buf = av_mallocz_array((planes - AV_NUM_DATA_POINTERS),
215  sizeof(*frame->extended_buf));
216  if (!frame->extended_data || !frame->extended_buf) {
217  av_freep(&frame->extended_data);
218  av_freep(&frame->extended_buf);
219  return AVERROR(ENOMEM);
220  }
221  frame->nb_extended_buf = planes - AV_NUM_DATA_POINTERS;
222  } else
223  frame->extended_data = frame->data;
224 
225  for (i = 0; i < FFMIN(planes, AV_NUM_DATA_POINTERS); i++) {
226  frame->buf[i] = av_buffer_alloc(frame->linesize[0]);
227  if (!frame->buf[i]) {
228  av_frame_unref(frame);
229  return AVERROR(ENOMEM);
230  }
231  frame->extended_data[i] = frame->data[i] = frame->buf[i]->data;
232  }
233  for (i = 0; i < planes - AV_NUM_DATA_POINTERS; i++) {
234  frame->extended_buf[i] = av_buffer_alloc(frame->linesize[0]);
235  if (!frame->extended_buf[i]) {
236  av_frame_unref(frame);
237  return AVERROR(ENOMEM);
238  }
239  frame->extended_data[i + AV_NUM_DATA_POINTERS] = frame->extended_buf[i]->data;
240  }
241  return 0;
242 
243 }
244 
245 int av_frame_get_buffer(AVFrame *frame, int align)
246 {
247  if (frame->format < 0)
248  return AVERROR(EINVAL);
249 
250  if (frame->width > 0 && frame->height > 0)
251  return get_video_buffer(frame, align);
252  else if (frame->nb_samples > 0 && (frame->channel_layout || frame->channels > 0))
253  return get_audio_buffer(frame, align);
254 
255  return AVERROR(EINVAL);
256 }
257 
258 static int frame_copy_props(AVFrame *dst, const AVFrame *src, int force_copy)
259 {
260  int ret, i;
261 
262  dst->key_frame = src->key_frame;
263  dst->pict_type = src->pict_type;
264  dst->sample_aspect_ratio = src->sample_aspect_ratio;
265  dst->crop_top = src->crop_top;
266  dst->crop_bottom = src->crop_bottom;
267  dst->crop_left = src->crop_left;
268  dst->crop_right = src->crop_right;
269  dst->pts = src->pts;
270  dst->repeat_pict = src->repeat_pict;
271  dst->interlaced_frame = src->interlaced_frame;
272  dst->top_field_first = src->top_field_first;
273  dst->palette_has_changed = src->palette_has_changed;
274  dst->sample_rate = src->sample_rate;
275  dst->opaque = src->opaque;
276  dst->pkt_dts = src->pkt_dts;
277  dst->pkt_pos = src->pkt_pos;
278  dst->pkt_size = src->pkt_size;
279  dst->pkt_duration = src->pkt_duration;
280  dst->reordered_opaque = src->reordered_opaque;
281  dst->quality = src->quality;
282  dst->best_effort_timestamp = src->best_effort_timestamp;
283  dst->coded_picture_number = src->coded_picture_number;
284  dst->display_picture_number = src->display_picture_number;
285  dst->flags = src->flags;
286  dst->decode_error_flags = src->decode_error_flags;
287  dst->color_primaries = src->color_primaries;
288  dst->color_trc = src->color_trc;
289  dst->colorspace = src->colorspace;
290  dst->color_range = src->color_range;
291  dst->chroma_location = src->chroma_location;
292 
293  av_dict_copy(&dst->metadata, src->metadata, 0);
294 
295  for (i = 0; i < src->nb_side_data; i++) {
296  const AVFrameSideData *sd_src = src->side_data[i];
297  AVFrameSideData *sd_dst;
298  if ( sd_src->type == AV_FRAME_DATA_PANSCAN
299  && (src->width != dst->width || src->height != dst->height))
300  continue;
301  if (force_copy) {
302  sd_dst = av_frame_new_side_data(dst, sd_src->type,
303  sd_src->size);
304  if (!sd_dst) {
305  wipe_side_data(dst);
306  return AVERROR(ENOMEM);
307  }
308  memcpy(sd_dst->data, sd_src->data, sd_src->size);
309  } else {
310  AVBufferRef *ref = av_buffer_ref(sd_src->buf);
311  sd_dst = av_frame_new_side_data_from_buf(dst, sd_src->type, ref);
312  if (!sd_dst) {
313  av_buffer_unref(&ref);
314  wipe_side_data(dst);
315  return AVERROR(ENOMEM);
316  }
317  }
318  av_dict_copy(&sd_dst->metadata, sd_src->metadata, 0);
319  }
320 
321  ret = av_buffer_replace(&dst->opaque_ref, src->opaque_ref);
322  ret |= av_buffer_replace(&dst->private_ref, src->private_ref);
323  return ret;
324 }
325 
326 int av_frame_ref(AVFrame *dst, const AVFrame *src)
327 {
328  int i, ret = 0;
329 
330  av_assert1(dst->width == 0 && dst->height == 0);
331  av_assert1(dst->channels == 0);
332 
333  dst->format = src->format;
334  dst->width = src->width;
335  dst->height = src->height;
336  dst->channels = src->channels;
337  dst->channel_layout = src->channel_layout;
338  dst->nb_samples = src->nb_samples;
339 
340  ret = frame_copy_props(dst, src, 0);
341  if (ret < 0)
342  goto fail;
343 
344  /* duplicate the frame data if it's not refcounted */
345  if (!src->buf[0]) {
346  ret = av_frame_get_buffer(dst, 0);
347  if (ret < 0)
348  goto fail;
349 
350  ret = av_frame_copy(dst, src);
351  if (ret < 0)
352  goto fail;
353 
354  return 0;
355  }
356 
357  /* ref the buffers */
358  for (i = 0; i < FF_ARRAY_ELEMS(src->buf); i++) {
359  if (!src->buf[i])
360  continue;
361  dst->buf[i] = av_buffer_ref(src->buf[i]);
362  if (!dst->buf[i]) {
363  ret = AVERROR(ENOMEM);
364  goto fail;
365  }
366  }
367 
368  if (src->extended_buf) {
369  dst->extended_buf = av_mallocz_array(sizeof(*dst->extended_buf),
370  src->nb_extended_buf);
371  if (!dst->extended_buf) {
372  ret = AVERROR(ENOMEM);
373  goto fail;
374  }
375  dst->nb_extended_buf = src->nb_extended_buf;
376 
377  for (i = 0; i < src->nb_extended_buf; i++) {
378  dst->extended_buf[i] = av_buffer_ref(src->extended_buf[i]);
379  if (!dst->extended_buf[i]) {
380  ret = AVERROR(ENOMEM);
381  goto fail;
382  }
383  }
384  }
385 
386  if (src->hw_frames_ctx) {
387  dst->hw_frames_ctx = av_buffer_ref(src->hw_frames_ctx);
388  if (!dst->hw_frames_ctx) {
389  ret = AVERROR(ENOMEM);
390  goto fail;
391  }
392  }
393 
394  /* duplicate extended data */
395  if (src->extended_data != src->data) {
396  int ch = src->channels;
397 
398  if (!ch) {
399  ret = AVERROR(EINVAL);
400  goto fail;
401  }
402  CHECK_CHANNELS_CONSISTENCY(src);
403 
404  dst->extended_data = av_malloc_array(sizeof(*dst->extended_data), ch);
405  if (!dst->extended_data) {
406  ret = AVERROR(ENOMEM);
407  goto fail;
408  }
409  memcpy(dst->extended_data, src->extended_data, sizeof(*src->extended_data) * ch);
410  } else
411  dst->extended_data = dst->data;
412 
413  memcpy(dst->data, src->data, sizeof(src->data));
414  memcpy(dst->linesize, src->linesize, sizeof(src->linesize));
415 
416  return 0;
417 
418 fail:
419  av_frame_unref(dst);
420  return ret;
421 }
422 
423 AVFrame *av_frame_clone(const AVFrame *src)
424 {
425  AVFrame *ret = av_frame_alloc();
426 
427  if (!ret)
428  return NULL;
429 
430  if (av_frame_ref(ret, src) < 0)
431  av_frame_free(&ret);
432 
433  return ret;
434 }
435 
436 void av_frame_unref(AVFrame *frame)
437 {
438  int i;
439 
440  if (!frame)
441  return;
442 
443  wipe_side_data(frame);
444 
445  for (i = 0; i < FF_ARRAY_ELEMS(frame->buf); i++)
446  av_buffer_unref(&frame->buf[i]);
447  for (i = 0; i < frame->nb_extended_buf; i++)
448  av_buffer_unref(&frame->extended_buf[i]);
449  av_freep(&frame->extended_buf);
450  av_dict_free(&frame->metadata);
451 
452  av_buffer_unref(&frame->hw_frames_ctx);
453 
454  av_buffer_unref(&frame->opaque_ref);
455  av_buffer_unref(&frame->private_ref);
456 
457  get_frame_defaults(frame);
458 }
459 
460 void av_frame_move_ref(AVFrame *dst, AVFrame *src)
461 {
462  av_assert1(dst->width == 0 && dst->height == 0);
463  av_assert1(dst->channels == 0);
464 
465  *dst = *src;
466  if (src->extended_data == src->data)
467  dst->extended_data = dst->data;
468  memset(src, 0, sizeof(*src));
469  get_frame_defaults(src);
470 }
471 
472 int av_frame_is_writable(AVFrame *frame)
473 {
474  int i, ret = 1;
475 
476  /* assume non-refcounted frames are not writable */
477  if (!frame->buf[0])
478  return 0;
479 
480  for (i = 0; i < FF_ARRAY_ELEMS(frame->buf); i++)
481  if (frame->buf[i])
482  ret &= !!av_buffer_is_writable(frame->buf[i]);
483  for (i = 0; i < frame->nb_extended_buf; i++)
484  ret &= !!av_buffer_is_writable(frame->extended_buf[i]);
485 
486  return ret;
487 }
488 
489 int av_frame_make_writable(AVFrame *frame)
490 {
491  AVFrame tmp;
492  int ret;
493 
494  if (!frame->buf[0])
495  return AVERROR(EINVAL);
496 
497  if (av_frame_is_writable(frame))
498  return 0;
499 
500  memset(&tmp, 0, sizeof(tmp));
501  tmp.format = frame->format;
502  tmp.width = frame->width;
503  tmp.height = frame->height;
504  tmp.channels = frame->channels;
505  tmp.channel_layout = frame->channel_layout;
506  tmp.nb_samples = frame->nb_samples;
507 
508  if (frame->hw_frames_ctx)
509  ret = av_hwframe_get_buffer(frame->hw_frames_ctx, &tmp, 0);
510  else
511  ret = av_frame_get_buffer(&tmp, 0);
512  if (ret < 0)
513  return ret;
514 
515  ret = av_frame_copy(&tmp, frame);
516  if (ret < 0) {
517  av_frame_unref(&tmp);
518  return ret;
519  }
520 
521  ret = av_frame_copy_props(&tmp, frame);
522  if (ret < 0) {
523  av_frame_unref(&tmp);
524  return ret;
525  }
526 
527  av_frame_unref(frame);
528 
529  *frame = tmp;
530  if (tmp.data == tmp.extended_data)
531  frame->extended_data = frame->data;
532 
533  return 0;
534 }
535 
536 int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
537 {
538  return frame_copy_props(dst, src, 1);
539 }
540 
541 AVBufferRef *av_frame_get_plane_buffer(AVFrame *frame, int plane)
542 {
543  uint8_t *data;
544  int planes, i;
545 
546  if (frame->nb_samples) {
547  int channels = frame->channels;
548  if (!channels)
549  return NULL;
550  CHECK_CHANNELS_CONSISTENCY(frame);
551  planes = av_sample_fmt_is_planar(frame->format) ? channels : 1;
552  } else
553  planes = 4;
554 
555  if (plane < 0 || plane >= planes || !frame->extended_data[plane])
556  return NULL;
557  data = frame->extended_data[plane];
558 
559  for (i = 0; i < FF_ARRAY_ELEMS(frame->buf) && frame->buf[i]; i++) {
560  AVBufferRef *buf = frame->buf[i];
561  if (data >= buf->data && data < buf->data + buf->size)
562  return buf;
563  }
564  for (i = 0; i < frame->nb_extended_buf; i++) {
565  AVBufferRef *buf = frame->extended_buf[i];
566  if (data >= buf->data && data < buf->data + buf->size)
567  return buf;
568  }
569  return NULL;
570 }
571 
572 AVFrameSideData *av_frame_new_side_data_from_buf(AVFrame *frame,
573  enum AVFrameSideDataType type,
574  AVBufferRef *buf)
575 {
576  AVFrameSideData *ret, **tmp;
577 
578  if (!buf)
579  return NULL;
580 
581  if (frame->nb_side_data > INT_MAX / sizeof(*frame->side_data) - 1)
582  return NULL;
583 
584  tmp = av_realloc(frame->side_data,
585  (frame->nb_side_data + 1) * sizeof(*frame->side_data));
586  if (!tmp)
587  return NULL;
588  frame->side_data = tmp;
589 
590  ret = av_mallocz(sizeof(*ret));
591  if (!ret)
592  return NULL;
593 
594  ret->buf = buf;
595  ret->data = ret->buf->data;
596  ret->size = buf->size;
597  ret->type = type;
598 
599  frame->side_data[frame->nb_side_data++] = ret;
600 
601  return ret;
602 }
603 
604 AVFrameSideData *av_frame_new_side_data(AVFrame *frame,
605  enum AVFrameSideDataType type,
606  size_t size)
607 {
608  AVFrameSideData *ret;
609  AVBufferRef *buf = av_buffer_alloc(size);
610  ret = av_frame_new_side_data_from_buf(frame, type, buf);
611  if (!ret)
612  av_buffer_unref(&buf);
613  return ret;
614 }
615 
616 AVFrameSideData *av_frame_get_side_data(const AVFrame *frame,
617  enum AVFrameSideDataType type)
618 {
619  int i;
620 
621  for (i = 0; i < frame->nb_side_data; i++) {
622  if (frame->side_data[i]->type == type)
623  return frame->side_data[i];
624  }
625  return NULL;
626 }
627 
628 static int frame_copy_video(AVFrame *dst, const AVFrame *src)
629 {
630  const uint8_t *src_data[4];
631  int i, planes;
632 
633  if (dst->width < src->width ||
634  dst->height < src->height)
635  return AVERROR(EINVAL);
636 
637  if (src->hw_frames_ctx || dst->hw_frames_ctx)
638  return av_hwframe_transfer_data(dst, src, 0);
639 
640  planes = av_pix_fmt_count_planes(dst->format);
641  for (i = 0; i < planes; i++)
642  if (!dst->data[i] || !src->data[i])
643  return AVERROR(EINVAL);
644 
645  memcpy(src_data, src->data, sizeof(src_data));
646  av_image_copy(dst->data, dst->linesize,
647  src_data, src->linesize,
648  dst->format, src->width, src->height);
649 
650  return 0;
651 }
652 
653 static int frame_copy_audio(AVFrame *dst, const AVFrame *src)
654 {
655  int planar = av_sample_fmt_is_planar(dst->format);
656  int channels = dst->channels;
657  int planes = planar ? channels : 1;
658  int i;
659 
660  if (dst->nb_samples != src->nb_samples ||
661  dst->channels != src->channels ||
662  dst->channel_layout != src->channel_layout)
663  return AVERROR(EINVAL);
664 
665  CHECK_CHANNELS_CONSISTENCY(dst);
666 
667  for (i = 0; i < planes; i++)
668  if (!dst->extended_data[i] || !src->extended_data[i])
669  return AVERROR(EINVAL);
670 
671  av_samples_copy(dst->extended_data, src->extended_data, 0, 0,
672  dst->nb_samples, channels, dst->format);
673 
674  return 0;
675 }
676 
677 int av_frame_copy(AVFrame *dst, const AVFrame *src)
678 {
679  if (dst->format != src->format || dst->format < 0)
680  return AVERROR(EINVAL);
681 
682  if (dst->width > 0 && dst->height > 0)
683  return frame_copy_video(dst, src);
684  else if (dst->nb_samples > 0 && dst->channels > 0)
685  return frame_copy_audio(dst, src);
686 
687  return AVERROR(EINVAL);
688 }
689 
690 void av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type)
691 {
692  int i;
693 
694  for (i = frame->nb_side_data - 1; i >= 0; i--) {
695  AVFrameSideData *sd = frame->side_data[i];
696  if (sd->type == type) {
697  free_side_data(&frame->side_data[i]);
698  frame->side_data[i] = frame->side_data[frame->nb_side_data - 1];
699  frame->nb_side_data--;
700  }
701  }
702 }
703 
704 const char *av_frame_side_data_name(enum AVFrameSideDataType type)
705 {
706  switch(type) {
707  case AV_FRAME_DATA_PANSCAN: return "AVPanScan";
708  case AV_FRAME_DATA_A53_CC: return "ATSC A53 Part 4 Closed Captions";
709  case AV_FRAME_DATA_STEREO3D: return "Stereo 3D";
710  case AV_FRAME_DATA_MATRIXENCODING: return "AVMatrixEncoding";
711  case AV_FRAME_DATA_DOWNMIX_INFO: return "Metadata relevant to a downmix procedure";
712  case AV_FRAME_DATA_REPLAYGAIN: return "AVReplayGain";
713  case AV_FRAME_DATA_DISPLAYMATRIX: return "3x3 displaymatrix";
714  case AV_FRAME_DATA_AFD: return "Active format description";
715  case AV_FRAME_DATA_MOTION_VECTORS: return "Motion vectors";
716  case AV_FRAME_DATA_SKIP_SAMPLES: return "Skip samples";
717  case AV_FRAME_DATA_AUDIO_SERVICE_TYPE: return "Audio service type";
718  case AV_FRAME_DATA_MASTERING_DISPLAY_METADATA: return "Mastering display metadata";
719  case AV_FRAME_DATA_CONTENT_LIGHT_LEVEL: return "Content light level metadata";
720  case AV_FRAME_DATA_GOP_TIMECODE: return "GOP timecode";
721  case AV_FRAME_DATA_S12M_TIMECODE: return "SMPTE 12-1 timecode";
722  case AV_FRAME_DATA_SPHERICAL: return "Spherical Mapping";
723  case AV_FRAME_DATA_ICC_PROFILE: return "ICC profile";
724  case AV_FRAME_DATA_DYNAMIC_HDR_PLUS: return "HDR Dynamic Metadata SMPTE2094-40 (HDR10+)";
725  case AV_FRAME_DATA_REGIONS_OF_INTEREST: return "Regions Of Interest";
726  case AV_FRAME_DATA_VIDEO_ENC_PARAMS: return "Video encoding parameters";
727  case AV_FRAME_DATA_SEI_UNREGISTERED: return "H.26[45] User Data Unregistered SEI message";
728  case AV_FRAME_DATA_FILM_GRAIN_PARAMS: return "Film grain parameters";
729  case AV_FRAME_DATA_DETECTION_BBOXES: return "Bounding boxes for object detection and classification";
730  }
731  return NULL;
732 }
733 
734 static int calc_cropping_offsets(size_t offsets[4], const AVFrame *frame,
735  const AVPixFmtDescriptor *desc)
736 {
737  int i, j;
738 
739  for (i = 0; frame->data[i]; i++) {
740  const AVComponentDescriptor *comp = NULL;
741  int shift_x = (i == 1 || i == 2) ? desc->log2_chroma_w : 0;
742  int shift_y = (i == 1 || i == 2) ? desc->log2_chroma_h : 0;
743 
744  if (desc->flags & AV_PIX_FMT_FLAG_PAL && i == 1) {
745  offsets[i] = 0;
746  break;
747  }
748 
749  /* find any component descriptor for this plane */
750  for (j = 0; j < desc->nb_components; j++) {
751  if (desc->comp[j].plane == i) {
752  comp = &desc->comp[j];
753  break;
754  }
755  }
756  if (!comp)
757  return AVERROR_BUG;
758 
759  offsets[i] = (frame->crop_top >> shift_y) * frame->linesize[i] +
760  (frame->crop_left >> shift_x) * comp->step;
761  }
762 
763  return 0;
764 }
765 
766 int av_frame_apply_cropping(AVFrame *frame, int flags)
767 {
768  const AVPixFmtDescriptor *desc;
769  size_t offsets[4];
770  int i;
771 
772  if (!(frame->width > 0 && frame->height > 0))
773  return AVERROR(EINVAL);
774 
775  if (frame->crop_left >= INT_MAX - frame->crop_right ||
776  frame->crop_top >= INT_MAX - frame->crop_bottom ||
777  (frame->crop_left + frame->crop_right) >= frame->width ||
778  (frame->crop_top + frame->crop_bottom) >= frame->height)
779  return AVERROR(ERANGE);
780 
781  desc = av_pix_fmt_desc_get(frame->format);
782  if (!desc)
783  return AVERROR_BUG;
784 
785  /* Apply just the right/bottom cropping for hwaccel formats. Bitstream
786  * formats cannot be easily handled here either (and corresponding decoders
787  * should not export any cropping anyway), so do the same for those as well.
788  * */
789  if (desc->flags & (AV_PIX_FMT_FLAG_BITSTREAM | AV_PIX_FMT_FLAG_HWACCEL)) {
790  frame->width -= frame->crop_right;
791  frame->height -= frame->crop_bottom;
792  frame->crop_right = 0;
793  frame->crop_bottom = 0;
794  return 0;
795  }
796 
797  /* calculate the offsets for each plane */
798  calc_cropping_offsets(offsets, frame, desc);
799 
800  /* adjust the offsets to avoid breaking alignment */
801  if (!(flags & AV_FRAME_CROP_UNALIGNED)) {
802  int log2_crop_align = frame->crop_left ? ff_ctz(frame->crop_left) : INT_MAX;
803  int min_log2_align = INT_MAX;
804 
805  for (i = 0; frame->data[i]; i++) {
806  int log2_align = offsets[i] ? ff_ctz(offsets[i]) : INT_MAX;
807  min_log2_align = FFMIN(log2_align, min_log2_align);
808  }
809 
810  /* we assume, and it should always be true, that the data alignment is
811  * related to the cropping alignment by a constant power-of-2 factor */
812  if (log2_crop_align < min_log2_align)
813  return AVERROR_BUG;
814 
815  if (min_log2_align < 5) {
816  frame->crop_left &= ~((1 << (5 + log2_crop_align - min_log2_align)) - 1);
817  calc_cropping_offsets(offsets, frame, desc);
818  }
819  }
820 
821  for (i = 0; frame->data[i]; i++)
822  frame->data[i] += offsets[i];
823 
824  frame->width -= (frame->crop_left + frame->crop_right);
825  frame->height -= (frame->crop_top + frame->crop_bottom);
826  frame->crop_left = 0;
827  frame->crop_right = 0;
828  frame->crop_top = 0;
829  frame->crop_bottom = 0;
830 
831  return 0;
832 }
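
Usage sketch (not part of frame.c): a minimal illustration of how the reference-counting API implemented above is typically combined, using only the public libavutil calls defined in this file (av_frame_alloc(), av_frame_get_buffer(), av_frame_ref(), av_frame_make_writable(), av_frame_unref() via av_frame_free()). The function name demo_frame_lifecycle and the pixel format/dimensions are placeholders chosen for illustration.

#include <libavutil/frame.h>
#include <libavutil/pixfmt.h>

int demo_frame_lifecycle(void)
{
    int ret;
    AVFrame *frame  = av_frame_alloc();  /* fields reset by get_frame_defaults() */
    AVFrame *shared = av_frame_alloc();
    if (!frame || !shared) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    frame->format = AV_PIX_FMT_YUV420P;
    frame->width  = 640;
    frame->height = 480;

    /* allocates refcounted buffers via get_video_buffer();
     * align == 0 lets libavutil pick the alignment */
    ret = av_frame_get_buffer(frame, 0);
    if (ret < 0)
        goto end;

    /* shared now holds new references to the same AVBufferRefs */
    ret = av_frame_ref(shared, frame);
    if (ret < 0)
        goto end;

    /* the data is referenced twice, so a private copy is made
     * before shared may be written to */
    ret = av_frame_make_writable(shared);

end:
    av_frame_free(&frame);
    av_frame_free(&shared);
    return ret;
}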