FFmpeg
frame.c
Go to the documentation of this file.
1 /*
2  * This file is part of FFmpeg.
3  *
4  * FFmpeg is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU Lesser General Public
6  * License as published by the Free Software Foundation; either
7  * version 2.1 of the License, or (at your option) any later version.
8  *
9  * FFmpeg is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12  * Lesser General Public License for more details.
13  *
14  * You should have received a copy of the GNU Lesser General Public
15  * License along with FFmpeg; if not, write to the Free Software
16  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17  */
18 
19 #include "channel_layout.h"
20 #include "avassert.h"
21 #include "buffer.h"
22 #include "common.h"
23 #include "dict.h"
24 #include "frame.h"
25 #include "imgutils.h"
26 #include "mem.h"
27 #include "samplefmt.h"
28 #include "hwcontext.h"
29 
#if FF_API_FRAME_GET_SET
/* Deprecated generated accessors (av_frame_get_*/av_frame_set_*), kept only
 * for ABI compatibility while FF_API_FRAME_GET_SET is enabled; new code
 * should read/write the AVFrame fields directly. */
MAKE_ACCESSORS(AVFrame, frame, int64_t, best_effort_timestamp)
MAKE_ACCESSORS(AVFrame, frame, int64_t, pkt_duration)
MAKE_ACCESSORS(AVFrame, frame, int64_t, pkt_pos)
MAKE_ACCESSORS(AVFrame, frame, int64_t, channel_layout)
MAKE_ACCESSORS(AVFrame, frame, int,     channels)
MAKE_ACCESSORS(AVFrame, frame, int,     sample_rate)
MAKE_ACCESSORS(AVFrame, frame, AVDictionary *, metadata)
MAKE_ACCESSORS(AVFrame, frame, int,     decode_error_flags)
MAKE_ACCESSORS(AVFrame, frame, int,     pkt_size)
MAKE_ACCESSORS(AVFrame, frame, enum AVColorSpace, colorspace)
MAKE_ACCESSORS(AVFrame, frame, enum AVColorRange, color_range)
#endif
43 
/* Level-2 assertion that an audio frame's channel_layout (when set) agrees
 * with its channels count; the two fields must stay in sync. */
#define CHECK_CHANNELS_CONSISTENCY(frame) \
    av_assert2(!(frame)->channel_layout || \
               (frame)->channels == \
               av_get_channel_layout_nb_channels((frame)->channel_layout))
48 
49 #if FF_API_FRAME_QP
/* Payload layout of the AV_FRAME_DATA_QP_TABLE_PROPERTIES side data,
 * mirroring the deprecated AVFrame qstride/qscale_type fields. */
struct qp_properties {
    int stride; /* row stride of the QP table */
    int type;   /* QP scale type, as stored in AVFrame.qscale_type */
};
54 
55 int av_frame_set_qp_table(AVFrame *f, AVBufferRef *buf, int stride, int qp_type)
56 {
57  struct qp_properties *p;
58  AVFrameSideData *sd;
60 
63 
64  f->qp_table_buf = buf;
65  f->qscale_table = buf->data;
66  f->qstride = stride;
67  f->qscale_type = qp_type;
69 
72 
73  ref = av_buffer_ref(buf);
75  av_buffer_unref(&ref);
76  return AVERROR(ENOMEM);
77  }
78 
80  sizeof(struct qp_properties));
81  if (!sd)
82  return AVERROR(ENOMEM);
83 
84  p = (struct qp_properties *)sd->data;
85  p->stride = stride;
86  p->type = qp_type;
87 
88  return 0;
89 }
90 
91 int8_t *av_frame_get_qp_table(AVFrame *f, int *stride, int *type)
92 {
93  AVBufferRef *buf = NULL;
94 
95  *stride = 0;
96  *type = 0;
97 
99  if (f->qp_table_buf) {
100  *stride = f->qstride;
101  *type = f->qscale_type;
102  buf = f->qp_table_buf;
104  } else {
105  AVFrameSideData *sd;
106  struct qp_properties *p;
108  if (!sd)
109  return NULL;
110  p = (struct qp_properties *)sd->data;
112  if (!sd)
113  return NULL;
114  *stride = p->stride;
115  *type = p->type;
116  buf = sd->buf;
117  }
118 
119  return buf ? buf->data : NULL;
120 }
121 #endif
122 
123 #if FF_API_COLORSPACE_NAME
124 const char *av_get_colorspace_name(enum AVColorSpace val)
125 {
126  static const char * const name[] = {
127  [AVCOL_SPC_RGB] = "GBR",
128  [AVCOL_SPC_BT709] = "bt709",
129  [AVCOL_SPC_FCC] = "fcc",
130  [AVCOL_SPC_BT470BG] = "bt470bg",
131  [AVCOL_SPC_SMPTE170M] = "smpte170m",
132  [AVCOL_SPC_SMPTE240M] = "smpte240m",
133  [AVCOL_SPC_YCOCG] = "YCgCo",
134  };
135  if ((unsigned)val >= FF_ARRAY_ELEMS(name))
136  return NULL;
137  return name[val];
138 }
139 #endif
140 static void get_frame_defaults(AVFrame *frame)
141 {
142  if (frame->extended_data != frame->data)
143  av_freep(&frame->extended_data);
144 
145  memset(frame, 0, sizeof(*frame));
146 
147  frame->pts =
148  frame->pkt_dts = AV_NOPTS_VALUE;
149 #if FF_API_PKT_PTS
151  frame->pkt_pts = AV_NOPTS_VALUE;
153 #endif
155  frame->pkt_duration = 0;
156  frame->pkt_pos = -1;
157  frame->pkt_size = -1;
158  frame->key_frame = 1;
159  frame->sample_aspect_ratio = (AVRational){ 0, 1 };
160  frame->format = -1; /* unknown */
161  frame->extended_data = frame->data;
167  frame->flags = 0;
168 }
169 
170 static void free_side_data(AVFrameSideData **ptr_sd)
171 {
172  AVFrameSideData *sd = *ptr_sd;
173 
174  av_buffer_unref(&sd->buf);
175  av_dict_free(&sd->metadata);
176  av_freep(ptr_sd);
177 }
178 
179 static void wipe_side_data(AVFrame *frame)
180 {
181  int i;
182 
183  for (i = 0; i < frame->nb_side_data; i++) {
184  free_side_data(&frame->side_data[i]);
185  }
186  frame->nb_side_data = 0;
187 
188  av_freep(&frame->side_data);
189 }
190 
191 AVFrame *av_frame_alloc(void)
192 {
193  AVFrame *frame = av_mallocz(sizeof(*frame));
194 
195  if (!frame)
196  return NULL;
197 
198  frame->extended_data = NULL;
199  get_frame_defaults(frame);
200 
201  return frame;
202 }
203 
204 void av_frame_free(AVFrame **frame)
205 {
206  if (!frame || !*frame)
207  return;
208 
209  av_frame_unref(*frame);
210  av_freep(frame);
211 }
212 
213 static int get_video_buffer(AVFrame *frame, int align)
214 {
216  int ret, i, padded_height, total_size;
217  int plane_padding = FFMAX(16 + 16/*STRIDE_ALIGN*/, align);
218  ptrdiff_t linesizes[4];
219  size_t sizes[4];
220 
221  if (!desc)
222  return AVERROR(EINVAL);
223 
224  if ((ret = av_image_check_size(frame->width, frame->height, 0, NULL)) < 0)
225  return ret;
226 
227  if (!frame->linesize[0]) {
228  if (align <= 0)
229  align = 32; /* STRIDE_ALIGN. Should be av_cpu_max_align() */
230 
231  for(i=1; i<=align; i+=i) {
232  ret = av_image_fill_linesizes(frame->linesize, frame->format,
233  FFALIGN(frame->width, i));
234  if (ret < 0)
235  return ret;
236  if (!(frame->linesize[0] & (align-1)))
237  break;
238  }
239 
240  for (i = 0; i < 4 && frame->linesize[i]; i++)
241  frame->linesize[i] = FFALIGN(frame->linesize[i], align);
242  }
243 
244  for (i = 0; i < 4; i++)
245  linesizes[i] = frame->linesize[i];
246 
247  padded_height = FFALIGN(frame->height, 32);
248  if ((ret = av_image_fill_plane_sizes(sizes, frame->format,
249  padded_height, linesizes)) < 0)
250  return ret;
251 
252  total_size = 4*plane_padding;
253  for (i = 0; i < 4; i++) {
254  if (sizes[i] > INT_MAX - total_size)
255  return AVERROR(EINVAL);
256  total_size += sizes[i];
257  }
258 
259  frame->buf[0] = av_buffer_alloc(total_size);
260  if (!frame->buf[0]) {
261  ret = AVERROR(ENOMEM);
262  goto fail;
263  }
264 
265  if ((ret = av_image_fill_pointers(frame->data, frame->format, padded_height,
266  frame->buf[0]->data, frame->linesize)) < 0)
267  goto fail;
268 
269  for (i = 1; i < 4; i++) {
270  if (frame->data[i])
271  frame->data[i] += i * plane_padding;
272  }
273 
274  frame->extended_data = frame->data;
275 
276  return 0;
277 fail:
278  av_frame_unref(frame);
279  return ret;
280 }
281 
282 static int get_audio_buffer(AVFrame *frame, int align)
283 {
284  int channels;
285  int planar = av_sample_fmt_is_planar(frame->format);
286  int planes;
287  int ret, i;
288 
289  if (!frame->channels)
291 
292  channels = frame->channels;
293  planes = planar ? channels : 1;
294 
296  if (!frame->linesize[0]) {
297  ret = av_samples_get_buffer_size(&frame->linesize[0], channels,
298  frame->nb_samples, frame->format,
299  align);
300  if (ret < 0)
301  return ret;
302  }
303 
304  if (planes > AV_NUM_DATA_POINTERS) {
305  frame->extended_data = av_mallocz_array(planes,
306  sizeof(*frame->extended_data));
308  sizeof(*frame->extended_buf));
309  if (!frame->extended_data || !frame->extended_buf) {
310  av_freep(&frame->extended_data);
311  av_freep(&frame->extended_buf);
312  return AVERROR(ENOMEM);
313  }
314  frame->nb_extended_buf = planes - AV_NUM_DATA_POINTERS;
315  } else
316  frame->extended_data = frame->data;
317 
318  for (i = 0; i < FFMIN(planes, AV_NUM_DATA_POINTERS); i++) {
319  frame->buf[i] = av_buffer_alloc(frame->linesize[0]);
320  if (!frame->buf[i]) {
321  av_frame_unref(frame);
322  return AVERROR(ENOMEM);
323  }
324  frame->extended_data[i] = frame->data[i] = frame->buf[i]->data;
325  }
326  for (i = 0; i < planes - AV_NUM_DATA_POINTERS; i++) {
327  frame->extended_buf[i] = av_buffer_alloc(frame->linesize[0]);
328  if (!frame->extended_buf[i]) {
329  av_frame_unref(frame);
330  return AVERROR(ENOMEM);
331  }
332  frame->extended_data[i + AV_NUM_DATA_POINTERS] = frame->extended_buf[i]->data;
333  }
334  return 0;
335 
336 }
337 
338 int av_frame_get_buffer(AVFrame *frame, int align)
339 {
340  if (frame->format < 0)
341  return AVERROR(EINVAL);
342 
343  if (frame->width > 0 && frame->height > 0)
344  return get_video_buffer(frame, align);
345  else if (frame->nb_samples > 0 && (frame->channel_layout || frame->channels > 0))
346  return get_audio_buffer(frame, align);
347 
348  return AVERROR(EINVAL);
349 }
350 
351 static int frame_copy_props(AVFrame *dst, const AVFrame *src, int force_copy)
352 {
353  int ret, i;
354 
355  dst->key_frame = src->key_frame;
356  dst->pict_type = src->pict_type;
358  dst->crop_top = src->crop_top;
359  dst->crop_bottom = src->crop_bottom;
360  dst->crop_left = src->crop_left;
361  dst->crop_right = src->crop_right;
362  dst->pts = src->pts;
363  dst->repeat_pict = src->repeat_pict;
365  dst->top_field_first = src->top_field_first;
367  dst->sample_rate = src->sample_rate;
368  dst->opaque = src->opaque;
369 #if FF_API_PKT_PTS
371  dst->pkt_pts = src->pkt_pts;
373 #endif
374  dst->pkt_dts = src->pkt_dts;
375  dst->pkt_pos = src->pkt_pos;
376  dst->pkt_size = src->pkt_size;
377  dst->pkt_duration = src->pkt_duration;
379  dst->quality = src->quality;
383  dst->flags = src->flags;
385  dst->color_primaries = src->color_primaries;
386  dst->color_trc = src->color_trc;
387  dst->colorspace = src->colorspace;
388  dst->color_range = src->color_range;
389  dst->chroma_location = src->chroma_location;
390 
391  av_dict_copy(&dst->metadata, src->metadata, 0);
392 
393 #if FF_API_ERROR_FRAME
395  memcpy(dst->error, src->error, sizeof(dst->error));
397 #endif
398 
399  for (i = 0; i < src->nb_side_data; i++) {
400  const AVFrameSideData *sd_src = src->side_data[i];
401  AVFrameSideData *sd_dst;
402  if ( sd_src->type == AV_FRAME_DATA_PANSCAN
403  && (src->width != dst->width || src->height != dst->height))
404  continue;
405  if (force_copy) {
406  sd_dst = av_frame_new_side_data(dst, sd_src->type,
407  sd_src->size);
408  if (!sd_dst) {
409  wipe_side_data(dst);
410  return AVERROR(ENOMEM);
411  }
412  memcpy(sd_dst->data, sd_src->data, sd_src->size);
413  } else {
414  AVBufferRef *ref = av_buffer_ref(sd_src->buf);
415  sd_dst = av_frame_new_side_data_from_buf(dst, sd_src->type, ref);
416  if (!sd_dst) {
417  av_buffer_unref(&ref);
418  wipe_side_data(dst);
419  return AVERROR(ENOMEM);
420  }
421  }
422  av_dict_copy(&sd_dst->metadata, sd_src->metadata, 0);
423  }
424 
425 #if FF_API_FRAME_QP
427  dst->qscale_table = NULL;
428  dst->qstride = 0;
429  dst->qscale_type = 0;
431  if (dst->qp_table_buf) {
432  dst->qscale_table = dst->qp_table_buf->data;
433  dst->qstride = src->qstride;
434  dst->qscale_type = src->qscale_type;
435  }
437 #endif
438 
439  ret = av_buffer_replace(&dst->opaque_ref, src->opaque_ref);
440  ret |= av_buffer_replace(&dst->private_ref, src->private_ref);
441  return ret;
442 }
443 
444 int av_frame_ref(AVFrame *dst, const AVFrame *src)
445 {
446  int i, ret = 0;
447 
448  av_assert1(dst->width == 0 && dst->height == 0);
449  av_assert1(dst->channels == 0);
450 
451  dst->format = src->format;
452  dst->width = src->width;
453  dst->height = src->height;
454  dst->channels = src->channels;
455  dst->channel_layout = src->channel_layout;
456  dst->nb_samples = src->nb_samples;
457 
458  ret = frame_copy_props(dst, src, 0);
459  if (ret < 0)
460  goto fail;
461 
462  /* duplicate the frame data if it's not refcounted */
463  if (!src->buf[0]) {
464  ret = av_frame_get_buffer(dst, 0);
465  if (ret < 0)
466  goto fail;
467 
468  ret = av_frame_copy(dst, src);
469  if (ret < 0)
470  goto fail;
471 
472  return 0;
473  }
474 
475  /* ref the buffers */
476  for (i = 0; i < FF_ARRAY_ELEMS(src->buf); i++) {
477  if (!src->buf[i])
478  continue;
479  dst->buf[i] = av_buffer_ref(src->buf[i]);
480  if (!dst->buf[i]) {
481  ret = AVERROR(ENOMEM);
482  goto fail;
483  }
484  }
485 
486  if (src->extended_buf) {
487  dst->extended_buf = av_mallocz_array(sizeof(*dst->extended_buf),
488  src->nb_extended_buf);
489  if (!dst->extended_buf) {
490  ret = AVERROR(ENOMEM);
491  goto fail;
492  }
493  dst->nb_extended_buf = src->nb_extended_buf;
494 
495  for (i = 0; i < src->nb_extended_buf; i++) {
496  dst->extended_buf[i] = av_buffer_ref(src->extended_buf[i]);
497  if (!dst->extended_buf[i]) {
498  ret = AVERROR(ENOMEM);
499  goto fail;
500  }
501  }
502  }
503 
504  if (src->hw_frames_ctx) {
506  if (!dst->hw_frames_ctx) {
507  ret = AVERROR(ENOMEM);
508  goto fail;
509  }
510  }
511 
512  /* duplicate extended data */
513  if (src->extended_data != src->data) {
514  int ch = src->channels;
515 
516  if (!ch) {
517  ret = AVERROR(EINVAL);
518  goto fail;
519  }
521 
522  dst->extended_data = av_malloc_array(sizeof(*dst->extended_data), ch);
523  if (!dst->extended_data) {
524  ret = AVERROR(ENOMEM);
525  goto fail;
526  }
527  memcpy(dst->extended_data, src->extended_data, sizeof(*src->extended_data) * ch);
528  } else
529  dst->extended_data = dst->data;
530 
531  memcpy(dst->data, src->data, sizeof(src->data));
532  memcpy(dst->linesize, src->linesize, sizeof(src->linesize));
533 
534  return 0;
535 
536 fail:
537  av_frame_unref(dst);
538  return ret;
539 }
540 
541 AVFrame *av_frame_clone(const AVFrame *src)
542 {
543  AVFrame *ret = av_frame_alloc();
544 
545  if (!ret)
546  return NULL;
547 
548  if (av_frame_ref(ret, src) < 0)
549  av_frame_free(&ret);
550 
551  return ret;
552 }
553 
554 void av_frame_unref(AVFrame *frame)
555 {
556  int i;
557 
558  if (!frame)
559  return;
560 
561  wipe_side_data(frame);
562 
563  for (i = 0; i < FF_ARRAY_ELEMS(frame->buf); i++)
564  av_buffer_unref(&frame->buf[i]);
565  for (i = 0; i < frame->nb_extended_buf; i++)
566  av_buffer_unref(&frame->extended_buf[i]);
567  av_freep(&frame->extended_buf);
568  av_dict_free(&frame->metadata);
569 #if FF_API_FRAME_QP
571  av_buffer_unref(&frame->qp_table_buf);
573 #endif
574 
576 
577  av_buffer_unref(&frame->opaque_ref);
578  av_buffer_unref(&frame->private_ref);
579 
580  get_frame_defaults(frame);
581 }
582 
/**
 * Move everything contained in src to dst and reset src to defaults.
 * dst is assumed clean (unreferenced); asserted at level 1.
 */
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
{
    av_assert1(dst->width == 0 && dst->height == 0);
    av_assert1(dst->channels == 0);

    *dst = *src;
    /* if src used its internal data[] array, re-point dst at its own copy */
    if (src->extended_data == src->data)
        dst->extended_data = dst->data;
    /* zero src before resetting so get_frame_defaults does not free
     * the extended_data array now owned by dst */
    memset(src, 0, sizeof(*src));
    get_frame_defaults(src);
}
594 
595 int av_frame_is_writable(AVFrame *frame)
596 {
597  int i, ret = 1;
598 
599  /* assume non-refcounted frames are not writable */
600  if (!frame->buf[0])
601  return 0;
602 
603  for (i = 0; i < FF_ARRAY_ELEMS(frame->buf); i++)
604  if (frame->buf[i])
605  ret &= !!av_buffer_is_writable(frame->buf[i]);
606  for (i = 0; i < frame->nb_extended_buf; i++)
607  ret &= !!av_buffer_is_writable(frame->extended_buf[i]);
608 
609  return ret;
610 }
611 
/**
 * Ensure the frame data is writable, making a private copy if it is not.
 *
 * Non-refcounted frames (no buf[0]) are rejected with EINVAL. If the frame
 * is already writable this is a no-op; otherwise new buffers are allocated
 * (via the hwframe pool when hw_frames_ctx is set), data and properties are
 * copied, and the old references dropped.
 *
 * @return 0 on success, a negative AVERROR on error (frame unchanged).
 */
int av_frame_make_writable(AVFrame *frame)
{
    AVFrame tmp;
    int ret;

    if (!frame->buf[0])
        return AVERROR(EINVAL);

    if (av_frame_is_writable(frame))
        return 0;

    /* describe only the geometry so get_buffer can allocate a twin */
    memset(&tmp, 0, sizeof(tmp));
    tmp.format         = frame->format;
    tmp.width          = frame->width;
    tmp.height         = frame->height;
    tmp.channels       = frame->channels;
    tmp.channel_layout = frame->channel_layout;
    tmp.nb_samples     = frame->nb_samples;

    if (frame->hw_frames_ctx)
        ret = av_hwframe_get_buffer(frame->hw_frames_ctx, &tmp, 0);
    else
        ret = av_frame_get_buffer(&tmp, 0);
    if (ret < 0)
        return ret;

    ret = av_frame_copy(&tmp, frame);
    if (ret < 0) {
        av_frame_unref(&tmp);
        return ret;
    }

    ret = av_frame_copy_props(&tmp, frame);
    if (ret < 0) {
        av_frame_unref(&tmp);
        return ret;
    }

    av_frame_unref(frame);

    *frame = tmp;
    /* tmp's struct copy was shallow; fix up the internal-array alias */
    if (tmp.data == tmp.extended_data)
        frame->extended_data = frame->data;

    return 0;
}
658 
/**
 * Copy only "metadata" properties (no data planes or buffer refs) from
 * src to dst, deep-copying side data payloads (force_copy = 1).
 */
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
{
    return frame_copy_props(dst, src, 1);
}
663 
664 AVBufferRef *av_frame_get_plane_buffer(AVFrame *frame, int plane)
665 {
666  uint8_t *data;
667  int planes, i;
668 
669  if (frame->nb_samples) {
670  int channels = frame->channels;
671  if (!channels)
672  return NULL;
674  planes = av_sample_fmt_is_planar(frame->format) ? channels : 1;
675  } else
676  planes = 4;
677 
678  if (plane < 0 || plane >= planes || !frame->extended_data[plane])
679  return NULL;
680  data = frame->extended_data[plane];
681 
682  for (i = 0; i < FF_ARRAY_ELEMS(frame->buf) && frame->buf[i]; i++) {
683  AVBufferRef *buf = frame->buf[i];
684  if (data >= buf->data && data < buf->data + buf->size)
685  return buf;
686  }
687  for (i = 0; i < frame->nb_extended_buf; i++) {
688  AVBufferRef *buf = frame->extended_buf[i];
689  if (data >= buf->data && data < buf->data + buf->size)
690  return buf;
691  }
692  return NULL;
693 }
694 
697  AVBufferRef *buf)
698 {
699  AVFrameSideData *ret, **tmp;
700 
701  if (!buf)
702  return NULL;
703 
704  if (frame->nb_side_data > INT_MAX / sizeof(*frame->side_data) - 1)
705  return NULL;
706 
707  tmp = av_realloc(frame->side_data,
708  (frame->nb_side_data + 1) * sizeof(*frame->side_data));
709  if (!tmp)
710  return NULL;
711  frame->side_data = tmp;
712 
713  ret = av_mallocz(sizeof(*ret));
714  if (!ret)
715  return NULL;
716 
717  ret->buf = buf;
718  ret->data = ret->buf->data;
719  ret->size = buf->size;
720  ret->type = type;
721 
722  frame->side_data[frame->nb_side_data++] = ret;
723 
724  return ret;
725 }
726 
730 {
732  AVBufferRef *buf = av_buffer_alloc(size);
733  ret = av_frame_new_side_data_from_buf(frame, type, buf);
734  if (!ret)
735  av_buffer_unref(&buf);
736  return ret;
737 }
738 
741 {
742  int i;
743 
744  for (i = 0; i < frame->nb_side_data; i++) {
745  if (frame->side_data[i]->type == type)
746  return frame->side_data[i];
747  }
748  return NULL;
749 }
750 
751 static int frame_copy_video(AVFrame *dst, const AVFrame *src)
752 {
753  const uint8_t *src_data[4];
754  int i, planes;
755 
756  if (dst->width < src->width ||
757  dst->height < src->height)
758  return AVERROR(EINVAL);
759 
760  if (src->hw_frames_ctx || dst->hw_frames_ctx)
761  return av_hwframe_transfer_data(dst, src, 0);
762 
763  planes = av_pix_fmt_count_planes(dst->format);
764  for (i = 0; i < planes; i++)
765  if (!dst->data[i] || !src->data[i])
766  return AVERROR(EINVAL);
767 
768  memcpy(src_data, src->data, sizeof(src_data));
769  av_image_copy(dst->data, dst->linesize,
770  src_data, src->linesize,
771  dst->format, src->width, src->height);
772 
773  return 0;
774 }
775 
776 static int frame_copy_audio(AVFrame *dst, const AVFrame *src)
777 {
779  int channels = dst->channels;
780  int planes = planar ? channels : 1;
781  int i;
782 
783  if (dst->nb_samples != src->nb_samples ||
784  dst->channels != src->channels ||
785  dst->channel_layout != src->channel_layout)
786  return AVERROR(EINVAL);
787 
789 
790  for (i = 0; i < planes; i++)
791  if (!dst->extended_data[i] || !src->extended_data[i])
792  return AVERROR(EINVAL);
793 
795  dst->nb_samples, channels, dst->format);
796 
797  return 0;
798 }
799 
800 int av_frame_copy(AVFrame *dst, const AVFrame *src)
801 {
802  if (dst->format != src->format || dst->format < 0)
803  return AVERROR(EINVAL);
804 
805  if (dst->width > 0 && dst->height > 0)
806  return frame_copy_video(dst, src);
807  else if (dst->nb_samples > 0 && dst->channels > 0)
808  return frame_copy_audio(dst, src);
809 
810  return AVERROR(EINVAL);
811 }
812 
814 {
815  int i;
816 
817  for (i = frame->nb_side_data - 1; i >= 0; i--) {
818  AVFrameSideData *sd = frame->side_data[i];
819  if (sd->type == type) {
820  free_side_data(&frame->side_data[i]);
821  frame->side_data[i] = frame->side_data[frame->nb_side_data - 1];
822  frame->nb_side_data--;
823  }
824  }
825 }
826 
828 {
829  switch(type) {
830  case AV_FRAME_DATA_PANSCAN: return "AVPanScan";
831  case AV_FRAME_DATA_A53_CC: return "ATSC A53 Part 4 Closed Captions";
832  case AV_FRAME_DATA_STEREO3D: return "Stereo 3D";
833  case AV_FRAME_DATA_MATRIXENCODING: return "AVMatrixEncoding";
834  case AV_FRAME_DATA_DOWNMIX_INFO: return "Metadata relevant to a downmix procedure";
835  case AV_FRAME_DATA_REPLAYGAIN: return "AVReplayGain";
836  case AV_FRAME_DATA_DISPLAYMATRIX: return "3x3 displaymatrix";
837  case AV_FRAME_DATA_AFD: return "Active format description";
838  case AV_FRAME_DATA_MOTION_VECTORS: return "Motion vectors";
839  case AV_FRAME_DATA_SKIP_SAMPLES: return "Skip samples";
840  case AV_FRAME_DATA_AUDIO_SERVICE_TYPE: return "Audio service type";
841  case AV_FRAME_DATA_MASTERING_DISPLAY_METADATA: return "Mastering display metadata";
842  case AV_FRAME_DATA_CONTENT_LIGHT_LEVEL: return "Content light level metadata";
843  case AV_FRAME_DATA_GOP_TIMECODE: return "GOP timecode";
844  case AV_FRAME_DATA_S12M_TIMECODE: return "SMPTE 12-1 timecode";
845  case AV_FRAME_DATA_SPHERICAL: return "Spherical Mapping";
846  case AV_FRAME_DATA_ICC_PROFILE: return "ICC profile";
847 #if FF_API_FRAME_QP
848  case AV_FRAME_DATA_QP_TABLE_PROPERTIES: return "QP table properties";
849  case AV_FRAME_DATA_QP_TABLE_DATA: return "QP table data";
850 #endif
851  case AV_FRAME_DATA_DYNAMIC_HDR_PLUS: return "HDR Dynamic Metadata SMPTE2094-40 (HDR10+)";
852  case AV_FRAME_DATA_REGIONS_OF_INTEREST: return "Regions Of Interest";
853  case AV_FRAME_DATA_VIDEO_ENC_PARAMS: return "Video encoding parameters";
854  case AV_FRAME_DATA_SEI_UNREGISTERED: return "H.26[45] User Data Unregistered SEI message";
855  case AV_FRAME_DATA_FILM_GRAIN_PARAMS: return "Film grain parameters";
856  }
857  return NULL;
858 }
859 
860 static int calc_cropping_offsets(size_t offsets[4], const AVFrame *frame,
861  const AVPixFmtDescriptor *desc)
862 {
863  int i, j;
864 
865  for (i = 0; frame->data[i]; i++) {
867  int shift_x = (i == 1 || i == 2) ? desc->log2_chroma_w : 0;
868  int shift_y = (i == 1 || i == 2) ? desc->log2_chroma_h : 0;
869 
870  if (desc->flags & (AV_PIX_FMT_FLAG_PAL | FF_PSEUDOPAL) && i == 1) {
871  offsets[i] = 0;
872  break;
873  }
874 
875  /* find any component descriptor for this plane */
876  for (j = 0; j < desc->nb_components; j++) {
877  if (desc->comp[j].plane == i) {
878  comp = &desc->comp[j];
879  break;
880  }
881  }
882  if (!comp)
883  return AVERROR_BUG;
884 
885  offsets[i] = (frame->crop_top >> shift_y) * frame->linesize[i] +
886  (frame->crop_left >> shift_x) * comp->step;
887  }
888 
889  return 0;
890 }
891 
892 int av_frame_apply_cropping(AVFrame *frame, int flags)
893 {
894  const AVPixFmtDescriptor *desc;
895  size_t offsets[4];
896  int i;
897 
898  if (!(frame->width > 0 && frame->height > 0))
899  return AVERROR(EINVAL);
900 
901  if (frame->crop_left >= INT_MAX - frame->crop_right ||
902  frame->crop_top >= INT_MAX - frame->crop_bottom ||
903  (frame->crop_left + frame->crop_right) >= frame->width ||
904  (frame->crop_top + frame->crop_bottom) >= frame->height)
905  return AVERROR(ERANGE);
906 
907  desc = av_pix_fmt_desc_get(frame->format);
908  if (!desc)
909  return AVERROR_BUG;
910 
911  /* Apply just the right/bottom cropping for hwaccel formats. Bitstream
912  * formats cannot be easily handled here either (and corresponding decoders
913  * should not export any cropping anyway), so do the same for those as well.
914  * */
916  frame->width -= frame->crop_right;
917  frame->height -= frame->crop_bottom;
918  frame->crop_right = 0;
919  frame->crop_bottom = 0;
920  return 0;
921  }
922 
923  /* calculate the offsets for each plane */
924  calc_cropping_offsets(offsets, frame, desc);
925 
926  /* adjust the offsets to avoid breaking alignment */
927  if (!(flags & AV_FRAME_CROP_UNALIGNED)) {
928  int log2_crop_align = frame->crop_left ? ff_ctz(frame->crop_left) : INT_MAX;
929  int min_log2_align = INT_MAX;
930 
931  for (i = 0; frame->data[i]; i++) {
932  int log2_align = offsets[i] ? ff_ctz(offsets[i]) : INT_MAX;
933  min_log2_align = FFMIN(log2_align, min_log2_align);
934  }
935 
936  /* we assume, and it should always be true, that the data alignment is
937  * related to the cropping alignment by a constant power-of-2 factor */
938  if (log2_crop_align < min_log2_align)
939  return AVERROR_BUG;
940 
941  if (min_log2_align < 5) {
942  frame->crop_left &= ~((1 << (5 + log2_crop_align - min_log2_align)) - 1);
943  calc_cropping_offsets(offsets, frame, desc);
944  }
945  }
946 
947  for (i = 0; frame->data[i]; i++)
948  frame->data[i] += offsets[i];
949 
950  frame->width -= (frame->crop_left + frame->crop_right);
951  frame->height -= (frame->crop_top + frame->crop_bottom);
952  frame->crop_left = 0;
953  frame->crop_right = 0;
954  frame->crop_top = 0;
955  frame->crop_bottom = 0;
956 
957  return 0;
958 }
#define AV_PIX_FMT_FLAG_PAL
Pixel format has a palette in data[1], values are indexes in this palette.
Definition: pixdesc.h:132
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / SMPTE RP177 Annex B
Definition: pixfmt.h:514
int av_frame_set_qp_table(AVFrame *f, AVBufferRef *buf, int stride, int qp_type)
Definition: frame.c:55
int plane
Which of the 4 planes contains the component.
Definition: pixdesc.h:35
#define NULL
Definition: coverity.c:32
#define ff_ctz
Definition: intmath.h:106
#define AV_NUM_DATA_POINTERS
Definition: frame.h:319
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it...
Definition: buffer.c:125
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2573
This structure describes decoded (raw) audio or video data.
Definition: frame.h:318
void * av_realloc(void *ptr, size_t size)
Allocate, reallocate, or free a block of memory.
Definition: mem.c:134
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:100
This side data must be associated with an audio frame and corresponds to enum AVAudioServiceType defi...
Definition: frame.h:113
attribute_deprecated int qscale_type
Definition: frame.h:648
int64_t pkt_pos
reordered pos from the last AVPacket that has been input into the decoder
Definition: frame.h:589
const char * av_get_colorspace_name(enum AVColorSpace val)
Get the name of a colorspace.
Definition: frame.c:124
const char * desc
Definition: libsvtav1.c:79
misc image utilities
int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2613
Memory handling functions.
uint8_t pi<< 24) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_U8,(uint64_t)((*(const uint8_t *) pi-0x80U))<< 56) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16,(*(const int16_t *) pi >>8)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_S16,*(const int16_t *) pi *(1<< 16)) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S16,(uint64_t)(*(const int16_t *) pi)<< 48) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16,*(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16,*(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32,(*(const int32_t *) pi >>24)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S32,(uint64_t)(*(const int32_t *) pi)<< 32) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32,*(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32,*(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S64,(*(const int64_t *) pi >>56)+0x80) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S64,*(const int64_t *) pi *(1.0f/(UINT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S64,*(const int64_t *) pi *(1.0/(UINT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_FLT, llrintf(*(const float *) pi *(UINT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, av_clip_uint8(lrint(*(const double *) pi 
*(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_DBL, llrint(*(const double *) pi *(UINT64_C(1)<< 63)))#define FMT_PAIR_FUNC(out, in) static conv_func_type *const fmt_pair_to_conv_functions[AV_SAMPLE_FMT_NB *AV_SAMPLE_FMT_NB]={FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBL), 
FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S64),};static void cpy1(uint8_t **dst, const uint8_t **src, int len){memcpy(*dst,*src, len);}static void cpy2(uint8_t **dst, const uint8_t **src, int len){memcpy(*dst,*src, 2 *len);}static void cpy4(uint8_t **dst, const uint8_t **src, int len){memcpy(*dst,*src, 4 *len);}static void cpy8(uint8_t **dst, const uint8_t **src, int len){memcpy(*dst,*src, 8 *len);}AudioConvert *swri_audio_convert_alloc(enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, const int *ch_map, int flags){AudioConvert *ctx;conv_func_type *f=fmt_pair_to_conv_functions[av_get_packed_sample_fmt(out_fmt)+AV_SAMPLE_FMT_NB *av_get_packed_sample_fmt(in_fmt)];if(!f) return NULL;ctx=av_mallocz(sizeof(*ctx));if(!ctx) return NULL;if(channels==1){in_fmt=av_get_planar_sample_fmt(in_fmt);out_fmt=av_get_planar_sample_fmt(out_fmt);}ctx->channels=channels;ctx->conv_f=f;ctx->ch_map=ch_map;if(in_fmt==AV_SAMPLE_FMT_U8||in_fmt==AV_SAMPLE_FMT_U8P) memset(ctx->silence, 0x80, sizeof(ctx->silence));if(out_fmt==in_fmt &&!ch_map){switch(av_get_bytes_per_sample(in_fmt)){case 1:ctx->simd_f=cpy1;break;case 2:ctx->simd_f=cpy2;break;case 4:ctx->simd_f=cpy4;break;case 8:ctx->simd_f=cpy8;break;}}if(HAVE_X86ASM &&1) swri_audio_convert_init_x86(ctx, out_fmt, in_fmt, channels);if(ARCH_ARM) swri_audio_convert_init_arm(ctx, out_fmt, in_fmt, channels);if(ARCH_AARCH64) swri_audio_convert_init_aarch64(ctx, out_fmt, in_fmt, channels);return ctx;}void swri_audio_convert_free(AudioConvert **ctx){av_freep(ctx);}int swri_audio_convert(AudioConvert *ctx, AudioData *out, AudioData *in, int len){int ch;int off=0;const int 
os=(out->planar?1:out->ch_count)*out->bps;unsigned misaligned=0;av_assert0(ctx->channels==out->ch_count);if(ctx->in_simd_align_mask){int planes=in->planar?in->ch_count:1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) in->ch[ch];misaligned|=m &ctx->in_simd_align_mask;}if(ctx->out_simd_align_mask){int planes=out->planar?out->ch_count:1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) out->ch[ch];misaligned|=m &ctx->out_simd_align_mask;}if(ctx->simd_f &&!ctx->ch_map &&!misaligned){off=len &~15;av_assert1(off >=0);av_assert1(off<=len);av_assert2(ctx->channels==SWR_CH_MAX||!in->ch[ctx->channels]);if(off >0){if(out->planar==in->planar){int planes=out->planar?out->ch_count:1;for(ch=0;ch< planes;ch++){ctx->simd_f(out->ch+ch,(const uint8_t **) in->ch+ch, off *(out-> planar
Definition: audioconvert.c:56
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:509
AVDictionary * metadata
Definition: frame.h:228
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601 ...
Definition: pixfmt.h:518
void * opaque
for some private data of the user
Definition: frame.h:446
int nb_extended_buf
Number of elements in extended_buf.
Definition: frame.h:527
Content light level (based on CTA-861.3).
Definition: frame.h:136
Timecode which conforms to SMPTE ST 12-1.
Definition: frame.h:168
int repeat_pict
When decoding, this signals how much the picture must be delayed.
Definition: frame.h:460
also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC
Definition: pixfmt.h:519
int av_dict_copy(AVDictionary **dst, const AVDictionary *src, int flags)
Copy entries from one AVDictionary struct into another.
Definition: dict.c:217
GLint GLenum type
Definition: opengl_enc.c:104
Mastering display metadata associated with a video frame.
Definition: frame.h:119
attribute_deprecated AVBufferRef * qp_table_buf
Definition: frame.h:651
static void wipe_side_data(AVFrame *frame)
Definition: frame.c:179
color_range
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:583
#define FF_ARRAY_ELEMS(a)
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:237
size_t crop_bottom
Definition: frame.h:679
static const struct @322 planes[]
order of coefficients is actually GBR, also IEC 61966-2-1 (sRGB)
Definition: pixfmt.h:513
uint8_t log2_chroma_w
Amount to shift the luma width right to find the chroma width.
Definition: pixdesc.h:92
attribute_deprecated int8_t * qscale_table
QP table.
Definition: frame.h:640
functionally identical to above
Definition: pixfmt.h:520
int av_get_channel_layout_nb_channels(uint64_t channel_layout)
Return the number of channels in the channel layout.
AVFrameSideData * av_frame_get_side_data(const AVFrame *frame, enum AVFrameSideDataType type)
Definition: frame.c:739
AVBufferRef * hw_frames_ctx
For hwaccel-format frames, this should be a reference to the AVHWFramesContext describing the frame...
Definition: frame.h:657
Public dictionary API.
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:117
static const int offsets[]
Definition: hevc_pel.c:34
uint8_t
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:191
Film grain parameters for a frame, described by AVFilmGrainParams.
Definition: frame.h:200
static int frame_copy_video(AVFrame *dst, const AVFrame *src)
Definition: frame.c:751
AVColorSpace
YUV colorspace type.
Definition: pixfmt.h:512
size_t crop_left
Definition: frame.h:680
#define f(width, name)
Definition: cbs_vp9.c:255
attribute_deprecated int qstride
QP store stride.
Definition: frame.h:645
AVBufferRef * private_ref
AVBufferRef for internal use by a single libav* library.
Definition: frame.h:697
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:444
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:411
AVBufferRef * buf
Definition: frame.h:229
The data is the AVPanScan struct defined in libavcodec.
Definition: frame.h:52
HDR dynamic metadata associated with a video frame.
Definition: frame.h:175
Structure to hold side data for an AVFrame.
Definition: frame.h:220
AVDictionary * metadata
metadata.
Definition: frame.h:604
int interlaced_frame
The content of the picture is interlaced.
Definition: frame.h:465
AVColorRange
Visual content value range.
Definition: pixfmt.h:551
ptrdiff_t size
Definition: opengl_enc.c:100
The data represents the AVSphericalMapping structure defined in libavutil/spherical.h.
Definition: frame.h:130
channels
Definition: aptx.h:33
Active Format Description data consisting of a single byte as specified in ETSI TS 101 154 using AVAc...
Definition: frame.h:89
Metadata relevant to a downmix procedure.
Definition: frame.h:72
int nb_side_data
Definition: frame.h:530
#define FFALIGN(x, a)
Definition: macros.h:48
attribute_deprecated uint64_t error[AV_NUM_DATA_POINTERS]
Definition: frame.h:453
AVFrameSideData ** side_data
Definition: frame.h:529
int av_sample_fmt_is_planar(enum AVSampleFormat sample_fmt)
Check if the sample format is planar.
Definition: samplefmt.c:112
Implementation-specific description of the format of AV_FRAME_QP_TABLE_DATA.
Definition: frame.h:152
#define src
Definition: vp8dsp.c:255
int width
Definition: frame.h:376
uint8_t log2_chroma_h
Amount to shift the luma height right to find the chroma height.
Definition: pixdesc.h:101
#define MAKE_ACCESSORS(str, name, type, field)
Definition: internal.h:90
int buffer_size_t
Definition: internal.h:306
static const int sizes[][2]
Definition: img2dec.c:53
static int frame_copy_props(AVFrame *dst, const AVFrame *src, int force_copy)
Definition: frame.c:351
int av_frame_apply_cropping(AVFrame *frame, int flags)
Crop the given video AVFrame according to its crop_left/crop_top/crop_right/ crop_bottom fields...
Definition: frame.c:892
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:204
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: frame.h:562
ATSC A53 Part 4 Closed Captions.
Definition: frame.h:58
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values. ...
Definition: dict.c:203
enum AVColorSpace colorspace
YUV colorspace type.
Definition: frame.h:573
#define AV_PIX_FMT_FLAG_HWACCEL
Pixel format is an HW accelerated format.
Definition: pixdesc.h:140
simple assert() macros that are a bit more flexible than ISO C assert().
The GOP timecode in 25 bit timecode format.
Definition: frame.h:124
#define FFMAX(a, b)
Definition: common.h:103
static int get_audio_buffer(AVFrame *frame, int align)
Definition: frame.c:282
int av_hwframe_get_buffer(AVBufferRef *hwframe_ref, AVFrame *frame, int flags)
Allocate a new frame attached to the given AVHWFramesContext.
Definition: hwcontext.c:502
#define fail()
Definition: checkasm.h:133
void av_image_copy(uint8_t *dst_data[4], int dst_linesizes[4], const uint8_t *src_data[4], const int src_linesizes[4], enum AVPixelFormat pix_fmt, int width, int height)
Copy image in src_data to dst_data.
Definition: imgutils.c:422
int av_frame_copy(AVFrame *dst, const AVFrame *src)
Copy the frame data from src to dst.
Definition: frame.c:800
reference-counted frame API
uint64_t channel_layout
Channel layout of the audio data.
Definition: frame.h:495
size_t crop_top
Definition: frame.h:678
int av_hwframe_transfer_data(AVFrame *dst, const AVFrame *src, int flags)
Copy data to or from a hw surface.
Definition: hwcontext.c:443
uint64_t flags
Combination of AV_PIX_FMT_FLAG_...
Definition: pixdesc.h:106
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:317
uint8_t nb_components
The number of components each pixel has, (1-4)
Definition: pixdesc.h:83
int channels
number of audio channels, only used for audio.
Definition: frame.h:624
audio channel layout utility functions
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:401
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:555
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
#define FFMIN(a, b)
Definition: common.h:105
int display_picture_number
picture number in display order
Definition: frame.h:436
AVBufferRef ** extended_buf
For planar audio which requires more than AV_NUM_DATA_POINTERS AVBufferRef pointers, this array will hold all the references which cannot fit into AVFrame.buf.
Definition: frame.h:523
AVFrameSideData * av_frame_new_side_data_from_buf(AVFrame *frame, enum AVFrameSideDataType type, AVBufferRef *buf)
Add a new side data to a frame from an existing AVBufferRef.
Definition: frame.c:695
Motion vectors exported by some codecs (on demand through the export_mvs flag set in the libavcodec A...
Definition: frame.h:96
AVBufferRef * av_frame_get_plane_buffer(AVFrame *frame, int plane)
Get the buffer reference a given data plane is stored in.
Definition: frame.c:664
AVFrameSideDataType
Definition: frame.h:48
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
int quality
quality (between 1 (good) and FF_LAMBDA_MAX (bad))
Definition: frame.h:441
The data contains an ICC profile as an opaque octet buffer following the format described by ISO 1507...
Definition: frame.h:143
int av_buffer_is_writable(const AVBufferRef *buf)
Definition: buffer.c:133
FCC Title 47 Code of Federal Regulations 73.682 (a)(20)
Definition: pixfmt.h:517
AVFrame * av_frame_clone(const AVFrame *src)
Create a new frame that references the same data as src.
Definition: frame.c:541
static void comp(unsigned char *dst, ptrdiff_t dst_stride, unsigned char *src, ptrdiff_t src_stride, int add)
Definition: eamad.c:85
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:391
int coded_picture_number
picture number in bitstream order
Definition: frame.h:432
sample_rate
int av_image_fill_pointers(uint8_t *data[4], enum AVPixelFormat pix_fmt, int height, uint8_t *ptr, const int linesizes[4])
Fill plane data pointers for an image with pixel format pix_fmt and height height.
Definition: imgutils.c:146
int64_t pkt_duration
duration of the corresponding packet, expressed in AVStream->time_base units, 0 if unknown...
Definition: frame.h:597
int stride
Definition: frame.c:51
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: frame.h:84
int av_frame_is_writable(AVFrame *frame)
Check if the frame data is writable.
Definition: frame.c:595
AVBufferRef * av_buffer_alloc(buffer_size_t size)
Allocate an AVBuffer of the given size using av_malloc().
Definition: buffer.c:67
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:349
static int get_video_buffer(AVFrame *frame, int align)
Definition: frame.c:213
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
static int calc_cropping_offsets(size_t offsets[4], const AVFrame *frame, const AVPixFmtDescriptor *desc)
Definition: frame.c:860
User data unregistered metadata associated with a video frame.
Definition: frame.h:194
uint8_t * data
The data buffer.
Definition: buffer.h:92
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:406
uint8_t * data
Definition: frame.h:222
int av_samples_copy(uint8_t **dst, uint8_t *const *src, int dst_offset, int src_offset, int nb_samples, int nb_channels, enum AVSampleFormat sample_fmt)
Copy samples from src to dst.
Definition: samplefmt.c:213
void av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type)
Remove and free all side data instances of the given type.
Definition: frame.c:813
size_t crop_right
Definition: frame.h:681
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:50
int64_t reordered_opaque
reordered opaque 64 bits (generally an integer or a double precision float PTS but can be anything)...
Definition: frame.h:485
int sample_rate
Sample rate of the audio data.
Definition: frame.h:490
int av_image_fill_linesizes(int linesizes[4], enum AVPixelFormat pix_fmt, int width)
Fill plane linesizes for an image with pixel format pix_fmt and width width.
Definition: imgutils.c:89
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, buffer_size_t size)
Add a new side data to a frame.
Definition: frame.c:727
int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples, enum AVSampleFormat sample_fmt, int align)
Get the required buffer size for the given audio parameters.
Definition: samplefmt.c:119
Rational number (pair of numerator and denominator).
Definition: rational.h:58
Regions Of Interest, the data is an array of AVRegionOfInterest type, the number of array element is ...
Definition: frame.h:181
int palette_has_changed
Tell user application that palette has changed from previous frame.
Definition: frame.h:475
int av_image_fill_plane_sizes(size_t sizes[4], enum AVPixelFormat pix_fmt, int height, const ptrdiff_t linesizes[4])
Fill plane sizes for an image with pixel format pix_fmt and height height.
Definition: imgutils.c:111
refcounted data buffer API
enum AVChromaLocation chroma_location
Definition: frame.h:575
int64_t best_effort_timestamp
frame timestamp estimated using various heuristics, in stream time base
Definition: frame.h:582
int decode_error_flags
decode error flags of the frame, set to a combination of FF_DECODE_ERROR_xxx flags if the decoder pro...
Definition: frame.h:613
static int frame_copy_audio(AVFrame *dst, const AVFrame *src)
Definition: frame.c:776
#define AV_PIX_FMT_FLAG_BITSTREAM
All values of a component are bit-wise packed end to end.
Definition: pixdesc.h:136
const char * av_frame_side_data_name(enum AVFrameSideDataType type)
Definition: frame.c:827
int size
Size of data in bytes.
Definition: buffer.h:97
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:338
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:554
enum AVFrameSideDataType type
Definition: frame.h:221
int av_frame_make_writable(AVFrame *frame)
Ensure that the frame data is writable, avoiding data copy if possible.
Definition: frame.c:612
#define flags(name, subs,...)
Definition: cbs_av1.c:561
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:332
attribute_deprecated int64_t pkt_pts
PTS copied from the AVPacket that was decoded to produce this frame.
Definition: frame.h:419
int64_t pkt_dts
DTS copied from the AVPacket that triggered returning this frame.
Definition: frame.h:427
A reference to a data buffer.
Definition: buffer.h:84
GLint GLenum GLboolean GLsizei stride
Definition: opengl_enc.c:104
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:83
common internal and external API header
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:107
#define CHECK_CHANNELS_CONSISTENCY(frame)
Definition: frame.c:44
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:93
Apply the maximum possible cropping, even if it requires setting the AVFrame.data[] entries to unalig...
Definition: frame.h:970
#define FF_PSEUDOPAL
Definition: internal.h:299
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:84
int top_field_first
If the content is interlaced, is top field displayed first.
Definition: frame.h:470
AVBufferRef * opaque_ref
AVBufferRef for free use by the API user.
Definition: frame.h:668
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:396
enum AVColorPrimaries color_primaries
Definition: frame.h:564
Encoding parameters for a video frame, as described by AVVideoEncParams.
Definition: frame.h:186
int height
Definition: frame.h:376
#define av_freep(p)
int type
Definition: frame.c:52
static void free_side_data(AVFrameSideData **ptr_sd)
Definition: frame.c:170
enum AVColorTransferCharacteristic color_trc
Definition: frame.h:566
Recommends skipping the specified number of samples.
Definition: frame.h:108
#define av_malloc_array(a, b)
ReplayGain information in the form of the AVReplayGain struct.
Definition: frame.h:76
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Frame references ownership and permissions
uint8_t ** extended_data
pointers to the data planes/channels.
Definition: frame.h:365
static void get_frame_defaults(AVFrame *frame)
Definition: frame.c:140
int8_t * av_frame_get_qp_table(AVFrame *f, int *stride, int *type)
Definition: frame.c:91
int pkt_size
size of the corresponding packet containing the compressed frame.
Definition: frame.h:633
Stereoscopic 3d metadata.
Definition: frame.h:63
static double val(void *priv, double ch)
Definition: aeval.c:76
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:384
The data is the AVMatrixEncoding enum defined in libavutil/channel_layout.h.
Definition: frame.h:67
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:659
int i
Definition: input.c:407
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
int step
Number of elements between 2 horizontally consecutive pixels.
Definition: pixdesc.h:41
int av_buffer_replace(AVBufferRef **pdst, AVBufferRef *src)
Ensure dst refers to the same data as src.
Definition: buffer.c:219
Raw QP table data.
Definition: frame.h:159
void * av_mallocz_array(size_t nmemb, size_t size)
Definition: mem.c:190
const char * name
Definition: opengl_enc.c:102
static uint8_t tmp[11]
Definition: aes_ctr.c:27