FFmpeg
frame.c
1 /*
2  * This file is part of FFmpeg.
3  *
4  * FFmpeg is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU Lesser General Public
6  * License as published by the Free Software Foundation; either
7  * version 2.1 of the License, or (at your option) any later version.
8  *
9  * FFmpeg is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12  * Lesser General Public License for more details.
13  *
14  * You should have received a copy of the GNU Lesser General Public
15  * License along with FFmpeg; if not, write to the Free Software
16  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17  */
18 
19 #include "channel_layout.h"
20 #include "avassert.h"
21 #include "buffer.h"
22 #include "common.h"
23 #include "cpu.h"
24 #include "dict.h"
25 #include "frame.h"
26 #include "imgutils.h"
27 #include "mem.h"
28 #include "samplefmt.h"
29 #include "hwcontext.h"
30 
31 #if FF_API_OLD_CHANNEL_LAYOUT
32 #define CHECK_CHANNELS_CONSISTENCY(frame) \
33  av_assert2(!(frame)->channel_layout || \
34  (frame)->channels == \
35  av_get_channel_layout_nb_channels((frame)->channel_layout))
36 #endif
37 
38 #if FF_API_COLORSPACE_NAME
39 const char *av_get_colorspace_name(enum AVColorSpace val)
40 {
41  static const char * const name[] = {
42  [AVCOL_SPC_RGB] = "GBR",
43  [AVCOL_SPC_BT709] = "bt709",
44  [AVCOL_SPC_FCC] = "fcc",
45  [AVCOL_SPC_BT470BG] = "bt470bg",
46  [AVCOL_SPC_SMPTE170M] = "smpte170m",
47  [AVCOL_SPC_SMPTE240M] = "smpte240m",
48  [AVCOL_SPC_YCOCG] = "YCgCo",
49  };
50  if ((unsigned)val >= FF_ARRAY_ELEMS(name))
51  return NULL;
52  return name[val];
53 }
54 #endif
55 static void get_frame_defaults(AVFrame *frame)
56 {
57  memset(frame, 0, sizeof(*frame));
58 
59  frame->pts =
60  frame->pkt_dts = AV_NOPTS_VALUE;
61  frame->best_effort_timestamp = AV_NOPTS_VALUE;
62  frame->duration = 0;
63 #if FF_API_PKT_DURATION
64 FF_DISABLE_DEPRECATION_WARNINGS
65  frame->pkt_duration = 0;
66 FF_ENABLE_DEPRECATION_WARNINGS
67 #endif
68  frame->pkt_pos = -1;
69  frame->pkt_size = -1;
70  frame->time_base = (AVRational){ 0, 1 };
71  frame->key_frame = 1;
72  frame->sample_aspect_ratio = (AVRational){ 0, 1 };
73  frame->format = -1; /* unknown */
74  frame->extended_data = frame->data;
75  frame->color_primaries = AVCOL_PRI_UNSPECIFIED;
76  frame->color_trc = AVCOL_TRC_UNSPECIFIED;
77  frame->colorspace = AVCOL_SPC_UNSPECIFIED;
78  frame->color_range = AVCOL_RANGE_UNSPECIFIED;
79  frame->chroma_location = AVCHROMA_LOC_UNSPECIFIED;
80  frame->flags = 0;
81 }
82 
83 static void free_side_data(AVFrameSideData **ptr_sd)
84 {
85  AVFrameSideData *sd = *ptr_sd;
86 
87  av_buffer_unref(&sd->buf);
88  av_dict_free(&sd->metadata);
89  av_freep(ptr_sd);
90 }
91 
92 static void wipe_side_data(AVFrame *frame)
93 {
94  int i;
95 
96  for (i = 0; i < frame->nb_side_data; i++) {
97  free_side_data(&frame->side_data[i]);
98  }
99  frame->nb_side_data = 0;
100 
101  av_freep(&frame->side_data);
102 }
103 
104 AVFrame *av_frame_alloc(void)
105 {
106  AVFrame *frame = av_malloc(sizeof(*frame));
107 
108  if (!frame)
109  return NULL;
110 
111  get_frame_defaults(frame);
112 
113  return frame;
114 }
115 
116 void av_frame_free(AVFrame **frame)
117 {
118  if (!frame || !*frame)
119  return;
120 
121  av_frame_unref(*frame);
122  av_freep(frame);
123 }
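
av_frame_alloc() only allocates and initializes the AVFrame struct itself; data buffers are attached later (for example by av_frame_get_buffer() below), and av_frame_free() releases both. A minimal lifecycle sketch, not part of frame.c; the helper name use_frame is hypothetical:

#include <libavutil/frame.h>

/* Illustrative only: allocate a frame, use it, release it. */
static int use_frame(void)
{
    AVFrame *frame = av_frame_alloc();   /* fields set to defaults, no data yet */
    if (!frame)
        return AVERROR(ENOMEM);

    /* ... set format/dimensions and attach buffers here ... */

    av_frame_free(&frame);               /* unrefs any buffers, frees the struct */
    return 0;
}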
124 
125 static int get_video_buffer(AVFrame *frame, int align)
126 {
127  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
128  int ret, i, padded_height, total_size;
129  int plane_padding = FFMAX(16 + 16/*STRIDE_ALIGN*/, align);
130  ptrdiff_t linesizes[4];
131  size_t sizes[4];
132 
133  if (!desc)
134  return AVERROR(EINVAL);
135 
136  if ((ret = av_image_check_size(frame->width, frame->height, 0, NULL)) < 0)
137  return ret;
138 
139  if (!frame->linesize[0]) {
140  if (align <= 0)
141  align = 32; /* STRIDE_ALIGN. Should be av_cpu_max_align() */
142 
143  for(i=1; i<=align; i+=i) {
144  ret = av_image_fill_linesizes(frame->linesize, frame->format,
145  FFALIGN(frame->width, i));
146  if (ret < 0)
147  return ret;
148  if (!(frame->linesize[0] & (align-1)))
149  break;
150  }
151 
152  for (i = 0; i < 4 && frame->linesize[i]; i++)
153  frame->linesize[i] = FFALIGN(frame->linesize[i], align);
154  }
155 
156  for (i = 0; i < 4; i++)
157  linesizes[i] = frame->linesize[i];
158 
159  padded_height = FFALIGN(frame->height, 32);
160  if ((ret = av_image_fill_plane_sizes(sizes, frame->format,
161  padded_height, linesizes)) < 0)
162  return ret;
163 
164  total_size = 4*plane_padding;
165  for (i = 0; i < 4; i++) {
166  if (sizes[i] > INT_MAX - total_size)
167  return AVERROR(EINVAL);
168  total_size += sizes[i];
169  }
170 
171  frame->buf[0] = av_buffer_alloc(total_size);
172  if (!frame->buf[0]) {
173  ret = AVERROR(ENOMEM);
174  goto fail;
175  }
176 
177  if ((ret = av_image_fill_pointers(frame->data, frame->format, padded_height,
178  frame->buf[0]->data, frame->linesize)) < 0)
179  goto fail;
180 
181  for (i = 1; i < 4; i++) {
182  if (frame->data[i])
183  frame->data[i] += i * plane_padding;
184  }
185 
186  frame->extended_data = frame->data;
187 
188  return 0;
189 fail:
190  av_frame_unref(frame);
191  return ret;
192 }
193 
194 static int get_audio_buffer(AVFrame *frame, int align)
195 {
196  int planar = av_sample_fmt_is_planar(frame->format);
197  int channels, planes;
198  int ret, i;
199 
200 #if FF_API_OLD_CHANNEL_LAYOUT
201 FF_DISABLE_DEPRECATION_WARNINGS
202  if (!frame->ch_layout.nb_channels) {
203  if (frame->channel_layout) {
204  av_channel_layout_from_mask(&frame->ch_layout, frame->channel_layout);
205  } else {
206  frame->ch_layout.nb_channels = frame->channels;
207  frame->ch_layout.order = AV_CHANNEL_ORDER_UNSPEC;
208  }
209  }
210  frame->channels = frame->ch_layout.nb_channels;
211  frame->channel_layout = frame->ch_layout.order == AV_CHANNEL_ORDER_NATIVE ?
212  frame->ch_layout.u.mask : 0;
213 FF_ENABLE_DEPRECATION_WARNINGS
214 #endif
215  channels = frame->ch_layout.nb_channels;
216  planes = planar ? channels : 1;
217  if (!frame->linesize[0]) {
218  ret = av_samples_get_buffer_size(&frame->linesize[0], channels,
219  frame->nb_samples, frame->format,
220  align);
221  if (ret < 0)
222  return ret;
223  }
224 
225  if (planes > AV_NUM_DATA_POINTERS) {
226  frame->extended_data = av_calloc(planes,
227  sizeof(*frame->extended_data));
228  frame->extended_buf = av_calloc(planes - AV_NUM_DATA_POINTERS,
229  sizeof(*frame->extended_buf));
230  if (!frame->extended_data || !frame->extended_buf) {
231  av_freep(&frame->extended_data);
232  av_freep(&frame->extended_buf);
233  return AVERROR(ENOMEM);
234  }
235  frame->nb_extended_buf = planes - AV_NUM_DATA_POINTERS;
236  } else
237  frame->extended_data = frame->data;
238 
239  for (i = 0; i < FFMIN(planes, AV_NUM_DATA_POINTERS); i++) {
240  frame->buf[i] = av_buffer_alloc(frame->linesize[0]);
241  if (!frame->buf[i]) {
242  av_frame_unref(frame);
243  return AVERROR(ENOMEM);
244  }
245  frame->extended_data[i] = frame->data[i] = frame->buf[i]->data;
246  }
247  for (i = 0; i < planes - AV_NUM_DATA_POINTERS; i++) {
248  frame->extended_buf[i] = av_buffer_alloc(frame->linesize[0]);
249  if (!frame->extended_buf[i]) {
250  av_frame_unref(frame);
251  return AVERROR(ENOMEM);
252  }
253  frame->extended_data[i + AV_NUM_DATA_POINTERS] = frame->extended_buf[i]->data;
254  }
255  return 0;
256 
257 }
258 
259 int av_frame_get_buffer(AVFrame *frame, int align)
260 {
261  if (frame->format < 0)
262  return AVERROR(EINVAL);
263 
264 FF_DISABLE_DEPRECATION_WARNINGS
265  if (frame->width > 0 && frame->height > 0)
266  return get_video_buffer(frame, align);
267  else if (frame->nb_samples > 0 &&
268  (av_channel_layout_check(&frame->ch_layout)
269 #if FF_API_OLD_CHANNEL_LAYOUT
270  || frame->channel_layout || frame->channels > 0
271 #endif
272  ))
273  return get_audio_buffer(frame, align);
274 FF_ENABLE_DEPRECATION_WARNINGS
275 
276  return AVERROR(EINVAL);
277 }
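
Callers of av_frame_get_buffer() are expected to fill in format plus width/height (video) or nb_samples, sample rate and channel layout (audio) first; align == 0 requests the default alignment for the current CPU. A usage sketch, not part of frame.c; alloc_buffers and the chosen parameters are hypothetical:

#include <libavutil/frame.h>
#include <libavutil/channel_layout.h>

/* Illustrative only: attach refcounted video and audio buffers. */
static int alloc_buffers(AVFrame *video, AVFrame *audio)
{
    int ret;

    video->format = AV_PIX_FMT_YUV420P;
    video->width  = 1280;
    video->height = 720;
    if ((ret = av_frame_get_buffer(video, 0)) < 0)   /* 0 = default alignment */
        return ret;

    audio->format      = AV_SAMPLE_FMT_FLTP;
    audio->sample_rate = 48000;
    audio->nb_samples  = 1024;
    av_channel_layout_default(&audio->ch_layout, 2); /* stereo */
    return av_frame_get_buffer(audio, 0);
}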
278 
279 static int frame_copy_props(AVFrame *dst, const AVFrame *src, int force_copy)
280 {
281  int ret, i;
282 
283  dst->key_frame = src->key_frame;
284  dst->pict_type = src->pict_type;
285  dst->sample_aspect_ratio = src->sample_aspect_ratio;
286  dst->crop_top = src->crop_top;
287  dst->crop_bottom = src->crop_bottom;
288  dst->crop_left = src->crop_left;
289  dst->crop_right = src->crop_right;
290  dst->pts = src->pts;
291  dst->duration = src->duration;
292  dst->repeat_pict = src->repeat_pict;
293  dst->interlaced_frame = src->interlaced_frame;
294  dst->top_field_first = src->top_field_first;
295  dst->palette_has_changed = src->palette_has_changed;
296  dst->sample_rate = src->sample_rate;
297  dst->opaque = src->opaque;
298  dst->pkt_dts = src->pkt_dts;
299  dst->pkt_pos = src->pkt_pos;
300  dst->pkt_size = src->pkt_size;
301 #if FF_API_PKT_DURATION
302 FF_DISABLE_DEPRECATION_WARNINGS
303  dst->pkt_duration = src->pkt_duration;
304 FF_ENABLE_DEPRECATION_WARNINGS
305 #endif
306  dst->time_base = src->time_base;
307  dst->reordered_opaque = src->reordered_opaque;
308  dst->quality = src->quality;
309  dst->best_effort_timestamp = src->best_effort_timestamp;
310  dst->coded_picture_number = src->coded_picture_number;
311  dst->display_picture_number = src->display_picture_number;
312  dst->flags = src->flags;
313  dst->decode_error_flags = src->decode_error_flags;
314  dst->color_primaries = src->color_primaries;
315  dst->color_trc = src->color_trc;
316  dst->colorspace = src->colorspace;
317  dst->color_range = src->color_range;
318  dst->chroma_location = src->chroma_location;
319 
320  av_dict_copy(&dst->metadata, src->metadata, 0);
321 
322  for (i = 0; i < src->nb_side_data; i++) {
323  const AVFrameSideData *sd_src = src->side_data[i];
324  AVFrameSideData *sd_dst;
325  if ( sd_src->type == AV_FRAME_DATA_PANSCAN
326  && (src->width != dst->width || src->height != dst->height))
327  continue;
328  if (force_copy) {
329  sd_dst = av_frame_new_side_data(dst, sd_src->type,
330  sd_src->size);
331  if (!sd_dst) {
332  wipe_side_data(dst);
333  return AVERROR(ENOMEM);
334  }
335  memcpy(sd_dst->data, sd_src->data, sd_src->size);
336  } else {
337  AVBufferRef *ref = av_buffer_ref(sd_src->buf);
338  sd_dst = av_frame_new_side_data_from_buf(dst, sd_src->type, ref);
339  if (!sd_dst) {
340  av_buffer_unref(&ref);
341  wipe_side_data(dst);
342  return AVERROR(ENOMEM);
343  }
344  }
345  av_dict_copy(&sd_dst->metadata, sd_src->metadata, 0);
346  }
347 
348  ret = av_buffer_replace(&dst->opaque_ref, src->opaque_ref);
349  ret |= av_buffer_replace(&dst->private_ref, src->private_ref);
350  return ret;
351 }
352 
353 int av_frame_ref(AVFrame *dst, const AVFrame *src)
354 {
355  int i, ret = 0;
356 
357  av_assert1(dst->width == 0 && dst->height == 0);
358 #if FF_API_OLD_CHANNEL_LAYOUT
359 FF_DISABLE_DEPRECATION_WARNINGS
360  av_assert1(dst->channels == 0);
361 FF_ENABLE_DEPRECATION_WARNINGS
362 #endif
363  av_assert1(dst->ch_layout.nb_channels == 0 &&
364  dst->ch_layout.order == AV_CHANNEL_ORDER_UNSPEC);
365 
366  dst->format = src->format;
367  dst->width = src->width;
368  dst->height = src->height;
369  dst->nb_samples = src->nb_samples;
370 #if FF_API_OLD_CHANNEL_LAYOUT
371 FF_DISABLE_DEPRECATION_WARNINGS
372  dst->channels = src->channels;
373  dst->channel_layout = src->channel_layout;
374  if (!av_channel_layout_check(&src->ch_layout)) {
375  if (src->channel_layout)
376  av_channel_layout_from_mask(&dst->ch_layout, src->channel_layout);
377  else {
378  dst->ch_layout.nb_channels = src->channels;
379  dst->ch_layout.order = AV_CHANNEL_ORDER_UNSPEC;
380  }
381  }
382 FF_ENABLE_DEPRECATION_WARNINGS
383 #endif
384 
385  ret = frame_copy_props(dst, src, 0);
386  if (ret < 0)
387  goto fail;
388 
389  // this check is needed only until FF_API_OLD_CHANNEL_LAYOUT is out
390  if (av_channel_layout_check(&src->ch_layout)) {
391  ret = av_channel_layout_copy(&dst->ch_layout, &src->ch_layout);
392  if (ret < 0)
393  goto fail;
394  }
395 
396  /* duplicate the frame data if it's not refcounted */
397  if (!src->buf[0]) {
398  ret = av_frame_get_buffer(dst, 0);
399  if (ret < 0)
400  goto fail;
401 
402  ret = av_frame_copy(dst, src);
403  if (ret < 0)
404  goto fail;
405 
406  return 0;
407  }
408 
409  /* ref the buffers */
410  for (i = 0; i < FF_ARRAY_ELEMS(src->buf); i++) {
411  if (!src->buf[i])
412  continue;
413  dst->buf[i] = av_buffer_ref(src->buf[i]);
414  if (!dst->buf[i]) {
415  ret = AVERROR(ENOMEM);
416  goto fail;
417  }
418  }
419 
420  if (src->extended_buf) {
421  dst->extended_buf = av_calloc(src->nb_extended_buf,
422  sizeof(*dst->extended_buf));
423  if (!dst->extended_buf) {
424  ret = AVERROR(ENOMEM);
425  goto fail;
426  }
427  dst->nb_extended_buf = src->nb_extended_buf;
428 
429  for (i = 0; i < src->nb_extended_buf; i++) {
430  dst->extended_buf[i] = av_buffer_ref(src->extended_buf[i]);
431  if (!dst->extended_buf[i]) {
432  ret = AVERROR(ENOMEM);
433  goto fail;
434  }
435  }
436  }
437 
438  if (src->hw_frames_ctx) {
439  dst->hw_frames_ctx = av_buffer_ref(src->hw_frames_ctx);
440  if (!dst->hw_frames_ctx) {
441  ret = AVERROR(ENOMEM);
442  goto fail;
443  }
444  }
445 
446  /* duplicate extended data */
447  if (src->extended_data != src->data) {
448  int ch = dst->ch_layout.nb_channels;
449 
450  if (!ch) {
451  ret = AVERROR(EINVAL);
452  goto fail;
453  }
454 
455  dst->extended_data = av_malloc_array(sizeof(*dst->extended_data), ch);
456  if (!dst->extended_data) {
457  ret = AVERROR(ENOMEM);
458  goto fail;
459  }
460  memcpy(dst->extended_data, src->extended_data, sizeof(*src->extended_data) * ch);
461  } else
462  dst->extended_data = dst->data;
463 
464  memcpy(dst->data, src->data, sizeof(src->data));
465  memcpy(dst->linesize, src->linesize, sizeof(src->linesize));
466 
467  return 0;
468 
469 fail:
470  av_frame_unref(dst);
471  return ret;
472 }
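
av_frame_ref() makes dst share src's refcounted buffers (duplicating the data only when src is not refcounted). A sharing sketch, not part of frame.c; share_frame is hypothetical:

#include <libavutil/frame.h>

/* Illustrative only: dst ends up referencing the same AVBuffers as src. */
static int share_frame(AVFrame *dst, const AVFrame *src)
{
    int ret = av_frame_ref(dst, src);  /* dst must be freshly allocated or unreffed */
    if (ret < 0)
        return ret;

    /* ... read dst->data[] ... */

    av_frame_unref(dst);               /* drop this reference; src is unaffected */
    return 0;
}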
473 
474 AVFrame *av_frame_clone(const AVFrame *src)
475 {
476  AVFrame *ret = av_frame_alloc();
477 
478  if (!ret)
479  return NULL;
480 
481  if (av_frame_ref(ret, src) < 0)
482  av_frame_free(&ret);
483 
484  return ret;
485 }
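
av_frame_clone() bundles av_frame_alloc() and av_frame_ref(); combined with av_frame_make_writable() (defined further down) it yields a private, modifiable copy. An illustrative sketch; snapshot is a hypothetical helper:

#include <libavutil/frame.h>

/* Illustrative only: take a private, writable snapshot of src. */
static AVFrame *snapshot(const AVFrame *src)
{
    AVFrame *copy = av_frame_clone(src);           /* shares src's buffers */
    if (copy && av_frame_make_writable(copy) < 0)  /* copies data if shared */
        av_frame_free(&copy);
    return copy;
}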
486 
487 void av_frame_unref(AVFrame *frame)
488 {
489  int i;
490 
491  if (!frame)
492  return;
493 
494  wipe_side_data(frame);
495 
496  for (i = 0; i < FF_ARRAY_ELEMS(frame->buf); i++)
497  av_buffer_unref(&frame->buf[i]);
498  for (i = 0; i < frame->nb_extended_buf; i++)
499  av_buffer_unref(&frame->extended_buf[i]);
500  av_freep(&frame->extended_buf);
501  av_dict_free(&frame->metadata);
502 
503  av_buffer_unref(&frame->hw_frames_ctx);
504 
505  av_buffer_unref(&frame->opaque_ref);
506  av_buffer_unref(&frame->private_ref);
507 
508  if (frame->extended_data != frame->data)
509  av_freep(&frame->extended_data);
510 
511  av_channel_layout_uninit(&frame->ch_layout);
512 
513  get_frame_defaults(frame);
514 }
515 
516 void av_frame_move_ref(AVFrame *dst, AVFrame *src)
517 {
518  av_assert1(dst->width == 0 && dst->height == 0);
519 #if FF_API_OLD_CHANNEL_LAYOUT
520 FF_DISABLE_DEPRECATION_WARNINGS
521  av_assert1(dst->channels == 0);
522 FF_ENABLE_DEPRECATION_WARNINGS
523 #endif
524  av_assert1(dst->ch_layout.nb_channels == 0 &&
525  dst->ch_layout.order == AV_CHANNEL_ORDER_UNSPEC);
526 
527  *dst = *src;
528  if (src->extended_data == src->data)
529  dst->extended_data = dst->data;
530  get_frame_defaults(src);
531 }
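
av_frame_move_ref() transfers ownership without touching reference counts and leaves src reset to defaults. A sketch, not part of frame.c; push_frame and the queue-slot idea are hypothetical:

#include <libavutil/frame.h>

/* Illustrative only: hand a filled frame over to a reusable slot. */
static void push_frame(AVFrame *slot, AVFrame *producer)
{
    av_frame_unref(slot);               /* dst must not hold references */
    av_frame_move_ref(slot, producer);  /* producer is now empty/reset */
}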
532 
533 int av_frame_is_writable(AVFrame *frame)
534 {
535  int i, ret = 1;
536 
537  /* assume non-refcounted frames are not writable */
538  if (!frame->buf[0])
539  return 0;
540 
541  for (i = 0; i < FF_ARRAY_ELEMS(frame->buf); i++)
542  if (frame->buf[i])
543  ret &= !!av_buffer_is_writable(frame->buf[i]);
544  for (i = 0; i < frame->nb_extended_buf; i++)
545  ret &= !!av_buffer_is_writable(frame->extended_buf[i]);
546 
547  return ret;
548 }
549 
550 int av_frame_make_writable(AVFrame *frame)
551 {
552  AVFrame tmp;
553  int ret;
554 
555  if (av_frame_is_writable(frame))
556  return 0;
557 
558  memset(&tmp, 0, sizeof(tmp));
559  tmp.format = frame->format;
560  tmp.width = frame->width;
561  tmp.height = frame->height;
562 #if FF_API_OLD_CHANNEL_LAYOUT
563 FF_DISABLE_DEPRECATION_WARNINGS
564  tmp.channels = frame->channels;
565  tmp.channel_layout = frame->channel_layout;
566 FF_ENABLE_DEPRECATION_WARNINGS
567 #endif
568  tmp.nb_samples = frame->nb_samples;
569  ret = av_channel_layout_copy(&tmp.ch_layout, &frame->ch_layout);
570  if (ret < 0) {
571  av_frame_unref(&tmp);
572  return ret;
573  }
574 
575  if (frame->hw_frames_ctx)
576  ret = av_hwframe_get_buffer(frame->hw_frames_ctx, &tmp, 0);
577  else
578  ret = av_frame_get_buffer(&tmp, 0);
579  if (ret < 0)
580  return ret;
581 
582  ret = av_frame_copy(&tmp, frame);
583  if (ret < 0) {
584  av_frame_unref(&tmp);
585  return ret;
586  }
587 
588  ret = av_frame_copy_props(&tmp, frame);
589  if (ret < 0) {
590  av_frame_unref(&tmp);
591  return ret;
592  }
593 
594  av_frame_unref(frame);
595 
596  *frame = tmp;
597  if (tmp.data == tmp.extended_data)
598  frame->extended_data = frame->data;
599 
600  return 0;
601 }
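
Typical copy-on-write use: call av_frame_make_writable() before modifying data in place so that shared buffers are duplicated first. An illustrative sketch assuming an 8-bit planar pixel format; darken_luma is hypothetical:

#include <libavutil/frame.h>

/* Illustrative only: halve the luma plane of an 8-bit planar frame in place. */
static int darken_luma(AVFrame *frame)
{
    int ret = av_frame_make_writable(frame);   /* copies data only if shared */
    if (ret < 0)
        return ret;
    for (int y = 0; y < frame->height; y++)
        for (int x = 0; x < frame->width; x++)
            frame->data[0][y * frame->linesize[0] + x] /= 2;
    return 0;
}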
602 
603 int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
604 {
605  return frame_copy_props(dst, src, 1);
606 }
607 
608 AVBufferRef *av_frame_get_plane_buffer(AVFrame *frame, int plane)
609 {
610  uint8_t *data;
611  int planes, i;
612 
613  if (frame->nb_samples) {
614  int channels = frame->ch_layout.nb_channels;
615 
616 #if FF_API_OLD_CHANNEL_LAYOUT
617 FF_DISABLE_DEPRECATION_WARNINGS
618  if (!channels) {
619  channels = frame->channels;
620  CHECK_CHANNELS_CONSISTENCY(frame);
621  }
622 FF_ENABLE_DEPRECATION_WARNINGS
623 #endif
624  if (!channels)
625  return NULL;
626  planes = av_sample_fmt_is_planar(frame->format) ? channels : 1;
627  } else
628  planes = 4;
629 
630  if (plane < 0 || plane >= planes || !frame->extended_data[plane])
631  return NULL;
632  data = frame->extended_data[plane];
633 
634  for (i = 0; i < FF_ARRAY_ELEMS(frame->buf) && frame->buf[i]; i++) {
635  AVBufferRef *buf = frame->buf[i];
636  if (data >= buf->data && data < buf->data + buf->size)
637  return buf;
638  }
639  for (i = 0; i < frame->nb_extended_buf; i++) {
640  AVBufferRef *buf = frame->extended_buf[i];
641  if (data >= buf->data && data < buf->data + buf->size)
642  return buf;
643  }
644  return NULL;
645 }
646 
647 AVFrameSideData *av_frame_new_side_data_from_buf(AVFrame *frame,
648  enum AVFrameSideDataType type,
649  AVBufferRef *buf)
650 {
651  AVFrameSideData *ret, **tmp;
652 
653  if (!buf)
654  return NULL;
655 
656  if (frame->nb_side_data > INT_MAX / sizeof(*frame->side_data) - 1)
657  return NULL;
658 
659  tmp = av_realloc(frame->side_data,
660  (frame->nb_side_data + 1) * sizeof(*frame->side_data));
661  if (!tmp)
662  return NULL;
663  frame->side_data = tmp;
664 
665  ret = av_mallocz(sizeof(*ret));
666  if (!ret)
667  return NULL;
668 
669  ret->buf = buf;
670  ret->data = ret->buf->data;
671  ret->size = buf->size;
672  ret->type = type;
673 
674  frame->side_data[frame->nb_side_data++] = ret;
675 
676  return ret;
677 }
678 
679 AVFrameSideData *av_frame_new_side_data(AVFrame *frame,
680  enum AVFrameSideDataType type,
681  size_t size)
682 {
683  AVFrameSideData *ret;
684  AVBufferRef *buf = av_buffer_alloc(size);
685  ret = av_frame_new_side_data_from_buf(frame, type, buf);
686  if (!ret)
687  av_buffer_unref(&buf);
688  return ret;
689 }
690 
691 AVFrameSideData *av_frame_get_side_data(const AVFrame *frame,
692  enum AVFrameSideDataType type)
693 {
694  int i;
695 
696  for (i = 0; i < frame->nb_side_data; i++) {
697  if (frame->side_data[i]->type == type)
698  return frame->side_data[i];
699  }
700  return NULL;
701 }
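
A sketch of attaching side data and looking it up again, here a display matrix built with av_display_rotation_set() from libavutil/display.h. Illustrative only, not part of frame.c; tag_rotation is hypothetical:

#include <libavutil/display.h>
#include <libavutil/frame.h>

/* Illustrative only: tag a frame with a rotation, then read it back. */
static int tag_rotation(AVFrame *frame, double angle)
{
    AVFrameSideData *sd = av_frame_new_side_data(frame, AV_FRAME_DATA_DISPLAYMATRIX,
                                                 9 * sizeof(int32_t));
    if (!sd)
        return AVERROR(ENOMEM);
    av_display_rotation_set((int32_t *)sd->data, angle);

    sd = av_frame_get_side_data(frame, AV_FRAME_DATA_DISPLAYMATRIX);
    return sd ? 0 : AVERROR_BUG;
}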
702 
703 static int frame_copy_video(AVFrame *dst, const AVFrame *src)
704 {
705  const uint8_t *src_data[4];
706  int i, planes;
707 
708  if (dst->width < src->width ||
709  dst->height < src->height)
710  return AVERROR(EINVAL);
711 
712  if (src->hw_frames_ctx || dst->hw_frames_ctx)
713  return av_hwframe_transfer_data(dst, src, 0);
714 
715  planes = av_pix_fmt_count_planes(dst->format);
716  for (i = 0; i < planes; i++)
717  if (!dst->data[i] || !src->data[i])
718  return AVERROR(EINVAL);
719 
720  memcpy(src_data, src->data, sizeof(src_data));
721  av_image_copy(dst->data, dst->linesize,
722  src_data, src->linesize,
723  dst->format, src->width, src->height);
724 
725  return 0;
726 }
727 
728 static int frame_copy_audio(AVFrame *dst, const AVFrame *src)
729 {
730  int planar = av_sample_fmt_is_planar(dst->format);
731  int channels = dst->ch_layout.nb_channels;
732  int planes = planar ? channels : 1;
733  int i;
734 
735 #if FF_API_OLD_CHANNEL_LAYOUT
736 FF_DISABLE_DEPRECATION_WARNINGS
737  if (!channels || !src->ch_layout.nb_channels) {
738  if (dst->channels != src->channels ||
739  dst->channel_layout != src->channel_layout)
740  return AVERROR(EINVAL);
741  CHECK_CHANNELS_CONSISTENCY(src);
742  }
743  if (!channels) {
744  channels = dst->channels;
745  planes = planar ? channels : 1;
746  }
747 FF_ENABLE_DEPRECATION_WARNINGS
748 #endif
749 
750  if (dst->nb_samples != src->nb_samples ||
751 #if FF_API_OLD_CHANNEL_LAYOUT
752  (av_channel_layout_check(&dst->ch_layout) &&
753  av_channel_layout_check(&src->ch_layout) &&
754 #endif
755  av_channel_layout_compare(&dst->ch_layout, &src->ch_layout))
756 #if FF_API_OLD_CHANNEL_LAYOUT
757  )
758 #endif
759  return AVERROR(EINVAL);
760 
761  for (i = 0; i < planes; i++)
762  if (!dst->extended_data[i] || !src->extended_data[i])
763  return AVERROR(EINVAL);
764 
765  av_samples_copy(dst->extended_data, src->extended_data, 0, 0,
766  dst->nb_samples, channels, dst->format);
767 
768  return 0;
769 }
770 
771 int av_frame_copy(AVFrame *dst, const AVFrame *src)
772 {
773  if (dst->format != src->format || dst->format < 0)
774  return AVERROR(EINVAL);
775 
776 FF_DISABLE_DEPRECATION_WARNINGS
777  if (dst->width > 0 && dst->height > 0)
778  return frame_copy_video(dst, src);
779  else if (dst->nb_samples > 0 &&
780  (av_channel_layout_check(&dst->ch_layout)
781 #if FF_API_OLD_CHANNEL_LAYOUT
782  || dst->channels > 0
783 #endif
784  ))
785  return frame_copy_audio(dst, src);
786 FF_ENABLE_DEPRECATION_WARNINGS
787 
788  return AVERROR(EINVAL);
789 }
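
av_frame_copy() copies only the data, not properties, and requires dst to already have matching format/geometry and allocated buffers. A deep-copy sketch for a video frame, not part of frame.c; deep_copy_video is hypothetical:

#include <libavutil/frame.h>

/* Illustrative only: deep-copy a video frame into a freshly allocated dst. */
static int deep_copy_video(AVFrame *dst, const AVFrame *src)
{
    int ret;

    dst->format = src->format;
    dst->width  = src->width;
    dst->height = src->height;
    if ((ret = av_frame_get_buffer(dst, 0)) < 0)
        return ret;

    if ((ret = av_frame_copy(dst, src)) < 0)   /* data planes */
        return ret;
    return av_frame_copy_props(dst, src);      /* metadata and side data */
}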
790 
791 void av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type)
792 {
793  int i;
794 
795  for (i = frame->nb_side_data - 1; i >= 0; i--) {
796  AVFrameSideData *sd = frame->side_data[i];
797  if (sd->type == type) {
798  free_side_data(&frame->side_data[i]);
799  frame->side_data[i] = frame->side_data[frame->nb_side_data - 1];
800  frame->nb_side_data--;
801  }
802  }
803 }
804 
805 const char *av_frame_side_data_name(enum AVFrameSideDataType type)
806 {
807  switch(type) {
808  case AV_FRAME_DATA_PANSCAN: return "AVPanScan";
809  case AV_FRAME_DATA_A53_CC: return "ATSC A53 Part 4 Closed Captions";
810  case AV_FRAME_DATA_STEREO3D: return "Stereo 3D";
811  case AV_FRAME_DATA_MATRIXENCODING: return "AVMatrixEncoding";
812  case AV_FRAME_DATA_DOWNMIX_INFO: return "Metadata relevant to a downmix procedure";
813  case AV_FRAME_DATA_REPLAYGAIN: return "AVReplayGain";
814  case AV_FRAME_DATA_DISPLAYMATRIX: return "3x3 displaymatrix";
815  case AV_FRAME_DATA_AFD: return "Active format description";
816  case AV_FRAME_DATA_MOTION_VECTORS: return "Motion vectors";
817  case AV_FRAME_DATA_SKIP_SAMPLES: return "Skip samples";
818  case AV_FRAME_DATA_AUDIO_SERVICE_TYPE: return "Audio service type";
819  case AV_FRAME_DATA_MASTERING_DISPLAY_METADATA: return "Mastering display metadata";
820  case AV_FRAME_DATA_CONTENT_LIGHT_LEVEL: return "Content light level metadata";
821  case AV_FRAME_DATA_GOP_TIMECODE: return "GOP timecode";
822  case AV_FRAME_DATA_S12M_TIMECODE: return "SMPTE 12-1 timecode";
823  case AV_FRAME_DATA_SPHERICAL: return "Spherical Mapping";
824  case AV_FRAME_DATA_ICC_PROFILE: return "ICC profile";
825  case AV_FRAME_DATA_DYNAMIC_HDR_PLUS: return "HDR Dynamic Metadata SMPTE2094-40 (HDR10+)";
826  case AV_FRAME_DATA_DYNAMIC_HDR_VIVID: return "HDR Dynamic Metadata CUVA 005.1 2021 (Vivid)";
827  case AV_FRAME_DATA_REGIONS_OF_INTEREST: return "Regions Of Interest";
828  case AV_FRAME_DATA_VIDEO_ENC_PARAMS: return "Video encoding parameters";
829  case AV_FRAME_DATA_SEI_UNREGISTERED: return "H.26[45] User Data Unregistered SEI message";
830  case AV_FRAME_DATA_FILM_GRAIN_PARAMS: return "Film grain parameters";
831  case AV_FRAME_DATA_DETECTION_BBOXES: return "Bounding boxes for object detection and classification";
832  case AV_FRAME_DATA_DOVI_RPU_BUFFER: return "Dolby Vision RPU Data";
833  case AV_FRAME_DATA_DOVI_METADATA: return "Dolby Vision Metadata";
834  }
835  return NULL;
836 }
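
A small diagnostic sketch built on the name lookup above; dump_side_data is hypothetical and not part of frame.c:

#include <stdio.h>
#include <libavutil/frame.h>

/* Illustrative only: list the side data attached to a frame. */
static void dump_side_data(const AVFrame *frame)
{
    for (int i = 0; i < frame->nb_side_data; i++) {
        const AVFrameSideData *sd = frame->side_data[i];
        const char *name = av_frame_side_data_name(sd->type);
        printf("side data %d: %s (%zu bytes)\n", i, name ? name : "unknown", sd->size);
    }
}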
837 
838 static int calc_cropping_offsets(size_t offsets[4], const AVFrame *frame,
839  const AVPixFmtDescriptor *desc)
840 {
841  int i, j;
842 
843  for (i = 0; frame->data[i]; i++) {
844  const AVComponentDescriptor *comp = NULL;
845  int shift_x = (i == 1 || i == 2) ? desc->log2_chroma_w : 0;
846  int shift_y = (i == 1 || i == 2) ? desc->log2_chroma_h : 0;
847 
848  if (desc->flags & AV_PIX_FMT_FLAG_PAL && i == 1) {
849  offsets[i] = 0;
850  break;
851  }
852 
853  /* find any component descriptor for this plane */
854  for (j = 0; j < desc->nb_components; j++) {
855  if (desc->comp[j].plane == i) {
856  comp = &desc->comp[j];
857  break;
858  }
859  }
860  if (!comp)
861  return AVERROR_BUG;
862 
863  offsets[i] = (frame->crop_top >> shift_y) * frame->linesize[i] +
864  (frame->crop_left >> shift_x) * comp->step;
865  }
866 
867  return 0;
868 }
869 
870 int av_frame_apply_cropping(AVFrame *frame, int flags)
871 {
872  const AVPixFmtDescriptor *desc;
873  size_t offsets[4];
874  int i;
875 
876  if (!(frame->width > 0 && frame->height > 0))
877  return AVERROR(EINVAL);
878 
879  if (frame->crop_left >= INT_MAX - frame->crop_right ||
880  frame->crop_top >= INT_MAX - frame->crop_bottom ||
881  (frame->crop_left + frame->crop_right) >= frame->width ||
882  (frame->crop_top + frame->crop_bottom) >= frame->height)
883  return AVERROR(ERANGE);
884 
885  desc = av_pix_fmt_desc_get(frame->format);
886  if (!desc)
887  return AVERROR_BUG;
888 
889  /* Apply just the right/bottom cropping for hwaccel formats. Bitstream
890  * formats cannot be easily handled here either (and corresponding decoders
891  * should not export any cropping anyway), so do the same for those as well.
892  * */
893  if (desc->flags & (AV_PIX_FMT_FLAG_BITSTREAM | AV_PIX_FMT_FLAG_HWACCEL)) {
894  frame->width -= frame->crop_right;
895  frame->height -= frame->crop_bottom;
896  frame->crop_right = 0;
897  frame->crop_bottom = 0;
898  return 0;
899  }
900 
901  /* calculate the offsets for each plane */
902  calc_cropping_offsets(offsets, frame, desc);
903 
904  /* adjust the offsets to avoid breaking alignment */
905  if (!(flags & AV_FRAME_CROP_UNALIGNED)) {
906  int log2_crop_align = frame->crop_left ? ff_ctz(frame->crop_left) : INT_MAX;
907  int min_log2_align = INT_MAX;
908 
909  for (i = 0; frame->data[i]; i++) {
910  int log2_align = offsets[i] ? ff_ctz(offsets[i]) : INT_MAX;
911  min_log2_align = FFMIN(log2_align, min_log2_align);
912  }
913 
914  /* we assume, and it should always be true, that the data alignment is
915  * related to the cropping alignment by a constant power-of-2 factor */
916  if (log2_crop_align < min_log2_align)
917  return AVERROR_BUG;
918 
919  if (min_log2_align < 5) {
920  frame->crop_left &= ~((1 << (5 + log2_crop_align - min_log2_align)) - 1);
921  calc_cropping_offsets(offsets, frame, desc);
922  }
923  }
924 
925  for (i = 0; frame->data[i]; i++)
926  frame->data[i] += offsets[i];
927 
928  frame->width -= (frame->crop_left + frame->crop_right);
929  frame->height -= (frame->crop_top + frame->crop_bottom);
930  frame->crop_left = 0;
931  frame->crop_right = 0;
932  frame->crop_top = 0;
933  frame->crop_bottom = 0;
934 
935  return 0;
936 }
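
Decoders export cropping through the crop_* fields; a caller applies it when data[], width and height should describe only the visible region. An illustrative sketch, not part of frame.c; crop_to_visible is hypothetical:

#include <libavutil/frame.h>

/* Illustrative only: shrink a decoded frame to its visible area. */
static int crop_to_visible(AVFrame *frame)
{
    /* Pass AV_FRAME_CROP_UNALIGNED instead of 0 to crop exactly,
     * at the cost of possibly unaligned data pointers. */
    return av_frame_apply_cropping(frame, 0);
}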