/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "channel_layout.h"
#include "avassert.h"
#include "buffer.h"
#include "common.h"
#include "cpu.h"
#include "dict.h"
#include "frame.h"
#include "imgutils.h"
#include "mem.h"
#include "samplefmt.h"
#include "hwcontext.h"

#if FF_API_OLD_CHANNEL_LAYOUT
#define CHECK_CHANNELS_CONSISTENCY(frame) \
    av_assert2(!(frame)->channel_layout || \
               (frame)->channels == \
               av_get_channel_layout_nb_channels((frame)->channel_layout))
#endif

static void get_frame_defaults(AVFrame *frame)
{
    memset(frame, 0, sizeof(*frame));

    frame->pts =
    frame->pkt_dts = AV_NOPTS_VALUE;
    frame->best_effort_timestamp = AV_NOPTS_VALUE;
    frame->duration = 0;
#if FF_API_PKT_DURATION
FF_DISABLE_DEPRECATION_WARNINGS
    frame->pkt_duration = 0;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
    frame->pkt_pos = -1;
    frame->pkt_size = -1;
    frame->time_base = (AVRational){ 0, 1 };
    frame->key_frame = 1;
    frame->sample_aspect_ratio = (AVRational){ 0, 1 };
    frame->format = -1; /* unknown */
    frame->extended_data = frame->data;
    frame->color_primaries = AVCOL_PRI_UNSPECIFIED;
    frame->color_trc = AVCOL_TRC_UNSPECIFIED;
    frame->colorspace = AVCOL_SPC_UNSPECIFIED;
    frame->color_range = AVCOL_RANGE_UNSPECIFIED;
    frame->chroma_location = AVCHROMA_LOC_UNSPECIFIED;
    frame->flags = 0;
}

static void free_side_data(AVFrameSideData **ptr_sd)
{
    AVFrameSideData *sd = *ptr_sd;

    av_buffer_unref(&sd->buf);
    av_dict_free(&sd->metadata);
    av_freep(ptr_sd);
}

static void wipe_side_data(AVFrame *frame)
{
    int i;

    for (i = 0; i < frame->nb_side_data; i++) {
        free_side_data(&frame->side_data[i]);
    }
    frame->nb_side_data = 0;

    av_freep(&frame->side_data);
}

AVFrame *av_frame_alloc(void)
{
    AVFrame *frame = av_malloc(sizeof(*frame));

    if (!frame)
        return NULL;

    get_frame_defaults(frame);

    return frame;
}

void av_frame_free(AVFrame **frame)
{
    if (!frame || !*frame)
        return;

    av_frame_unref(*frame);
    av_freep(frame);
}
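
/**
 * Usage sketch (illustrative): typical ownership pattern for the allocation
 * functions above. av_frame_alloc() only creates the AVFrame struct itself;
 * buffers are attached later (by a decoder, av_frame_ref(), or
 * av_frame_get_buffer()), and av_frame_free() releases both.
 *
 * @code
 * AVFrame *frame = av_frame_alloc();
 * if (!frame)
 *     return AVERROR(ENOMEM);
 * // ... use the frame ...
 * av_frame_free(&frame);   // frees the data and sets frame to NULL
 * @endcode
 */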

static int get_video_buffer(AVFrame *frame, int align)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
    int ret, i, padded_height, total_size;
    int plane_padding = FFMAX(16 + 16/*STRIDE_ALIGN*/, align);
    ptrdiff_t linesizes[4];
    size_t sizes[4];

    if (!desc)
        return AVERROR(EINVAL);

    if ((ret = av_image_check_size(frame->width, frame->height, 0, NULL)) < 0)
        return ret;

    if (!frame->linesize[0]) {
        if (align <= 0)
            align = 32; /* STRIDE_ALIGN. Should be av_cpu_max_align() */

        for (i = 1; i <= align; i += i) {
            ret = av_image_fill_linesizes(frame->linesize, frame->format,
                                          FFALIGN(frame->width, i));
            if (ret < 0)
                return ret;
            if (!(frame->linesize[0] & (align - 1)))
                break;
        }

        for (i = 0; i < 4 && frame->linesize[i]; i++)
            frame->linesize[i] = FFALIGN(frame->linesize[i], align);
    }

    for (i = 0; i < 4; i++)
        linesizes[i] = frame->linesize[i];

    padded_height = FFALIGN(frame->height, 32);
    if ((ret = av_image_fill_plane_sizes(sizes, frame->format,
                                         padded_height, linesizes)) < 0)
        return ret;

    total_size = 4 * plane_padding;
    for (i = 0; i < 4; i++) {
        if (sizes[i] > INT_MAX - total_size)
            return AVERROR(EINVAL);
        total_size += sizes[i];
    }

    frame->buf[0] = av_buffer_alloc(total_size);
    if (!frame->buf[0]) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    if ((ret = av_image_fill_pointers(frame->data, frame->format, padded_height,
                                      frame->buf[0]->data, frame->linesize)) < 0)
        goto fail;

    for (i = 1; i < 4; i++) {
        if (frame->data[i])
            frame->data[i] += i * plane_padding;
    }

    frame->extended_data = frame->data;

    return 0;
fail:
    av_frame_unref(frame);
    return ret;
}

static int get_audio_buffer(AVFrame *frame, int align)
{
    int planar = av_sample_fmt_is_planar(frame->format);
    int channels, planes;
    int ret, i;

#if FF_API_OLD_CHANNEL_LAYOUT
FF_DISABLE_DEPRECATION_WARNINGS
    if (!frame->ch_layout.nb_channels) {
        if (frame->channel_layout) {
            av_channel_layout_from_mask(&frame->ch_layout, frame->channel_layout);
        } else {
            frame->ch_layout.nb_channels = frame->channels;
            frame->ch_layout.order = AV_CHANNEL_ORDER_UNSPEC;
        }
    }
    frame->channels = frame->ch_layout.nb_channels;
    frame->channel_layout = frame->ch_layout.order == AV_CHANNEL_ORDER_NATIVE ?
                            frame->ch_layout.u.mask : 0;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
    channels = frame->ch_layout.nb_channels;
    planes = planar ? channels : 1;
    if (!frame->linesize[0]) {
        ret = av_samples_get_buffer_size(&frame->linesize[0], channels,
                                         frame->nb_samples, frame->format,
                                         align);
        if (ret < 0)
            return ret;
    }

    if (planes > AV_NUM_DATA_POINTERS) {
        frame->extended_data = av_calloc(planes,
                                         sizeof(*frame->extended_data));
        frame->extended_buf = av_calloc(planes - AV_NUM_DATA_POINTERS,
                                        sizeof(*frame->extended_buf));
        if (!frame->extended_data || !frame->extended_buf) {
            av_freep(&frame->extended_data);
            av_freep(&frame->extended_buf);
            return AVERROR(ENOMEM);
        }
        frame->nb_extended_buf = planes - AV_NUM_DATA_POINTERS;
    } else
        frame->extended_data = frame->data;

    for (i = 0; i < FFMIN(planes, AV_NUM_DATA_POINTERS); i++) {
        frame->buf[i] = av_buffer_alloc(frame->linesize[0]);
        if (!frame->buf[i]) {
            av_frame_unref(frame);
            return AVERROR(ENOMEM);
        }
        frame->extended_data[i] = frame->data[i] = frame->buf[i]->data;
    }
    for (i = 0; i < planes - AV_NUM_DATA_POINTERS; i++) {
        frame->extended_buf[i] = av_buffer_alloc(frame->linesize[0]);
        if (!frame->extended_buf[i]) {
            av_frame_unref(frame);
            return AVERROR(ENOMEM);
        }
        frame->extended_data[i + AV_NUM_DATA_POINTERS] = frame->extended_buf[i]->data;
    }
    return 0;

}

int av_frame_get_buffer(AVFrame *frame, int align)
{
    if (frame->format < 0)
        return AVERROR(EINVAL);

FF_DISABLE_DEPRECATION_WARNINGS
    if (frame->width > 0 && frame->height > 0)
        return get_video_buffer(frame, align);
    else if (frame->nb_samples > 0 &&
             (av_channel_layout_check(&frame->ch_layout)
#if FF_API_OLD_CHANNEL_LAYOUT
              || frame->channel_layout || frame->channels > 0
#endif
             ))
        return get_audio_buffer(frame, align);
FF_ENABLE_DEPRECATION_WARNINGS

    return AVERROR(EINVAL);
}
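
/**
 * Usage sketch (illustrative): allocating a refcounted video frame with
 * av_frame_get_buffer(). The pixel format and dimensions below are example
 * values only; align == 0 lets the function pick a suitable alignment.
 *
 * @code
 * AVFrame *frame = av_frame_alloc();
 * int ret = AVERROR(ENOMEM);
 * if (frame) {
 *     frame->format = AV_PIX_FMT_YUV420P;
 *     frame->width  = 640;
 *     frame->height = 480;
 *     ret = av_frame_get_buffer(frame, 0);
 * }
 * if (ret < 0)
 *     av_frame_free(&frame);
 * @endcode
 */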

static int frame_copy_props(AVFrame *dst, const AVFrame *src, int force_copy)
{
    int ret, i;

    dst->key_frame = src->key_frame;
    dst->pict_type = src->pict_type;
    dst->sample_aspect_ratio = src->sample_aspect_ratio;
    dst->crop_top = src->crop_top;
    dst->crop_bottom = src->crop_bottom;
    dst->crop_left = src->crop_left;
    dst->crop_right = src->crop_right;
    dst->pts = src->pts;
    dst->duration = src->duration;
    dst->repeat_pict = src->repeat_pict;
    dst->interlaced_frame = src->interlaced_frame;
    dst->top_field_first = src->top_field_first;
    dst->palette_has_changed = src->palette_has_changed;
    dst->sample_rate = src->sample_rate;
    dst->opaque = src->opaque;
    dst->pkt_dts = src->pkt_dts;
    dst->pkt_pos = src->pkt_pos;
    dst->pkt_size = src->pkt_size;
#if FF_API_PKT_DURATION
FF_DISABLE_DEPRECATION_WARNINGS
    dst->pkt_duration = src->pkt_duration;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
    dst->time_base = src->time_base;
#if FF_API_REORDERED_OPAQUE
FF_DISABLE_DEPRECATION_WARNINGS
    dst->reordered_opaque = src->reordered_opaque;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
    dst->quality = src->quality;
    dst->best_effort_timestamp = src->best_effort_timestamp;
#if FF_API_FRAME_PICTURE_NUMBER
FF_DISABLE_DEPRECATION_WARNINGS
    dst->coded_picture_number = src->coded_picture_number;
    dst->display_picture_number = src->display_picture_number;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
    dst->flags = src->flags;
    dst->decode_error_flags = src->decode_error_flags;
    dst->color_primaries = src->color_primaries;
    dst->color_trc = src->color_trc;
    dst->colorspace = src->colorspace;
    dst->color_range = src->color_range;
    dst->chroma_location = src->chroma_location;

    av_dict_copy(&dst->metadata, src->metadata, 0);

    for (i = 0; i < src->nb_side_data; i++) {
        const AVFrameSideData *sd_src = src->side_data[i];
        AVFrameSideData *sd_dst;
        if (   sd_src->type == AV_FRAME_DATA_PANSCAN
            && (src->width != dst->width || src->height != dst->height))
            continue;
        if (force_copy) {
            sd_dst = av_frame_new_side_data(dst, sd_src->type,
                                            sd_src->size);
            if (!sd_dst) {
                wipe_side_data(dst);
                return AVERROR(ENOMEM);
            }
            memcpy(sd_dst->data, sd_src->data, sd_src->size);
        } else {
            AVBufferRef *ref = av_buffer_ref(sd_src->buf);
            sd_dst = av_frame_new_side_data_from_buf(dst, sd_src->type, ref);
            if (!sd_dst) {
                av_buffer_unref(&ref);
                wipe_side_data(dst);
                return AVERROR(ENOMEM);
            }
        }
        av_dict_copy(&sd_dst->metadata, sd_src->metadata, 0);
    }

    ret = av_buffer_replace(&dst->opaque_ref, src->opaque_ref);
    ret |= av_buffer_replace(&dst->private_ref, src->private_ref);
    return ret;
}

int av_frame_ref(AVFrame *dst, const AVFrame *src)
{
    int i, ret = 0;

    av_assert1(dst->width == 0 && dst->height == 0);
#if FF_API_OLD_CHANNEL_LAYOUT
FF_DISABLE_DEPRECATION_WARNINGS
    av_assert1(dst->channels == 0);
FF_ENABLE_DEPRECATION_WARNINGS
#endif
    av_assert1(dst->ch_layout.nb_channels == 0 &&
               dst->ch_layout.order == AV_CHANNEL_ORDER_UNSPEC);

    dst->format = src->format;
    dst->width = src->width;
    dst->height = src->height;
    dst->nb_samples = src->nb_samples;
#if FF_API_OLD_CHANNEL_LAYOUT
FF_DISABLE_DEPRECATION_WARNINGS
    dst->channels = src->channels;
    dst->channel_layout = src->channel_layout;
    if (!av_channel_layout_check(&src->ch_layout)) {
        if (src->channel_layout)
            av_channel_layout_from_mask(&dst->ch_layout, src->channel_layout);
        else {
            dst->ch_layout.nb_channels = src->channels;
            dst->ch_layout.order = AV_CHANNEL_ORDER_UNSPEC;
        }
    }
FF_ENABLE_DEPRECATION_WARNINGS
#endif

    ret = frame_copy_props(dst, src, 0);
    if (ret < 0)
        goto fail;

    // this check is needed only until FF_API_OLD_CHANNEL_LAYOUT is out
    if (av_channel_layout_check(&src->ch_layout)) {
        ret = av_channel_layout_copy(&dst->ch_layout, &src->ch_layout);
        if (ret < 0)
            goto fail;
    }

    /* duplicate the frame data if it's not refcounted */
    if (!src->buf[0]) {
        ret = av_frame_get_buffer(dst, 0);
        if (ret < 0)
            goto fail;

        ret = av_frame_copy(dst, src);
        if (ret < 0)
            goto fail;

        return 0;
    }

    /* ref the buffers */
    for (i = 0; i < FF_ARRAY_ELEMS(src->buf); i++) {
        if (!src->buf[i])
            continue;
        dst->buf[i] = av_buffer_ref(src->buf[i]);
        if (!dst->buf[i]) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    if (src->extended_buf) {
        dst->extended_buf = av_calloc(src->nb_extended_buf,
                                      sizeof(*dst->extended_buf));
        if (!dst->extended_buf) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        dst->nb_extended_buf = src->nb_extended_buf;

        for (i = 0; i < src->nb_extended_buf; i++) {
            dst->extended_buf[i] = av_buffer_ref(src->extended_buf[i]);
            if (!dst->extended_buf[i]) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }
        }
    }

    if (src->hw_frames_ctx) {
        dst->hw_frames_ctx = av_buffer_ref(src->hw_frames_ctx);
        if (!dst->hw_frames_ctx) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    /* duplicate extended data */
    if (src->extended_data != src->data) {
        int ch = dst->ch_layout.nb_channels;

        if (!ch) {
            ret = AVERROR(EINVAL);
            goto fail;
        }

        dst->extended_data = av_malloc_array(sizeof(*dst->extended_data), ch);
        if (!dst->extended_data) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        memcpy(dst->extended_data, src->extended_data, sizeof(*src->extended_data) * ch);
    } else
        dst->extended_data = dst->data;

    memcpy(dst->data, src->data, sizeof(src->data));
    memcpy(dst->linesize, src->linesize, sizeof(src->linesize));

    return 0;

fail:
    av_frame_unref(dst);
    return ret;
}
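
/**
 * Usage sketch (illustrative): creating a second reference to a decoded
 * frame. On success dst shares the same refcounted buffers as src; on
 * failure dst is left unreferenced and should simply be freed.
 *
 * @code
 * AVFrame *dst = av_frame_alloc();
 * if (!dst)
 *     return AVERROR(ENOMEM);
 * if (av_frame_ref(dst, src) < 0)
 *     av_frame_free(&dst);
 * @endcode
 */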

AVFrame *av_frame_clone(const AVFrame *src)
{
    AVFrame *ret = av_frame_alloc();

    if (!ret)
        return NULL;

    if (av_frame_ref(ret, src) < 0)
        av_frame_free(&ret);

    return ret;
}

void av_frame_unref(AVFrame *frame)
{
    int i;

    if (!frame)
        return;

    wipe_side_data(frame);

    for (i = 0; i < FF_ARRAY_ELEMS(frame->buf); i++)
        av_buffer_unref(&frame->buf[i]);
    for (i = 0; i < frame->nb_extended_buf; i++)
        av_buffer_unref(&frame->extended_buf[i]);
    av_freep(&frame->extended_buf);
    av_dict_free(&frame->metadata);

    av_buffer_unref(&frame->hw_frames_ctx);

    av_buffer_unref(&frame->opaque_ref);
    av_buffer_unref(&frame->private_ref);

    if (frame->extended_data != frame->data)
        av_freep(&frame->extended_data);

    av_channel_layout_uninit(&frame->ch_layout);

    get_frame_defaults(frame);
}

void av_frame_move_ref(AVFrame *dst, AVFrame *src)
{
    av_assert1(dst->width == 0 && dst->height == 0);
#if FF_API_OLD_CHANNEL_LAYOUT
FF_DISABLE_DEPRECATION_WARNINGS
    av_assert1(dst->channels == 0);
FF_ENABLE_DEPRECATION_WARNINGS
#endif
    av_assert1(dst->ch_layout.nb_channels == 0 &&
               dst->ch_layout.order == AV_CHANNEL_ORDER_UNSPEC);

    *dst = *src;
    if (src->extended_data == src->data)
        dst->extended_data = dst->data;
    get_frame_defaults(src);
}
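
/**
 * Usage sketch (illustrative): handing a frame over to another owner without
 * touching reference counts. Ownership of all buffers moves from src to the
 * freshly allocated dst, and src is reset to the state of a new frame.
 *
 * @code
 * AVFrame *owned = av_frame_alloc();
 * if (!owned)
 *     return AVERROR(ENOMEM);
 * av_frame_move_ref(owned, src);   // src is now blank, owned holds the data
 * @endcode
 */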

int av_frame_is_writable(AVFrame *frame)
{
    int i, ret = 1;

    /* assume non-refcounted frames are not writable */
    if (!frame->buf[0])
        return 0;

    for (i = 0; i < FF_ARRAY_ELEMS(frame->buf); i++)
        if (frame->buf[i])
            ret &= !!av_buffer_is_writable(frame->buf[i]);
    for (i = 0; i < frame->nb_extended_buf; i++)
        ret &= !!av_buffer_is_writable(frame->extended_buf[i]);

    return ret;
}

int av_frame_make_writable(AVFrame *frame)
{
    AVFrame tmp;
    int ret;

    if (av_frame_is_writable(frame))
        return 0;

    memset(&tmp, 0, sizeof(tmp));
    tmp.format = frame->format;
    tmp.width = frame->width;
    tmp.height = frame->height;
#if FF_API_OLD_CHANNEL_LAYOUT
FF_DISABLE_DEPRECATION_WARNINGS
    tmp.channels = frame->channels;
    tmp.channel_layout = frame->channel_layout;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
    tmp.nb_samples = frame->nb_samples;
    ret = av_channel_layout_copy(&tmp.ch_layout, &frame->ch_layout);
    if (ret < 0) {
        av_frame_unref(&tmp);
        return ret;
    }

    if (frame->hw_frames_ctx)
        ret = av_hwframe_get_buffer(frame->hw_frames_ctx, &tmp, 0);
    else
        ret = av_frame_get_buffer(&tmp, 0);
    if (ret < 0)
        return ret;

    ret = av_frame_copy(&tmp, frame);
    if (ret < 0) {
        av_frame_unref(&tmp);
        return ret;
    }

    ret = av_frame_copy_props(&tmp, frame);
    if (ret < 0) {
        av_frame_unref(&tmp);
        return ret;
    }

    av_frame_unref(frame);

    *frame = tmp;
    if (tmp.data == tmp.extended_data)
        frame->extended_data = frame->data;

    return 0;
}
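
/**
 * Usage sketch (illustrative): modifying a possibly shared frame in place.
 * If the buffers are shared with another reference, the call below replaces
 * them with a private copy before the frame is written to.
 *
 * @code
 * int ret = av_frame_make_writable(frame);
 * if (ret < 0)
 *     return ret;
 * frame->data[0][0] = 0;   // now safe: no other reference sees this write
 * @endcode
 */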

int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
{
    return frame_copy_props(dst, src, 1);
}

AVBufferRef *av_frame_get_plane_buffer(AVFrame *frame, int plane)
{
    uint8_t *data;
    int planes, i;

    if (frame->nb_samples) {
        int channels = frame->ch_layout.nb_channels;

#if FF_API_OLD_CHANNEL_LAYOUT
FF_DISABLE_DEPRECATION_WARNINGS
        if (!channels) {
            channels = frame->channels;
            CHECK_CHANNELS_CONSISTENCY(frame);
        }
FF_ENABLE_DEPRECATION_WARNINGS
#endif
        if (!channels)
            return NULL;
        planes = av_sample_fmt_is_planar(frame->format) ? channels : 1;
    } else
        planes = 4;

    if (plane < 0 || plane >= planes || !frame->extended_data[plane])
        return NULL;
    data = frame->extended_data[plane];

    for (i = 0; i < FF_ARRAY_ELEMS(frame->buf) && frame->buf[i]; i++) {
        AVBufferRef *buf = frame->buf[i];
        if (data >= buf->data && data < buf->data + buf->size)
            return buf;
    }
    for (i = 0; i < frame->nb_extended_buf; i++) {
        AVBufferRef *buf = frame->extended_buf[i];
        if (data >= buf->data && data < buf->data + buf->size)
            return buf;
    }
    return NULL;
}

AVFrameSideData *av_frame_new_side_data_from_buf(AVFrame *frame,
                                                 enum AVFrameSideDataType type,
                                                 AVBufferRef *buf)
{
    AVFrameSideData *ret, **tmp;

    if (!buf)
        return NULL;

    if (frame->nb_side_data > INT_MAX / sizeof(*frame->side_data) - 1)
        return NULL;

    tmp = av_realloc(frame->side_data,
                     (frame->nb_side_data + 1) * sizeof(*frame->side_data));
    if (!tmp)
        return NULL;
    frame->side_data = tmp;

    ret = av_mallocz(sizeof(*ret));
    if (!ret)
        return NULL;

    ret->buf = buf;
    ret->data = ret->buf->data;
    ret->size = buf->size;
    ret->type = type;

    frame->side_data[frame->nb_side_data++] = ret;

    return ret;
}

AVFrameSideData *av_frame_new_side_data(AVFrame *frame,
                                        enum AVFrameSideDataType type,
                                        size_t size)
{
    AVFrameSideData *ret;
    AVBufferRef *buf = av_buffer_alloc(size);
    ret = av_frame_new_side_data_from_buf(frame, type, buf);
    if (!ret)
        av_buffer_unref(&buf);
    return ret;
}

AVFrameSideData *av_frame_get_side_data(const AVFrame *frame,
                                        enum AVFrameSideDataType type)
{
    int i;

    for (i = 0; i < frame->nb_side_data; i++) {
        if (frame->side_data[i]->type == type)
            return frame->side_data[i];
    }
    return NULL;
}
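
/**
 * Usage sketch (illustrative): attaching side data to a frame and reading it
 * back. The type and payload below are examples only; real callers fill the
 * buffer with the structure matching the chosen AVFrameSideDataType.
 *
 * @code
 * AVFrameSideData *sd = av_frame_new_side_data(frame, AV_FRAME_DATA_DISPLAYMATRIX,
 *                                              9 * sizeof(int32_t));
 * if (!sd)
 *     return AVERROR(ENOMEM);
 * memset(sd->data, 0, sd->size);
 *
 * sd = av_frame_get_side_data(frame, AV_FRAME_DATA_DISPLAYMATRIX); // the entry above
 * @endcode
 */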

static int frame_copy_video(AVFrame *dst, const AVFrame *src)
{
    const uint8_t *src_data[4];
    int i, planes;

    if (dst->width  < src->width ||
        dst->height < src->height)
        return AVERROR(EINVAL);

    if (src->hw_frames_ctx || dst->hw_frames_ctx)
        return av_hwframe_transfer_data(dst, src, 0);

    planes = av_pix_fmt_count_planes(dst->format);
    for (i = 0; i < planes; i++)
        if (!dst->data[i] || !src->data[i])
            return AVERROR(EINVAL);

    memcpy(src_data, src->data, sizeof(src_data));
    av_image_copy(dst->data, dst->linesize,
                  src_data, src->linesize,
                  dst->format, src->width, src->height);

    return 0;
}

static int frame_copy_audio(AVFrame *dst, const AVFrame *src)
{
    int planar = av_sample_fmt_is_planar(dst->format);
    int channels = dst->ch_layout.nb_channels;
    int planes = planar ? channels : 1;
    int i;

#if FF_API_OLD_CHANNEL_LAYOUT
FF_DISABLE_DEPRECATION_WARNINGS
    if (!channels || !src->ch_layout.nb_channels) {
        if (dst->channels       != src->channels ||
            dst->channel_layout != src->channel_layout)
            return AVERROR(EINVAL);
        CHECK_CHANNELS_CONSISTENCY(src);
    }
    if (!channels) {
        channels = dst->channels;
        planes   = planar ? channels : 1;
    }
FF_ENABLE_DEPRECATION_WARNINGS
#endif

    if (dst->nb_samples != src->nb_samples ||
#if FF_API_OLD_CHANNEL_LAYOUT
        (av_channel_layout_check(&dst->ch_layout) &&
         av_channel_layout_check(&src->ch_layout) &&
#endif
        av_channel_layout_compare(&dst->ch_layout, &src->ch_layout))
#if FF_API_OLD_CHANNEL_LAYOUT
        )
#endif
        return AVERROR(EINVAL);

    for (i = 0; i < planes; i++)
        if (!dst->extended_data[i] || !src->extended_data[i])
            return AVERROR(EINVAL);

    av_samples_copy(dst->extended_data, src->extended_data, 0, 0,
                    dst->nb_samples, channels, dst->format);

    return 0;
}

int av_frame_copy(AVFrame *dst, const AVFrame *src)
{
    if (dst->format != src->format || dst->format < 0)
        return AVERROR(EINVAL);

FF_DISABLE_DEPRECATION_WARNINGS
    if (dst->width > 0 && dst->height > 0)
        return frame_copy_video(dst, src);
    else if (dst->nb_samples > 0 &&
             (av_channel_layout_check(&dst->ch_layout)
#if FF_API_OLD_CHANNEL_LAYOUT
              || dst->channels > 0
#endif
             ))
        return frame_copy_audio(dst, src);
FF_ENABLE_DEPRECATION_WARNINGS

    return AVERROR(EINVAL);
}
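
/**
 * Usage sketch (illustrative): deep-copying video data into a separately
 * allocated destination. av_frame_copy() does not allocate anything itself,
 * so dst must already have the same format and a large enough buffer.
 *
 * @code
 * dst->format = src->format;
 * dst->width  = src->width;
 * dst->height = src->height;
 * int ret = av_frame_get_buffer(dst, 0);
 * if (ret >= 0)
 *     ret = av_frame_copy(dst, src);
 * if (ret >= 0)
 *     ret = av_frame_copy_props(dst, src);
 * @endcode
 */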

void av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type)
{
    int i;

    for (i = frame->nb_side_data - 1; i >= 0; i--) {
        AVFrameSideData *sd = frame->side_data[i];
        if (sd->type == type) {
            free_side_data(&frame->side_data[i]);
            frame->side_data[i] = frame->side_data[frame->nb_side_data - 1];
            frame->nb_side_data--;
        }
    }
}

const char *av_frame_side_data_name(enum AVFrameSideDataType type)
{
    switch(type) {
    case AV_FRAME_DATA_PANSCAN:         return "AVPanScan";
    case AV_FRAME_DATA_A53_CC:          return "ATSC A53 Part 4 Closed Captions";
    case AV_FRAME_DATA_STEREO3D:        return "Stereo 3D";
    case AV_FRAME_DATA_MATRIXENCODING:  return "AVMatrixEncoding";
    case AV_FRAME_DATA_DOWNMIX_INFO:    return "Metadata relevant to a downmix procedure";
    case AV_FRAME_DATA_REPLAYGAIN:      return "AVReplayGain";
    case AV_FRAME_DATA_DISPLAYMATRIX:   return "3x3 displaymatrix";
    case AV_FRAME_DATA_AFD:             return "Active format description";
    case AV_FRAME_DATA_MOTION_VECTORS:  return "Motion vectors";
    case AV_FRAME_DATA_SKIP_SAMPLES:    return "Skip samples";
    case AV_FRAME_DATA_AUDIO_SERVICE_TYPE:          return "Audio service type";
    case AV_FRAME_DATA_MASTERING_DISPLAY_METADATA:  return "Mastering display metadata";
    case AV_FRAME_DATA_CONTENT_LIGHT_LEVEL:         return "Content light level metadata";
    case AV_FRAME_DATA_GOP_TIMECODE:    return "GOP timecode";
    case AV_FRAME_DATA_S12M_TIMECODE:   return "SMPTE 12-1 timecode";
    case AV_FRAME_DATA_SPHERICAL:       return "Spherical Mapping";
    case AV_FRAME_DATA_ICC_PROFILE:     return "ICC profile";
    case AV_FRAME_DATA_DYNAMIC_HDR_PLUS:  return "HDR Dynamic Metadata SMPTE2094-40 (HDR10+)";
    case AV_FRAME_DATA_DYNAMIC_HDR_VIVID: return "HDR Dynamic Metadata CUVA 005.1 2021 (Vivid)";
    case AV_FRAME_DATA_REGIONS_OF_INTEREST: return "Regions Of Interest";
    case AV_FRAME_DATA_VIDEO_ENC_PARAMS: return "Video encoding parameters";
    case AV_FRAME_DATA_SEI_UNREGISTERED: return "H.26[45] User Data Unregistered SEI message";
    case AV_FRAME_DATA_FILM_GRAIN_PARAMS: return "Film grain parameters";
    case AV_FRAME_DATA_DETECTION_BBOXES: return "Bounding boxes for object detection and classification";
    case AV_FRAME_DATA_DOVI_RPU_BUFFER: return "Dolby Vision RPU Data";
    case AV_FRAME_DATA_DOVI_METADATA:   return "Dolby Vision Metadata";
    case AV_FRAME_DATA_AMBIENT_VIEWING_ENVIRONMENT: return "Ambient viewing environment";
    }
    return NULL;
}

static int calc_cropping_offsets(size_t offsets[4], const AVFrame *frame,
                                 const AVPixFmtDescriptor *desc)
{
    int i, j;

    for (i = 0; frame->data[i]; i++) {
        const AVComponentDescriptor *comp = NULL;
        int shift_x = (i == 1 || i == 2) ? desc->log2_chroma_w : 0;
        int shift_y = (i == 1 || i == 2) ? desc->log2_chroma_h : 0;

        if (desc->flags & AV_PIX_FMT_FLAG_PAL && i == 1) {
            offsets[i] = 0;
            break;
        }

        /* find any component descriptor for this plane */
        for (j = 0; j < desc->nb_components; j++) {
            if (desc->comp[j].plane == i) {
                comp = &desc->comp[j];
                break;
            }
        }
        if (!comp)
            return AVERROR_BUG;

        offsets[i] = (frame->crop_top >> shift_y) * frame->linesize[i] +
                     (frame->crop_left >> shift_x) * comp->step;
    }

    return 0;
}

int av_frame_apply_cropping(AVFrame *frame, int flags)
{
    const AVPixFmtDescriptor *desc;
    size_t offsets[4];
    int i;

    if (!(frame->width > 0 && frame->height > 0))
        return AVERROR(EINVAL);

    if (frame->crop_left >= INT_MAX - frame->crop_right        ||
        frame->crop_top  >= INT_MAX - frame->crop_bottom       ||
        (frame->crop_left + frame->crop_right) >= frame->width ||
        (frame->crop_top + frame->crop_bottom) >= frame->height)
        return AVERROR(ERANGE);

    desc = av_pix_fmt_desc_get(frame->format);
    if (!desc)
        return AVERROR_BUG;

    /* Apply just the right/bottom cropping for hwaccel formats. Bitstream
     * formats cannot be easily handled here either (and corresponding decoders
     * should not export any cropping anyway), so do the same for those as well.
     * */
    if (desc->flags & (AV_PIX_FMT_FLAG_HWACCEL | AV_PIX_FMT_FLAG_BITSTREAM)) {
        frame->width  -= frame->crop_right;
        frame->height -= frame->crop_bottom;
        frame->crop_right  = 0;
        frame->crop_bottom = 0;
        return 0;
    }

    /* calculate the offsets for each plane */
    calc_cropping_offsets(offsets, frame, desc);

    /* adjust the offsets to avoid breaking alignment */
    if (!(flags & AV_FRAME_CROP_UNALIGNED)) {
        int log2_crop_align = frame->crop_left ? ff_ctz(frame->crop_left) : INT_MAX;
        int min_log2_align = INT_MAX;

        for (i = 0; frame->data[i]; i++) {
            int log2_align = offsets[i] ? ff_ctz(offsets[i]) : INT_MAX;
            min_log2_align = FFMIN(log2_align, min_log2_align);
        }

        /* we assume, and it should always be true, that the data alignment is
         * related to the cropping alignment by a constant power-of-2 factor */
        if (log2_crop_align < min_log2_align)
            return AVERROR_BUG;

        if (min_log2_align < 5) {
            frame->crop_left &= ~((1 << (5 + log2_crop_align - min_log2_align)) - 1);
            calc_cropping_offsets(offsets, frame, desc);
        }
    }

    for (i = 0; frame->data[i]; i++)
        frame->data[i] += offsets[i];

    frame->width  -= (frame->crop_left + frame->crop_right);
    frame->height -= (frame->crop_top + frame->crop_bottom);
    frame->crop_left   = 0;
    frame->crop_right  = 0;
    frame->crop_top    = 0;
    frame->crop_bottom = 0;

    return 0;
}
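
/**
 * Usage sketch (illustrative): applying decoder-exported cropping before
 * handing pixels to code that expects only the visible area. With flags == 0
 * the data pointers stay suitably aligned, which may round the requested
 * left/top crop down; pass AV_FRAME_CROP_UNALIGNED to crop exactly.
 *
 * @code
 * if (frame->crop_left || frame->crop_top ||
 *     frame->crop_right || frame->crop_bottom) {
 *     int ret = av_frame_apply_cropping(frame, 0);
 *     if (ret < 0)
 *         return ret;
 * }
 * @endcode
 */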