FFmpeg
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
frame.c
Go to the documentation of this file.
1 /*
2  * This file is part of FFmpeg.
3  *
4  * FFmpeg is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU Lesser General Public
6  * License as published by the Free Software Foundation; either
7  * version 2.1 of the License, or (at your option) any later version.
8  *
9  * FFmpeg is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12  * Lesser General Public License for more details.
13  *
14  * You should have received a copy of the GNU Lesser General Public
15  * License along with FFmpeg; if not, write to the Free Software
16  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17  */
18 
19 #include "channel_layout.h"
20 #include "avassert.h"
21 #include "buffer.h"
22 #include "common.h"
23 #include "dict.h"
24 #include "frame.h"
25 #include "imgutils.h"
26 #include "mem.h"
27 #include "samplefmt.h"
28 
/* Generated getter/setter pairs (av_frame_get_*() / av_frame_set_*()) for
 * AVFrame fields, expanded by the MAKE_ACCESSORS macro (see internal.h). */
MAKE_ACCESSORS(AVFrame, frame, int64_t, best_effort_timestamp)
MAKE_ACCESSORS(AVFrame, frame, int64_t, pkt_duration)
MAKE_ACCESSORS(AVFrame, frame, int64_t, pkt_pos)
MAKE_ACCESSORS(AVFrame, frame, int64_t, channel_layout)
MAKE_ACCESSORS(AVFrame, frame, int, channels)
MAKE_ACCESSORS(AVFrame, frame, int, sample_rate)
MAKE_ACCESSORS(AVFrame, frame, AVDictionary *, metadata)
MAKE_ACCESSORS(AVFrame, frame, int, decode_error_flags)
MAKE_ACCESSORS(AVFrame, frame, int, pkt_size)
MAKE_ACCESSORS(AVFrame, frame, enum AVColorSpace, colorspace)
MAKE_ACCESSORS(AVFrame, frame, enum AVColorRange, color_range)
40 
/* Assert (on assert-level-2 builds) that frame->channels agrees with the
 * channel count implied by frame->channel_layout, whenever a layout is set. */
#define CHECK_CHANNELS_CONSISTENCY(frame) \
    av_assert2(!(frame)->channel_layout || \
               (frame)->channels == \
               av_get_channel_layout_nb_channels((frame)->channel_layout))
45 
46 AVDictionary **avpriv_frame_get_metadatap(AVFrame *frame) {return &frame->metadata;};
47 
#if FF_API_FRAME_QP
/* Attach a QP table buffer to the frame (deprecated QP-table API).
 * Takes ownership of `buf`.
 * NOTE(review): several lines appear elided in this listing (upstream
 * unrefs any previously attached f->qp_table_buf before overwriting it,
 * inside deprecation-warning guards) — confirm against the full source. */
int av_frame_set_qp_table(AVFrame *f, AVBufferRef *buf, int stride, int qp_type)
{
    f->qp_table_buf = buf;

    f->qscale_table = buf->data;
    f->qstride = stride;
    f->qscale_type = qp_type;

    return 0;
}

/* Return the frame's QP table data (or NULL), reporting its stride and
 * type through the out-parameters (deprecated QP-table API). */
int8_t *av_frame_get_qp_table(AVFrame *f, int *stride, int *type)
{
    *stride = f->qstride;
    *type = f->qscale_type;

    if (!f->qp_table_buf)
        return NULL;

    return f->qp_table_buf->data;
}
#endif
77 
78 const char *av_get_colorspace_name(enum AVColorSpace val)
79 {
80  static const char * const name[] = {
81  [AVCOL_SPC_RGB] = "GBR",
82  [AVCOL_SPC_BT709] = "bt709",
83  [AVCOL_SPC_FCC] = "fcc",
84  [AVCOL_SPC_BT470BG] = "bt470bg",
85  [AVCOL_SPC_SMPTE170M] = "smpte170m",
86  [AVCOL_SPC_SMPTE240M] = "smpte240m",
87  [AVCOL_SPC_YCOCG] = "YCgCo",
88  };
89  if ((unsigned)val >= FF_ARRAY_ELEMS(name))
90  return NULL;
91  return name[val];
92 }
93 
/* Reset every field of the frame to its default ("unset") value, freeing a
 * separately allocated extended_data array first. Does NOT unreference any
 * buffers — callers must do that beforehand (see av_frame_unref()).
 * NOTE(review): a few deprecation-guard lines appear elided in this
 * listing — confirm against the full upstream source. */
static void get_frame_defaults(AVFrame *frame)
{
    /* extended_data is a separate allocation for audio with more channels
     * than AV_NUM_DATA_POINTERS */
    if (frame->extended_data != frame->data)
        av_freep(&frame->extended_data);

    memset(frame, 0, sizeof(*frame));

    frame->pts =
    frame->pkt_dts = AV_NOPTS_VALUE;
#if FF_API_PKT_PTS
    frame->pkt_pts = AV_NOPTS_VALUE;
#endif
    frame->pkt_duration = 0;
    frame->pkt_pos = -1;
    frame->pkt_size = -1;
    frame->key_frame = 1;
    frame->sample_aspect_ratio = (AVRational){ 0, 1 };
    frame->format = -1; /* unknown */
    frame->extended_data = frame->data;
    frame->flags = 0;
}
123 
124 static void free_side_data(AVFrameSideData **ptr_sd)
125 {
126  AVFrameSideData *sd = *ptr_sd;
127 
128  av_buffer_unref(&sd->buf);
129  av_dict_free(&sd->metadata);
130  av_freep(ptr_sd);
131 }
132 
133 static void wipe_side_data(AVFrame *frame)
134 {
135  int i;
136 
137  for (i = 0; i < frame->nb_side_data; i++) {
138  free_side_data(&frame->side_data[i]);
139  }
140  frame->nb_side_data = 0;
141 
142  av_freep(&frame->side_data);
143 }
144 
145 AVFrame *av_frame_alloc(void)
146 {
147  AVFrame *frame = av_mallocz(sizeof(*frame));
148 
149  if (!frame)
150  return NULL;
151 
152  frame->extended_data = NULL;
153  get_frame_defaults(frame);
154 
155  return frame;
156 }
157 
158 void av_frame_free(AVFrame **frame)
159 {
160  if (!frame || !*frame)
161  return;
162 
163  av_frame_unref(*frame);
164  av_freep(frame);
165 }
166 
/* Allocate video data buffers for the frame; width, height and format must
 * already be set. Line sizes are aligned to `align` and plane heights are
 * padded to a multiple of 32.
 * NOTE(review): the declaration of `desc` (upstream:
 * `const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);`)
 * appears elided in this listing — confirm against the full source. */
static int get_video_buffer(AVFrame *frame, int align)
{
    int ret, i;

    if (!desc)
        return AVERROR(EINVAL);

    if ((ret = av_image_check_size(frame->width, frame->height, 0, NULL)) < 0)
        return ret;

    if (!frame->linesize[0]) {
        /* try successively larger width alignments (1, 2, 4, ...) until the
         * resulting linesize is itself `align`-aligned */
        for(i=1; i<=align; i+=i) {
            ret = av_image_fill_linesizes(frame->linesize, frame->format,
                                          FFALIGN(frame->width, i));
            if (ret < 0)
                return ret;
            if (!(frame->linesize[0] & (align-1)))
                break;
        }

        for (i = 0; i < 4 && frame->linesize[i]; i++)
            frame->linesize[i] = FFALIGN(frame->linesize[i], align);
    }

    for (i = 0; i < 4 && frame->linesize[i]; i++) {
        int h = FFALIGN(frame->height, 32);
        /* chroma planes (1 and 2) are vertically subsampled */
        if (i == 1 || i == 2)
            h = AV_CEIL_RSHIFT(h, desc->log2_chroma_h);

        frame->buf[i] = av_buffer_alloc(frame->linesize[i] * h + 16 + 16/*STRIDE_ALIGN*/ - 1);
        if (!frame->buf[i])
            goto fail;

        frame->data[i] = frame->buf[i]->data;
    }
    /* paletted / pseudo-paletted formats keep a palette in data[1] */
    if (desc->flags & AV_PIX_FMT_FLAG_PAL || desc->flags & AV_PIX_FMT_FLAG_PSEUDOPAL) {
        av_buffer_unref(&frame->buf[1]);
        frame->buf[1] = av_buffer_alloc(AVPALETTE_SIZE);
        if (!frame->buf[1])
            goto fail;
        frame->data[1] = frame->buf[1]->data;
    }

    frame->extended_data = frame->data;

    return 0;
fail:
    av_frame_unref(frame);
    return AVERROR(ENOMEM);
}
218 
/* Allocate audio data buffers for the frame; format, nb_samples and the
 * channel count must already be set. For planar formats with more channels
 * than AV_NUM_DATA_POINTERS the overflow pointers go into extended_data /
 * extended_buf. */
static int get_audio_buffer(AVFrame *frame, int align)
{
    int channels;
    int planar = av_sample_fmt_is_planar(frame->format);
    int planes;
    int ret, i;

    if (!frame->channels)
        /* NOTE(review): a line appears elided here (upstream:
         * `return AVERROR(EINVAL);`); as shown, the next statement would
         * bind to this `if` — confirm against the full source. */

    channels = frame->channels;
    planes = planar ? channels : 1;

    if (!frame->linesize[0]) {
        ret = av_samples_get_buffer_size(&frame->linesize[0], channels,
                                         frame->nb_samples, frame->format,
                                         align);
        if (ret < 0)
            return ret;
    }

    if (planes > AV_NUM_DATA_POINTERS) {
        frame->extended_data = av_mallocz_array(planes,
                                               sizeof(*frame->extended_data));
        /* NOTE(review): the line allocating extended_buf (upstream:
         * `frame->extended_buf  = av_mallocz_array(planes - AV_NUM_DATA_POINTERS,`)
         * appears elided here; the stray argument line below belongs to it. */
                                               sizeof(*frame->extended_buf));
        if (!frame->extended_data || !frame->extended_buf) {
            av_freep(&frame->extended_data);
            av_freep(&frame->extended_buf);
            return AVERROR(ENOMEM);
        }
        frame->nb_extended_buf = planes - AV_NUM_DATA_POINTERS;
    } else
        frame->extended_data = frame->data;

    /* one buffer per plane, each holding linesize[0] bytes */
    for (i = 0; i < FFMIN(planes, AV_NUM_DATA_POINTERS); i++) {
        frame->buf[i] = av_buffer_alloc(frame->linesize[0]);
        if (!frame->buf[i]) {
            av_frame_unref(frame);
            return AVERROR(ENOMEM);
        }
        frame->extended_data[i] = frame->data[i] = frame->buf[i]->data;
    }
    for (i = 0; i < planes - AV_NUM_DATA_POINTERS; i++) {
        frame->extended_buf[i] = av_buffer_alloc(frame->linesize[0]);
        if (!frame->extended_buf[i]) {
            av_frame_unref(frame);
            return AVERROR(ENOMEM);
        }
        frame->extended_data[i + AV_NUM_DATA_POINTERS] = frame->extended_buf[i]->data;
    }
    return 0;

}
274 
275 int av_frame_get_buffer(AVFrame *frame, int align)
276 {
277  if (frame->format < 0)
278  return AVERROR(EINVAL);
279 
280  if (frame->width > 0 && frame->height > 0)
281  return get_video_buffer(frame, align);
282  else if (frame->nb_samples > 0 && (frame->channel_layout || frame->channels > 0))
283  return get_audio_buffer(frame, align);
284 
285  return AVERROR(EINVAL);
286 }
287 
/* Copy frame properties (metadata, timing, side data — everything except
 * the data planes, buffer references and the core format/geometry fields)
 * from src to dst. force_copy != 0 deep-copies side-data payloads;
 * otherwise refcounted payload buffers are just ref'ed.
 * NOTE(review): several deprecation-guarded lines appear elided in this
 * listing (additional copied fields and the av_buffer_ref() of
 * src->qp_table_buf) — confirm against the full upstream source. */
static int frame_copy_props(AVFrame *dst, const AVFrame *src, int force_copy)
{
    int i;

    dst->key_frame = src->key_frame;
    dst->pict_type = src->pict_type;
    dst->pts = src->pts;
    dst->repeat_pict = src->repeat_pict;
    dst->top_field_first = src->top_field_first;
    dst->sample_rate = src->sample_rate;
    dst->opaque = src->opaque;
#if FF_API_PKT_PTS
    dst->pkt_pts = src->pkt_pts;
#endif
    dst->pkt_dts = src->pkt_dts;
    dst->pkt_pos = src->pkt_pos;
    dst->pkt_size = src->pkt_size;
    dst->pkt_duration = src->pkt_duration;
    dst->quality = src->quality;
    dst->flags = src->flags;
    dst->color_primaries = src->color_primaries;
    dst->color_trc = src->color_trc;
    dst->colorspace = src->colorspace;
    dst->color_range = src->color_range;
    dst->chroma_location = src->chroma_location;

    av_dict_copy(&dst->metadata, src->metadata, 0);

#if FF_API_ERROR_FRAME
    memcpy(dst->error, src->error, sizeof(dst->error));
#endif

    for (i = 0; i < src->nb_side_data; i++) {
        const AVFrameSideData *sd_src = src->side_data[i];
        AVFrameSideData *sd_dst;
        /* pan/scan data is geometry-dependent; drop it if dimensions differ */
        if ( sd_src->type == AV_FRAME_DATA_PANSCAN
            && (src->width != dst->width || src->height != dst->height))
            continue;
        if (force_copy) {
            /* deep copy: allocate a fresh payload and memcpy the bytes */
            sd_dst = av_frame_new_side_data(dst, sd_src->type,
                                            sd_src->size);
            if (!sd_dst) {
                wipe_side_data(dst);
                return AVERROR(ENOMEM);
            }
            memcpy(sd_dst->data, sd_src->data, sd_src->size);
        } else {
            /* shallow copy: create an empty entry, then ref the source buffer */
            sd_dst = av_frame_new_side_data(dst, sd_src->type, 0);
            if (!sd_dst) {
                wipe_side_data(dst);
                return AVERROR(ENOMEM);
            }
            if (sd_src->buf) {
                sd_dst->buf = av_buffer_ref(sd_src->buf);
                if (!sd_dst->buf) {
                    wipe_side_data(dst);
                    return AVERROR(ENOMEM);
                }
                sd_dst->data = sd_dst->buf->data;
                sd_dst->size = sd_dst->buf->size;
            }
        }
        av_dict_copy(&sd_dst->metadata, sd_src->metadata, 0);
    }

#if FF_API_FRAME_QP
    dst->qscale_table = NULL;
    dst->qstride = 0;
    dst->qscale_type = 0;
    if (src->qp_table_buf) {
        /* NOTE(review): the line assigning dst->qp_table_buf (upstream:
         * `dst->qp_table_buf = av_buffer_ref(src->qp_table_buf);`)
         * appears elided here. */
        if (dst->qp_table_buf) {
            dst->qscale_table = dst->qp_table_buf->data;
            dst->qstride = src->qstride;
            dst->qscale_type = src->qscale_type;
        }
    }
#endif

    return 0;
}
384 
/* Set up dst as a new reference to the data described by src; dst must be
 * clean (unreferenced) on entry. Non-refcounted source data is copied into
 * newly allocated buffers instead of being referenced.
 * NOTE(review): in the hw_frames_ctx branch below, the line
 * `dst->hw_frames_ctx = av_buffer_ref(src->hw_frames_ctx);` appears elided
 * in this listing — confirm against the full upstream source. */
int av_frame_ref(AVFrame *dst, const AVFrame *src)
{
    int i, ret = 0;

    av_assert1(dst->width == 0 && dst->height == 0);
    av_assert1(dst->channels == 0);

    dst->format = src->format;
    dst->width = src->width;
    dst->height = src->height;
    dst->channels = src->channels;
    dst->channel_layout = src->channel_layout;
    dst->nb_samples = src->nb_samples;

    ret = frame_copy_props(dst, src, 0);
    if (ret < 0)
        return ret;

    /* duplicate the frame data if it's not refcounted */
    if (!src->buf[0]) {
        ret = av_frame_get_buffer(dst, 32);
        if (ret < 0)
            return ret;

        ret = av_frame_copy(dst, src);
        if (ret < 0)
            av_frame_unref(dst);

        return ret;
    }

    /* ref the buffers */
    for (i = 0; i < FF_ARRAY_ELEMS(src->buf); i++) {
        if (!src->buf[i])
            continue;
        dst->buf[i] = av_buffer_ref(src->buf[i]);
        if (!dst->buf[i]) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    /* overflow buffers for audio with many channels */
    if (src->extended_buf) {
        dst->extended_buf = av_mallocz_array(sizeof(*dst->extended_buf),
                                             src->nb_extended_buf);
        if (!dst->extended_buf) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        dst->nb_extended_buf = src->nb_extended_buf;

        for (i = 0; i < src->nb_extended_buf; i++) {
            dst->extended_buf[i] = av_buffer_ref(src->extended_buf[i]);
            if (!dst->extended_buf[i]) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }
        }
    }

    if (src->hw_frames_ctx) {
        /* NOTE(review): the av_buffer_ref() call that should set
         * dst->hw_frames_ctx appears elided here. */
        if (!dst->hw_frames_ctx) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    /* duplicate extended data */
    if (src->extended_data != src->data) {
        int ch = src->channels;

        if (!ch) {
            ret = AVERROR(EINVAL);
            goto fail;
        }

        dst->extended_data = av_malloc_array(sizeof(*dst->extended_data), ch);
        if (!dst->extended_data) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        memcpy(dst->extended_data, src->extended_data, sizeof(*src->extended_data) * ch);
    } else
        dst->extended_data = dst->data;

    memcpy(dst->data, src->data, sizeof(src->data));
    memcpy(dst->linesize, src->linesize, sizeof(src->linesize));

    return 0;

fail:
    av_frame_unref(dst);
    return ret;
}
481 
482 AVFrame *av_frame_clone(const AVFrame *src)
483 {
484  AVFrame *ret = av_frame_alloc();
485 
486  if (!ret)
487  return NULL;
488 
489  if (av_frame_ref(ret, src) < 0)
490  av_frame_free(&ret);
491 
492  return ret;
493 }
494 
/* Unreference all buffers referenced by the frame, free its side data and
 * metadata, and reset all fields to defaults. Safe to call with NULL.
 * NOTE(review): a line appears elided in this listing (upstream:
 * `av_buffer_unref(&frame->hw_frames_ctx);`) — confirm against the
 * full upstream source. */
void av_frame_unref(AVFrame *frame)
{
    int i;

    if (!frame)
        return;

    wipe_side_data(frame);

    for (i = 0; i < FF_ARRAY_ELEMS(frame->buf); i++)
        av_buffer_unref(&frame->buf[i]);
    for (i = 0; i < frame->nb_extended_buf; i++)
        av_buffer_unref(&frame->extended_buf[i]);
    av_freep(&frame->extended_buf);
    av_dict_free(&frame->metadata);
#if FF_API_FRAME_QP
    av_buffer_unref(&frame->qp_table_buf);
#endif


    get_frame_defaults(frame);
}
518 
519 void av_frame_move_ref(AVFrame *dst, AVFrame *src)
520 {
521  av_assert1(dst->width == 0 && dst->height == 0);
522  av_assert1(dst->channels == 0);
523 
524  *dst = *src;
525  if (src->extended_data == src->data)
526  dst->extended_data = dst->data;
527  memset(src, 0, sizeof(*src));
528  get_frame_defaults(src);
529 }
530 
531 int av_frame_is_writable(AVFrame *frame)
532 {
533  int i, ret = 1;
534 
535  /* assume non-refcounted frames are not writable */
536  if (!frame->buf[0])
537  return 0;
538 
539  for (i = 0; i < FF_ARRAY_ELEMS(frame->buf); i++)
540  if (frame->buf[i])
541  ret &= !!av_buffer_is_writable(frame->buf[i]);
542  for (i = 0; i < frame->nb_extended_buf; i++)
543  ret &= !!av_buffer_is_writable(frame->extended_buf[i]);
544 
545  return ret;
546 }
547 
548 int av_frame_make_writable(AVFrame *frame)
549 {
550  AVFrame tmp;
551  int ret;
552 
553  if (!frame->buf[0])
554  return AVERROR(EINVAL);
555 
556  if (av_frame_is_writable(frame))
557  return 0;
558 
559  memset(&tmp, 0, sizeof(tmp));
560  tmp.format = frame->format;
561  tmp.width = frame->width;
562  tmp.height = frame->height;
563  tmp.channels = frame->channels;
564  tmp.channel_layout = frame->channel_layout;
565  tmp.nb_samples = frame->nb_samples;
566  ret = av_frame_get_buffer(&tmp, 32);
567  if (ret < 0)
568  return ret;
569 
570  ret = av_frame_copy(&tmp, frame);
571  if (ret < 0) {
572  av_frame_unref(&tmp);
573  return ret;
574  }
575 
576  ret = av_frame_copy_props(&tmp, frame);
577  if (ret < 0) {
578  av_frame_unref(&tmp);
579  return ret;
580  }
581 
582  av_frame_unref(frame);
583 
584  *frame = tmp;
585  if (tmp.data == tmp.extended_data)
586  frame->extended_data = frame->data;
587 
588  return 0;
589 }
590 
/* Public wrapper: copy all frame properties (metadata, timing, side data)
 * from src to dst, deep-copying side-data payloads (force_copy = 1). */
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
{
    return frame_copy_props(dst, src, 1);
}
595 
/* NOTE(review): the function signature (upstream:
 * `AVBufferRef *av_frame_get_plane_buffer(AVFrame *frame, int plane)`)
 * appears elided in this listing — confirm against the full source.
 * Returns the buffer reference that backs the given plane's data pointer,
 * or NULL if the plane index is invalid or no backing buffer is found. */
{
    uint8_t *data;
    int planes, i;

    if (frame->nb_samples) {
        /* audio: one plane per channel when planar, otherwise one plane */
        int channels = frame->channels;
        if (!channels)
            return NULL;
        planes = av_sample_fmt_is_planar(frame->format) ? channels : 1;
    } else
        planes = 4;

    if (plane < 0 || plane >= planes || !frame->extended_data[plane])
        return NULL;
    data = frame->extended_data[plane];

    /* search the regular buffer refs, then the overflow (extended) ones */
    for (i = 0; i < FF_ARRAY_ELEMS(frame->buf) && frame->buf[i]; i++) {
        AVBufferRef *buf = frame->buf[i];
        if (data >= buf->data && data < buf->data + buf->size)
            return buf;
    }
    for (i = 0; i < frame->nb_extended_buf; i++) {
        AVBufferRef *buf = frame->extended_buf[i];
        if (data >= buf->data && data < buf->data + buf->size)
            return buf;
    }
    return NULL;
}
626 
/* NOTE(review): the start of the signature (upstream:
 * `AVFrameSideData *av_frame_new_side_data(AVFrame *frame,
 *                                          enum AVFrameSideDataType type,`)
 * appears elided in this listing — confirm against the full source.
 * Appends a new side-data entry of the given type to the frame; when
 * size > 0 a payload buffer of that size is allocated. Returns the new
 * entry, or NULL on allocation failure or count overflow. */
                                        int size)
{
    AVFrameSideData *ret, **tmp;

    /* guard against integer overflow when growing the side_data array */
    if (frame->nb_side_data > INT_MAX / sizeof(*frame->side_data) - 1)
        return NULL;

    tmp = av_realloc(frame->side_data,
                     (frame->nb_side_data + 1) * sizeof(*frame->side_data));
    if (!tmp)
        return NULL;
    frame->side_data = tmp;

    ret = av_mallocz(sizeof(*ret));
    if (!ret)
        return NULL;

    if (size > 0) {
        ret->buf = av_buffer_alloc(size);
        if (!ret->buf) {
            av_freep(&ret);
            return NULL;
        }

        ret->data = ret->buf->data;
        ret->size = size;
    }
    ret->type = type;

    frame->side_data[frame->nb_side_data++] = ret;

    return ret;
}
662 
/* NOTE(review): the signature (upstream:
 * `AVFrameSideData *av_frame_get_side_data(const AVFrame *frame,
 *                                          enum AVFrameSideDataType type)`)
 * appears elided in this listing — confirm against the full source.
 * Linear search: returns the first side-data entry of the given type,
 * or NULL if none exists. */
{
    int i;

    for (i = 0; i < frame->nb_side_data; i++) {
        if (frame->side_data[i]->type == type)
            return frame->side_data[i];
    }
    return NULL;
}
674 
675 static int frame_copy_video(AVFrame *dst, const AVFrame *src)
676 {
677  const uint8_t *src_data[4];
678  int i, planes;
679 
680  if (dst->width < src->width ||
681  dst->height < src->height)
682  return AVERROR(EINVAL);
683 
684  planes = av_pix_fmt_count_planes(dst->format);
685  for (i = 0; i < planes; i++)
686  if (!dst->data[i] || !src->data[i])
687  return AVERROR(EINVAL);
688 
689  memcpy(src_data, src->data, sizeof(src_data));
690  av_image_copy(dst->data, dst->linesize,
691  src_data, src->linesize,
692  dst->format, src->width, src->height);
693 
694  return 0;
695 }
696 
/* Copy audio sample data from src to dst; sample count, channel count and
 * channel layout must match exactly.
 * NOTE(review): lines appear elided in this listing — the `planar`
 * declaration (upstream: `int planar = av_sample_fmt_is_planar(dst->format);`),
 * a CHECK_CHANNELS_CONSISTENCY(dst) call, and the start of the final
 * `av_samples_copy(dst->extended_data, src->extended_data, 0, 0, ...)` call
 * whose trailing argument line remains below — confirm against upstream. */
static int frame_copy_audio(AVFrame *dst, const AVFrame *src)
{
    int channels = dst->channels;
    int planes = planar ? channels : 1;
    int i;

    if (dst->nb_samples != src->nb_samples ||
        dst->channels != src->channels ||
        dst->channel_layout != src->channel_layout)
        return AVERROR(EINVAL);


    for (i = 0; i < planes; i++)
        if (!dst->extended_data[i] || !src->extended_data[i])
            return AVERROR(EINVAL);

                    dst->nb_samples, channels, dst->format);

    return 0;
}
720 
721 int av_frame_copy(AVFrame *dst, const AVFrame *src)
722 {
723  if (dst->format != src->format || dst->format < 0)
724  return AVERROR(EINVAL);
725 
726  if (dst->width > 0 && dst->height > 0)
727  return frame_copy_video(dst, src);
728  else if (dst->nb_samples > 0 && dst->channel_layout)
729  return frame_copy_audio(dst, src);
730 
731  return AVERROR(EINVAL);
732 }
733 
/* NOTE(review): the signature (upstream:
 * `void av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type)`)
 * appears elided in this listing — confirm against the full source.
 * Removes side-data entries of the given type by swapping the last entry
 * into the freed slot.
 * NOTE(review): after the swap, slot i is not re-examined before i is
 * incremented, so a matching entry swapped in from the tail can survive —
 * upstream later reworked this loop; verify against current sources. */
{
    int i;

    for (i = 0; i < frame->nb_side_data; i++) {
        AVFrameSideData *sd = frame->side_data[i];
        if (sd->type == type) {
            free_side_data(&frame->side_data[i]);
            frame->side_data[i] = frame->side_data[frame->nb_side_data - 1];
            frame->nb_side_data--;
        }
    }
}
747 
/* NOTE(review): the signature (upstream:
 * `const char *av_frame_side_data_name(enum AVFrameSideDataType type)`)
 * appears elided in this listing — confirm against the full source.
 * Maps a side-data type to a human-readable name; NULL for unknown types. */
{
    switch(type) {
    case AV_FRAME_DATA_PANSCAN: return "AVPanScan";
    case AV_FRAME_DATA_A53_CC: return "ATSC A53 Part 4 Closed Captions";
    case AV_FRAME_DATA_STEREO3D: return "Stereoscopic 3d metadata";
    case AV_FRAME_DATA_MATRIXENCODING: return "AVMatrixEncoding";
    case AV_FRAME_DATA_DOWNMIX_INFO: return "Metadata relevant to a downmix procedure";
    case AV_FRAME_DATA_REPLAYGAIN: return "AVReplayGain";
    case AV_FRAME_DATA_DISPLAYMATRIX: return "3x3 displaymatrix";
    case AV_FRAME_DATA_AFD: return "Active format description";
    case AV_FRAME_DATA_MOTION_VECTORS: return "Motion vectors";
    case AV_FRAME_DATA_SKIP_SAMPLES: return "Skip samples";
    case AV_FRAME_DATA_AUDIO_SERVICE_TYPE: return "Audio service type";
    case AV_FRAME_DATA_MASTERING_DISPLAY_METADATA: return "Mastering display metadata";
    case AV_FRAME_DATA_GOP_TIMECODE: return "GOP timecode";
    }
    return NULL;
}
#define AV_PIX_FMT_FLAG_PAL
Pixel format has a palette in data[1], values are indexes in this palette.
Definition: pixdesc.h:132
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / SMPTE RP177 Annex B
Definition: pixfmt.h:452
int plane
Definition: avisynth_c.h:422
#define NULL
Definition: coverity.c:32
const char const char void * val
Definition: avisynth_c.h:771
#define AV_NUM_DATA_POINTERS
Definition: frame.h:185
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it...
Definition: buffer.c:124
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2332
This structure describes decoded (raw) audio or video data.
Definition: frame.h:184
void * av_realloc(void *ptr, size_t size)
Allocate, reallocate, or free a block of memory.
Definition: mem.c:145
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:101
This side data must be associated with an audio frame and corresponds to enum AVAudioServiceType defi...
Definition: frame.h:112
attribute_deprecated int qscale_type
Definition: frame.h:523
int64_t pkt_pos
reordered pos from the last AVPacket that has been input into the decoder Code outside libavutil shou...
Definition: frame.h:455
misc image utilities
int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2372
Memory handling functions.
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:367
const char * desc
Definition: nvenc.c:60
AVDictionary * metadata
Definition: frame.h:147
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601 ...
Definition: pixfmt.h:456
void * opaque
for some private data of the user
Definition: frame.h:303
int nb_extended_buf
Number of elements in extended_buf.
Definition: frame.h:385
int repeat_pict
When decoding, this signals how much the picture must be delayed.
Definition: frame.h:317
also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC
Definition: pixfmt.h:457
int av_frame_set_qp_table(AVFrame *f, AVBufferRef *buf, int stride, int qp_type)
Definition: frame.c:49
int av_dict_copy(AVDictionary **dst, const AVDictionary *src, int flags)
Copy entries from one AVDictionary struct into another.
Definition: dict.c:217
Mastering display metadata associated with a video frame.
Definition: frame.h:118
static void wipe_side_data(AVFrame *frame)
Definition: frame.c:133
color_range
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:519
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:252
#define src
Definition: vp8dsp.c:254
int8_t * av_frame_get_qp_table(AVFrame *f, int *stride, int *type)
Definition: frame.c:64
Used by Dirac / VC-2 and H.264 FRext, see ITU-T SG16.
Definition: pixfmt.h:459
order of coefficients is actually GBR, also IEC 61966-2-1 (sRGB)
Definition: pixfmt.h:451
attribute_deprecated int8_t * qscale_table
QP table Not to be accessed directly from outside libavutil.
Definition: frame.h:514
functionally identical to above
Definition: pixfmt.h:458
int av_get_channel_layout_nb_channels(uint64_t channel_layout)
Return the number of channels in the channel layout.
AVFrameSideData * av_frame_get_side_data(const AVFrame *frame, enum AVFrameSideDataType type)
Definition: frame.c:663
AVBufferRef * hw_frames_ctx
For hwaccel-format frames, this should be a reference to the AVHWFramesContext describing the frame...
Definition: frame.h:534
Public dictionary API.
uint8_t
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:145
static int frame_copy_video(AVFrame *dst, const AVFrame *src)
Definition: frame.c:675
AVColorSpace
YUV colorspace type.
Definition: pixfmt.h:450
#define AVPALETTE_SIZE
Definition: pixfmt.h:32
attribute_deprecated int qstride
QP store stride Not to be accessed directly from outside libavutil.
Definition: frame.h:520
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:385
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:268
AVBufferRef * buf
Definition: frame.h:148
The data is the AVPanScan struct defined in libavcodec.
Definition: frame.h:51
static AVFrame * frame
Structure to hold side data for an AVFrame.
Definition: frame.h:143
AVDictionary * metadata
metadata.
Definition: frame.h:474
int interlaced_frame
The content of the picture is interlaced.
Definition: frame.h:322
AVColorRange
MPEG vs JPEG YUV range.
Definition: pixfmt.h:471
ptrdiff_t size
Definition: opengl_enc.c:101
Active Format Description data consisting of a single byte as specified in ETSI TS 101 154 using AVAc...
Definition: frame.h:88
Metadata relevant to a downmix procedure.
Definition: frame.h:71
int nb_side_data
Definition: frame.h:388
#define FFALIGN(x, a)
Definition: macros.h:48
attribute_deprecated uint64_t error[AV_NUM_DATA_POINTERS]
Definition: frame.h:310
AVFrameSideData ** side_data
Definition: frame.h:387
int av_sample_fmt_is_planar(enum AVSampleFormat sample_fmt)
Check if the sample format is planar.
Definition: samplefmt.c:112
uint8_t pi<< 24) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_U8,(uint64_t)((*(constuint8_t *) pi-0x80U))<< 56) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8,(*(constuint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8,(*(constuint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16,(*(constint16_t *) pi >>8)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S16,(uint64_t)(*(constint16_t *) pi)<< 48) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16,*(constint16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16,*(constint16_t *) pi *(1.0/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32,(*(constint32_t *) pi >>24)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S32,(uint64_t)(*(constint32_t *) pi)<< 32) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32,*(constint32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32,*(constint32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S64,(*(constint64_t *) pi >>56)+0x80) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S64,*(constint64_t *) pi *(1.0f/(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S64,*(constint64_t *) pi *(1.0/(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, av_clip_uint8(lrintf(*(constfloat *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, av_clip_int16(lrintf(*(constfloat *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, av_clipl_int32(llrintf(*(constfloat *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_FLT, llrintf(*(constfloat *) pi *(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, av_clip_uint8(lrint(*(constdouble *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, av_clip_int16(lrint(*(constdouble *) pi 
*(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, av_clipl_int32(llrint(*(constdouble *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_DBL, llrint(*(constdouble *) pi *(INT64_C(1)<< 63)))#defineFMT_PAIR_FUNC(out, in) staticconv_func_type *constfmt_pair_to_conv_functions[AV_SAMPLE_FMT_NB *AV_SAMPLE_FMT_NB]={FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S64), 
FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S64),};staticvoidcpy1(uint8_t **dst, constuint8_t **src, intlen){memcpy(*dst,*src, len);}staticvoidcpy2(uint8_t **dst, constuint8_t **src, intlen){memcpy(*dst,*src, 2 *len);}staticvoidcpy4(uint8_t **dst, constuint8_t **src, intlen){memcpy(*dst,*src, 4 *len);}staticvoidcpy8(uint8_t **dst, constuint8_t **src, intlen){memcpy(*dst,*src, 8 *len);}AudioConvert *swri_audio_convert_alloc(enumAVSampleFormatout_fmt, enumAVSampleFormatin_fmt, intchannels, constint *ch_map, intflags){AudioConvert *ctx;conv_func_type *f=fmt_pair_to_conv_functions[av_get_packed_sample_fmt(out_fmt)+AV_SAMPLE_FMT_NB *av_get_packed_sample_fmt(in_fmt)];if(!f) returnNULL;ctx=av_mallocz(sizeof(*ctx));if(!ctx) returnNULL;if(channels==1){in_fmt=av_get_planar_sample_fmt(in_fmt);out_fmt=av_get_planar_sample_fmt(out_fmt);}ctx->channels=channels;ctx->conv_f=f;ctx->ch_map=ch_map;if(in_fmt==AV_SAMPLE_FMT_U8||in_fmt==AV_SAMPLE_FMT_U8P) memset(ctx->silence, 0x80, sizeof(ctx->silence));if(out_fmt==in_fmt &&!ch_map){switch(av_get_bytes_per_sample(in_fmt)){case1:ctx->simd_f=cpy1;break;case2:ctx->simd_f=cpy2;break;case4:ctx->simd_f=cpy4;break;case8:ctx->simd_f=cpy8;break;}}if(HAVE_YASM &&1) swri_audio_convert_init_x86(ctx, out_fmt, in_fmt, channels);if(ARCH_ARM) swri_audio_convert_init_arm(ctx, out_fmt, in_fmt, channels);if(ARCH_AARCH64) swri_audio_convert_init_aarch64(ctx, out_fmt, in_fmt, channels);returnctx;}voidswri_audio_convert_free(AudioConvert **ctx){av_freep(ctx);}intswri_audio_convert(AudioConvert *ctx, AudioData *out, AudioData *in, 
intlen){intch;intoff=0;constintos=(out->planar?1:out->ch_count)*out->bps;unsignedmisaligned=0;av_assert0(ctx->channels==out->ch_count);if(ctx->in_simd_align_mask){intplanes=in->planar?in->ch_count:1;unsignedm=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) in->ch[ch];misaligned|=m &ctx->in_simd_align_mask;}if(ctx->out_simd_align_mask){intplanes=out->planar?out->ch_count:1;unsignedm=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) out->ch[ch];misaligned|=m &ctx->out_simd_align_mask;}if(ctx->simd_f &&!ctx->ch_map &&!misaligned){off=len &~15;av_assert1(off >=0);av_assert1(off<=len);av_assert2(ctx->channels==SWR_CH_MAX||!in->ch[ctx->channels]);if(off >0){if(out->planar==in->planar){intplanes=out->planar?out->ch_count:1;for(ch=0;ch< planes;ch++){ctx->simd_f(out-> ch const uint8_t **in ch off *out planar
Definition: audioconvert.c:56
int width
width and height of the video frame
Definition: frame.h:236
uint8_t log2_chroma_h
Amount to shift the luma height right to find the chroma height.
Definition: pixdesc.h:101
#define MAKE_ACCESSORS(str, name, type, field)
Definition: internal.h:87
#define AVERROR(e)
Definition: error.h:43
static int frame_copy_props(AVFrame *dst, const AVFrame *src, int force_copy)
Definition: frame.c:288
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:158
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: frame.h:422
ATSC A53 Part 4 Closed Captions.
Definition: frame.h:57
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values. ...
Definition: dict.c:203
enum AVColorSpace colorspace
YUV colorspace type.
Definition: frame.h:435
simple assert() macros that are a bit more flexible than ISO C assert().
The GOP timecode in 25 bit timecode format.
Definition: frame.h:123
static int get_audio_buffer(AVFrame *frame, int align)
Definition: frame.c:219
static void * av_mallocz_array(size_t nmemb, size_t size)
Definition: mem.h:226
#define fail()
Definition: checkasm.h:84
void av_image_copy(uint8_t *dst_data[4], int dst_linesizes[4], const uint8_t *src_data[4], const int src_linesizes[4], enum AVPixelFormat pix_fmt, int width, int height)
Copy image in src_data to dst_data.
Definition: imgutils.c:302
int av_frame_copy(AVFrame *dst, const AVFrame *src)
Copy the frame data from src to dst.
Definition: frame.c:721
reference-counted frame API
uint64_t channel_layout
Channel layout of the audio data.
Definition: frame.h:353
uint64_t flags
Combination of AV_PIX_FMT_FLAG_...
Definition: pixdesc.h:106
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:251
int channels
number of audio channels, only used for audio.
Definition: frame.h:496
audio channel layout utility functions
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:258
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:413
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
#define FFMIN(a, b)
Definition: common.h:96
int display_picture_number
picture number in display order
Definition: frame.h:293
AVBufferRef ** extended_buf
For planar audio which requires more than AV_NUM_DATA_POINTERS AVBufferRef pointers, this array will hold all the references which cannot fit into AVFrame.buf.
Definition: frame.h:381
#define AV_PIX_FMT_FLAG_PSEUDOPAL
The pixel format is "pseudo-paletted".
Definition: pixdesc.h:158
Motion vectors exported by some codecs (on demand through the export_mvs flag set in the libavcodec A...
Definition: frame.h:95
AVBufferRef * av_frame_get_plane_buffer(AVFrame *frame, int plane)
Get the buffer reference a given data plane is stored in.
Definition: frame.c:596
AVFrameSideDataType
Definition: frame.h:47
AVBufferRef * qp_table_buf
Not to be accessed directly from outside libavutil.
Definition: frame.h:528
int quality
quality (between 1 (good) and FF_LAMBDA_MAX (bad))
Definition: frame.h:298
int av_buffer_is_writable(const AVBufferRef *buf)
Definition: buffer.c:132
const char * av_get_colorspace_name(enum AVColorSpace val)
Get the name of a colorspace.
Definition: frame.c:78
FCC Title 47 Code of Federal Regulations 73.682 (a)(20)
Definition: pixfmt.h:455
AVFrame * av_frame_clone(const AVFrame *src)
Create a new frame that references the same data as src.
Definition: frame.c:482
#define FF_ARRAY_ELEMS(a)
const AVS_VideoInfo int align
Definition: avisynth_c.h:795
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:248
int coded_picture_number
picture number in bitstream order
Definition: frame.h:289
sample_rate
int64_t pkt_duration
duration of the corresponding packet, expressed in AVStream->time_base units, 0 if unknown...
Definition: frame.h:465
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: frame.h:83
AVDictionary ** avpriv_frame_get_metadatap(AVFrame *frame)
Definition: frame.c:46
int av_frame_is_writable(AVFrame *frame)
Check if the frame data is writable.
Definition: frame.c:531
AVBufferRef * av_buffer_alloc(int size)
Allocate an AVBuffer of the given size using av_malloc().
Definition: buffer.c:66
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:215
static int get_video_buffer(AVFrame *frame, int align)
Definition: frame.c:167
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
uint8_t * data
The data buffer.
Definition: buffer.h:89
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:263
uint8_t * data
Definition: frame.h:145
int av_samples_copy(uint8_t **dst, uint8_t *const *src, int dst_offset, int src_offset, int nb_samples, int nb_channels, enum AVSampleFormat sample_fmt)
Copy samples from src to dst.
Definition: samplefmt.c:213
void av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type)
If side data of the supplied type exists in the frame, free it and remove it from the frame...
Definition: frame.c:734
void * buf
Definition: avisynth_c.h:690
GLint GLenum type
Definition: opengl_enc.c:105
int64_t reordered_opaque
reordered opaque 64 bits (generally an integer or a double precision float PTS but can be anything)...
Definition: frame.h:343
int sample_rate
Sample rate of the audio data.
Definition: frame.h:348
int av_image_fill_linesizes(int linesizes[4], enum AVPixelFormat pix_fmt, int width)
Fill plane linesizes for an image with pixel format pix_fmt and width width.
Definition: imgutils.c:88
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, int size)
Add a new side data to a frame.
Definition: frame.c:627
int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples, enum AVSampleFormat sample_fmt, int align)
Get the required buffer size for the given audio parameters.
Definition: samplefmt.c:119
Rational number (pair of numerator and denominator).
Definition: rational.h:58
int palette_has_changed
Tell user application that palette has changed from previous frame.
Definition: frame.h:332
refcounted data buffer API
enum AVChromaLocation chroma_location
Definition: frame.h:437
int64_t best_effort_timestamp
frame timestamp estimated using various heuristics, in stream time base Code outside libavutil should...
Definition: frame.h:446
int decode_error_flags
decode error flags of the frame, set to a combination of FF_DECODE_ERROR_xxx flags if the decoder pro...
Definition: frame.h:485
static int frame_copy_audio(AVFrame *dst, const AVFrame *src)
Definition: frame.c:697
const char * av_frame_side_data_name(enum AVFrameSideDataType type)
Definition: frame.c:748
int size
Size of data in bytes.
Definition: buffer.h:93
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:275
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:495
enum AVFrameSideDataType type
Definition: frame.h:144
int av_frame_make_writable(AVFrame *frame)
Ensure that the frame data is writable, avoiding data copy if possible.
Definition: frame.c:548
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:198
attribute_deprecated int64_t pkt_pts
PTS copied from the AVPacket that was decoded to produce this frame.
Definition: frame.h:276
int64_t pkt_dts
DTS copied from the AVPacket that triggered returning this frame.
Definition: frame.h:284
A reference to a data buffer.
Definition: buffer.h:81
GLint GLenum GLboolean GLsizei stride
Definition: opengl_enc.c:105
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:80
common internal and external API header
#define CHECK_CHANNELS_CONSISTENCY(frame)
Definition: frame.c:41
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:92
uint8_t pi<< 24) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_U8,(uint64_t)((*(constuint8_t *) pi-0x80U))<< 56) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8,(*(constuint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8,(*(constuint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16,(*(constint16_t *) pi >>8)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S16,(uint64_t)(*(constint16_t *) pi)<< 48) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16,*(constint16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16,*(constint16_t *) pi *(1.0/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32,(*(constint32_t *) pi >>24)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S32,(uint64_t)(*(constint32_t *) pi)<< 32) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32,*(constint32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32,*(constint32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S64,(*(constint64_t *) pi >>56)+0x80) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S64,*(constint64_t *) pi *(1.0f/(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S64,*(constint64_t *) pi *(1.0/(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, av_clip_uint8(lrintf(*(constfloat *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, av_clip_int16(lrintf(*(constfloat *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, av_clipl_int32(llrintf(*(constfloat *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_FLT, llrintf(*(constfloat *) pi *(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, av_clip_uint8(lrint(*(constdouble *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, av_clip_int16(lrint(*(constdouble *) pi 
*(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, av_clipl_int32(llrint(*(constdouble *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_DBL, llrint(*(constdouble *) pi *(INT64_C(1)<< 63)))#defineFMT_PAIR_FUNC(out, in) staticconv_func_type *constfmt_pair_to_conv_functions[AV_SAMPLE_FMT_NB *AV_SAMPLE_FMT_NB]={FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S64), 
FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S64),};staticvoidcpy1(uint8_t **dst, constuint8_t **src, intlen){memcpy(*dst,*src, len);}staticvoidcpy2(uint8_t **dst, constuint8_t **src, intlen){memcpy(*dst,*src, 2 *len);}staticvoidcpy4(uint8_t **dst, constuint8_t **src, intlen){memcpy(*dst,*src, 4 *len);}staticvoidcpy8(uint8_t **dst, constuint8_t **src, intlen){memcpy(*dst,*src, 8 *len);}AudioConvert *swri_audio_convert_alloc(enumAVSampleFormatout_fmt, enumAVSampleFormatin_fmt, intchannels, constint *ch_map, intflags){AudioConvert *ctx;conv_func_type *f=fmt_pair_to_conv_functions[av_get_packed_sample_fmt(out_fmt)+AV_SAMPLE_FMT_NB *av_get_packed_sample_fmt(in_fmt)];if(!f) returnNULL;ctx=av_mallocz(sizeof(*ctx));if(!ctx) returnNULL;if(channels==1){in_fmt=av_get_planar_sample_fmt(in_fmt);out_fmt=av_get_planar_sample_fmt(out_fmt);}ctx->channels=channels;ctx->conv_f=f;ctx->ch_map=ch_map;if(in_fmt==AV_SAMPLE_FMT_U8||in_fmt==AV_SAMPLE_FMT_U8P) memset(ctx->silence, 0x80, sizeof(ctx->silence));if(out_fmt==in_fmt &&!ch_map){switch(av_get_bytes_per_sample(in_fmt)){case1:ctx->simd_f=cpy1;break;case2:ctx->simd_f=cpy2;break;case4:ctx->simd_f=cpy4;break;case8:ctx->simd_f=cpy8;break;}}if(HAVE_YASM &&1) swri_audio_convert_init_x86(ctx, out_fmt, in_fmt, channels);if(ARCH_ARM) swri_audio_convert_init_arm(ctx, out_fmt, in_fmt, channels);if(ARCH_AARCH64) swri_audio_convert_init_aarch64(ctx, out_fmt, in_fmt, channels);returnctx;}voidswri_audio_convert_free(AudioConvert **ctx){av_freep(ctx);}intswri_audio_convert(AudioConvert *ctx, AudioData *out, AudioData *in, 
intlen){intch;intoff=0;constintos=(out->planar?1:out->ch_count)*out->bps;unsignedmisaligned=0;av_assert0(ctx->channels==out->ch_count);if(ctx->in_simd_align_mask){intplanes=in->planar?in->ch_count:1;unsignedm=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) in->ch[ch];misaligned|=m &ctx->in_simd_align_mask;}if(ctx->out_simd_align_mask){intplanes=out->planar?out->ch_count:1;unsignedm=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) out->ch[ch];misaligned|=m &ctx->out_simd_align_mask;}if(ctx->simd_f &&!ctx->ch_map &&!misaligned){off=len &~15;av_assert1(off >=0);av_assert1(off<=len);av_assert2(ctx->channels==SWR_CH_MAX||!in->ch[ctx->channels]);if(off >0){if(out->planar==in->planar){intplanes=out->planar?out->ch_count:1;for(ch=0;ch< planes;ch++){ctx->simd_f(out-> ch ch
Definition: audioconvert.c:56
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:81
int top_field_first
If the content is interlaced, is top field displayed first.
Definition: frame.h:327
static uint8_t tmp[8]
Definition: des.c:38
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:253
enum AVColorPrimaries color_primaries
Definition: frame.h:424
int height
Definition: frame.h:236
#define av_freep(p)
static void free_side_data(AVFrameSideData **ptr_sd)
Definition: frame.c:124
enum AVColorTransferCharacteristic color_trc
Definition: frame.h:426
Recommmends skipping the specified number of samples.
Definition: frame.h:107
#define av_malloc_array(a, b)
#define stride
ReplayGain information in the form of the AVReplayGain struct.
Definition: frame.h:75
uint8_t ** extended_data
pointers to the data planes/channels.
Definition: frame.h:231
static void get_frame_defaults(AVFrame *frame)
Definition: frame.c:94
int pkt_size
size of the corresponding packet containing the compressed frame.
Definition: frame.h:506
Stereoscopic 3d metadata.
Definition: frame.h:62
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:241
The data is the AVMatrixEncoding enum defined in libavutil/channel_layout.h.
Definition: frame.h:66
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:591
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:242
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:58
const char * name
Definition: opengl_enc.c:103