FFmpeg
decode.c
1 /*
2  * generic decoding-related code
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <stdint.h>
22 #include <string.h>
23 
24 #include "config.h"
25 
26 #if CONFIG_ICONV
27 # include <iconv.h>
28 #endif
29 
30 #include "libavutil/avassert.h"
31 #include "libavutil/avstring.h"
32 #include "libavutil/bprint.h"
33 #include "libavutil/common.h"
34 #include "libavutil/frame.h"
35 #include "libavutil/hwcontext.h"
36 #include "libavutil/imgutils.h"
37 #include "libavutil/internal.h"
38 #include "libavutil/intmath.h"
39 #include "libavutil/opt.h"
40 
41 #include "avcodec.h"
42 #include "bytestream.h"
43 #include "decode.h"
44 #include "hwconfig.h"
45 #include "internal.h"
46 #include "packet_internal.h"
47 #include "thread.h"
48 
49 typedef struct FramePool {
50  /**
51  * Pools for each data plane. For audio all the planes have the same size,
52  * so only pools[0] is used.
53  */
54  AVBufferPool *pools[4];
55 
56  /*
57  * Pool parameters
58  */
59  int format;
60  int width, height;
61  int stride_align[AV_NUM_DATA_POINTERS];
62  int linesize[4];
63  int planes;
64  int channels;
65  int samples;
66 } FramePool;
67 
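/* Apply AV_PKT_DATA_PARAM_CHANGE side data carried by the packet (channel
 * count, channel layout, sample rate, dimensions) to the codec context.
 * Errors are only fatal when AV_EF_EXPLODE is set. */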
68 static int apply_param_change(AVCodecContext *avctx, const AVPacket *avpkt)
69 {
70  int size, ret;
71  const uint8_t *data;
72  uint32_t flags;
73  int64_t val;
74 
75  data = av_packet_get_side_data(avpkt, AV_PKT_DATA_PARAM_CHANGE, &size);
76  if (!data)
77  return 0;
78 
79  if (!(avctx->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE)) {
80  av_log(avctx, AV_LOG_ERROR, "This decoder does not support parameter "
81  "changes, but PARAM_CHANGE side data was sent to it.\n");
82  ret = AVERROR(EINVAL);
83  goto fail2;
84  }
85 
86  if (size < 4)
87  goto fail;
88 
89  flags = bytestream_get_le32(&data);
90  size -= 4;
91 
92  if (flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT) {
93  if (size < 4)
94  goto fail;
95  val = bytestream_get_le32(&data);
96  if (val <= 0 || val > INT_MAX) {
97  av_log(avctx, AV_LOG_ERROR, "Invalid channel count");
98  ret = AVERROR_INVALIDDATA;
99  goto fail2;
100  }
101  avctx->channels = val;
102  size -= 4;
103  }
104  if (flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT) {
105  if (size < 8)
106  goto fail;
107  avctx->channel_layout = bytestream_get_le64(&data);
108  size -= 8;
109  }
110  if (flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE) {
111  if (size < 4)
112  goto fail;
113  val = bytestream_get_le32(&data);
114  if (val <= 0 || val > INT_MAX) {
115  av_log(avctx, AV_LOG_ERROR, "Invalid sample rate");
116  ret = AVERROR_INVALIDDATA;
117  goto fail2;
118  }
119  avctx->sample_rate = val;
120  size -= 4;
121  }
122  if (flags & AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS) {
123  if (size < 8)
124  goto fail;
125  avctx->width = bytestream_get_le32(&data);
126  avctx->height = bytestream_get_le32(&data);
127  size -= 8;
128  ret = ff_set_dimensions(avctx, avctx->width, avctx->height);
129  if (ret < 0)
130  goto fail2;
131  }
132 
133  return 0;
134 fail:
135  av_log(avctx, AV_LOG_ERROR, "PARAM_CHANGE side data too small.\n");
136  ret = AVERROR_INVALIDDATA;
137 fail2:
138  if (ret < 0) {
139  av_log(avctx, AV_LOG_ERROR, "Error applying parameter changes.\n");
140  if (avctx->err_recognition & AV_EF_EXPLODE)
141  return ret;
142  }
143  return 0;
144 }
145 
146 #define IS_EMPTY(pkt) (!(pkt)->data)
147 
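/* Copy the packet's properties (timestamps + side data) into the pkt_props
 * queue so that ff_decode_frame_props() can later attach them to the frames
 * produced from this packet. */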
148 static int extract_packet_props(AVCodecInternal *avci, const AVPacket *pkt)
149 {
150  int ret = 0;
151 
152  ret = avpriv_packet_list_put(&avci->pkt_props, &avci->pkt_props_tail, pkt,
153  av_packet_copy_props, 0);
154  if (ret < 0)
155  return ret;
156  avci->pkt_props_tail->pkt.size = pkt->size; // HACK: Needed for ff_decode_frame_props().
157  avci->pkt_props_tail->pkt.data = (void*)1; // HACK: Needed for IS_EMPTY().
158 
159  if (IS_EMPTY(avci->last_pkt_props)) {
160  ret = avpriv_packet_list_get(&avci->pkt_props,
161  &avci->pkt_props_tail,
162  avci->last_pkt_props);
163  av_assert0(ret != AVERROR(EAGAIN));
164  }
165  return ret;
166 }
167 
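/* Convert a refcounted frame into the flat copy expected by the legacy
 * (non-refcounted) decode API: the buffer references are moved to
 * avci->to_free and only data/linesize/props are exposed to the caller. */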
168 static int unrefcount_frame(AVCodecInternal *avci, AVFrame *frame)
169 {
170  int ret;
171 
172  /* move the original frame to our backup */
173  av_frame_unref(avci->to_free);
174  av_frame_move_ref(avci->to_free, frame);
175 
176  /* now copy everything except the AVBufferRefs back
177  * note that we make a COPY of the side data, so calling av_frame_free() on
178  * the caller's frame will work properly */
179  ret = av_frame_copy_props(frame, avci->to_free);
180  if (ret < 0)
181  return ret;
182 
183  memcpy(frame->data, avci->to_free->data, sizeof(frame->data));
184  memcpy(frame->linesize, avci->to_free->linesize, sizeof(frame->linesize));
185  if (avci->to_free->extended_data != avci->to_free->data) {
186  int planes = avci->to_free->channels;
187  int size = planes * sizeof(*frame->extended_data);
188 
189  if (!size) {
190  av_frame_unref(frame);
191  return AVERROR_BUG;
192  }
193 
194  frame->extended_data = av_malloc(size);
195  if (!frame->extended_data) {
196  av_frame_unref(frame);
197  return AVERROR(ENOMEM);
198  }
199  memcpy(frame->extended_data, avci->to_free->extended_data,
200  size);
201  } else
202  frame->extended_data = frame->data;
203 
204  frame->format = avci->to_free->format;
205  frame->width = avci->to_free->width;
206  frame->height = avci->to_free->height;
207  frame->channel_layout = avci->to_free->channel_layout;
208  frame->nb_samples = avci->to_free->nb_samples;
209  frame->channels = avci->to_free->channels;
210 
211  return 0;
212 }
213 
214 int ff_decode_bsfs_init(AVCodecContext *avctx)
215 {
216  AVCodecInternal *avci = avctx->internal;
217  int ret;
218 
219  if (avci->bsf)
220  return 0;
221 
222  ret = av_bsf_list_parse_str(avctx->codec->bsfs, &avci->bsf);
223  if (ret < 0) {
224  av_log(avctx, AV_LOG_ERROR, "Error parsing decoder bitstream filters '%s': %s\n", avctx->codec->bsfs, av_err2str(ret));
225  if (ret != AVERROR(ENOMEM))
226  ret = AVERROR_BUG;
227  goto fail;
228  }
229 
230  /* We do not currently have an API for passing the input timebase into decoders,
231  * but no filters used here should actually need it.
232  * So we make up some plausible-looking number (the MPEG 90kHz timebase) */
233  avci->bsf->time_base_in = (AVRational){ 1, 90000 };
234  ret = avcodec_parameters_from_context(avci->bsf->par_in, avctx);
235  if (ret < 0)
236  goto fail;
237 
238  ret = av_bsf_init(avci->bsf);
239  if (ret < 0)
240  goto fail;
241 
242  return 0;
243 fail:
244  av_bsf_free(&avci->bsf);
245  return ret;
246 }
247 
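/* Fetch the next packet for decoding: pull it through the decoder's
 * bitstream filters, record its properties and apply any parameter-change
 * side data before it is handed to the codec. */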
248 int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt)
249 {
250  AVCodecInternal *avci = avctx->internal;
251  int ret;
252 
253  if (avci->draining)
254  return AVERROR_EOF;
255 
256  ret = av_bsf_receive_packet(avci->bsf, pkt);
257  if (ret == AVERROR_EOF)
258  avci->draining = 1;
259  if (ret < 0)
260  return ret;
261 
262  ret = extract_packet_props(avctx->internal, pkt);
263  if (ret < 0)
264  goto finish;
265 
266  ret = apply_param_change(avctx, pkt);
267  if (ret < 0)
268  goto finish;
269 
270  if (avctx->codec->receive_frame)
271  avci->compat_decode_consumed += pkt->size;
272 
273  return 0;
274 finish:
275  av_packet_unref(pkt);
276  return ret;
277 }
278 
279 /**
280  * Attempt to guess proper monotonic timestamps for decoded video frames
281  * which might have incorrect times. Input timestamps may wrap around, in
282  * which case the output will as well.
283  *
284  * @param pts the pts field of the decoded AVPacket, as passed through
285  * AVFrame.pts
286  * @param dts the dts field of the decoded AVPacket
287  * @return one of the input values, may be AV_NOPTS_VALUE
288  */
289 static int64_t guess_correct_pts(AVCodecContext *ctx,
290  int64_t reordered_pts, int64_t dts)
291 {
292  int64_t pts = AV_NOPTS_VALUE;
293 
294  if (dts != AV_NOPTS_VALUE) {
295  ctx->pts_correction_num_faulty_dts += dts <= ctx->pts_correction_last_dts;
296  ctx->pts_correction_last_dts = dts;
297  } else if (reordered_pts != AV_NOPTS_VALUE)
298  ctx->pts_correction_last_dts = reordered_pts;
299 
300  if (reordered_pts != AV_NOPTS_VALUE) {
301  ctx->pts_correction_num_faulty_pts += reordered_pts <= ctx->pts_correction_last_pts;
302  ctx->pts_correction_last_pts = reordered_pts;
303  } else if(dts != AV_NOPTS_VALUE)
304  ctx->pts_correction_last_pts = dts;
305 
306  if ((ctx->pts_correction_num_faulty_pts<=ctx->pts_correction_num_faulty_dts || dts == AV_NOPTS_VALUE)
307  && reordered_pts != AV_NOPTS_VALUE)
308  pts = reordered_pts;
309  else
310  pts = dts;
311 
312  return pts;
313 }
314 
315 /*
316  * The core of the receive_frame_wrapper for the decoders implementing
317  * the simple API. Certain decoders might consume partial packets without
318  * returning any output, so this function needs to be called in a loop until it
319  * returns EAGAIN.
320  **/
321 static inline int decode_simple_internal(AVCodecContext *avctx, AVFrame *frame, int64_t *discarded_samples)
322 {
323  AVCodecInternal *avci = avctx->internal;
324  DecodeSimpleContext *ds = &avci->ds;
325  AVPacket *pkt = ds->in_pkt;
326  // copy to ensure we do not change pkt
327  int got_frame, actual_got_frame;
328  int ret;
329 
330  if (!pkt->data && !avci->draining) {
331  av_packet_unref(pkt);
332  ret = ff_decode_get_packet(avctx, pkt);
333  if (ret < 0 && ret != AVERROR_EOF)
334  return ret;
335  }
336 
337  // Some codecs (at least wma lossless) will crash when feeding drain packets
338  // after EOF was signaled.
339  if (avci->draining_done)
340  return AVERROR_EOF;
341 
342  if (!pkt->data &&
343  !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY ||
344  avctx->active_thread_type & FF_THREAD_FRAME))
345  return AVERROR_EOF;
346 
347  got_frame = 0;
348 
349  if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME) {
350  ret = ff_thread_decode_frame(avctx, frame, &got_frame, pkt);
351  } else {
352  ret = avctx->codec->decode(avctx, frame, &got_frame, pkt);
353 
354  if (!(avctx->codec->caps_internal & FF_CODEC_CAP_SETS_PKT_DTS))
355  frame->pkt_dts = pkt->dts;
356  if (avctx->codec->type == AVMEDIA_TYPE_VIDEO) {
357  if(!avctx->has_b_frames)
358  frame->pkt_pos = pkt->pos;
359  //FIXME these should be under if(!avctx->has_b_frames)
360  /* get_buffer is supposed to set frame parameters */
361  if (!(avctx->codec->capabilities & AV_CODEC_CAP_DR1)) {
362  if (!frame->sample_aspect_ratio.num) frame->sample_aspect_ratio = avctx->sample_aspect_ratio;
363  if (!frame->width) frame->width = avctx->width;
364  if (!frame->height) frame->height = avctx->height;
365  if (frame->format == AV_PIX_FMT_NONE) frame->format = avctx->pix_fmt;
366  }
367  }
368  }
369  emms_c();
370  actual_got_frame = got_frame;
371 
372  if (avctx->codec->type == AVMEDIA_TYPE_VIDEO) {
373  if (frame->flags & AV_FRAME_FLAG_DISCARD)
374  got_frame = 0;
375  } else if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) {
376  uint8_t *side;
377  int side_size;
378  uint32_t discard_padding = 0;
379  uint8_t skip_reason = 0;
380  uint8_t discard_reason = 0;
381 
382  if (ret >= 0 && got_frame) {
383  if (frame->format == AV_SAMPLE_FMT_NONE)
384  frame->format = avctx->sample_fmt;
385  if (!frame->channel_layout)
386  frame->channel_layout = avctx->channel_layout;
387  if (!frame->channels)
388  frame->channels = avctx->channels;
389  if (!frame->sample_rate)
390  frame->sample_rate = avctx->sample_rate;
391  }
392 
393  side= av_packet_get_side_data(avci->last_pkt_props, AV_PKT_DATA_SKIP_SAMPLES, &side_size);
394  if(side && side_size>=10) {
395  avci->skip_samples = AV_RL32(side) * avci->skip_samples_multiplier;
396  discard_padding = AV_RL32(side + 4);
397  av_log(avctx, AV_LOG_DEBUG, "skip %d / discard %d samples due to side data\n",
398  avci->skip_samples, (int)discard_padding);
399  skip_reason = AV_RL8(side + 8);
400  discard_reason = AV_RL8(side + 9);
401  }
402 
403  if ((frame->flags & AV_FRAME_FLAG_DISCARD) && got_frame &&
404  !(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
405  avci->skip_samples = FFMAX(0, avci->skip_samples - frame->nb_samples);
406  got_frame = 0;
407  *discarded_samples += frame->nb_samples;
408  }
409 
410  if (avci->skip_samples > 0 && got_frame &&
411  !(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
412  if(frame->nb_samples <= avci->skip_samples){
413  got_frame = 0;
414  *discarded_samples += frame->nb_samples;
415  avci->skip_samples -= frame->nb_samples;
416  av_log(avctx, AV_LOG_DEBUG, "skip whole frame, skip left: %d\n",
417  avci->skip_samples);
418  } else {
419  av_samples_copy(frame->extended_data, frame->extended_data, 0, avci->skip_samples,
420  frame->nb_samples - avci->skip_samples, avctx->channels, frame->format);
421  if(avctx->pkt_timebase.num && avctx->sample_rate) {
422  int64_t diff_ts = av_rescale_q(avci->skip_samples,
423  (AVRational){1, avctx->sample_rate},
424  avctx->pkt_timebase);
425  if(frame->pts!=AV_NOPTS_VALUE)
426  frame->pts += diff_ts;
427 #if FF_API_PKT_PTS
428 FF_DISABLE_DEPRECATION_WARNINGS
429  if(frame->pkt_pts!=AV_NOPTS_VALUE)
430  frame->pkt_pts += diff_ts;
431 FF_ENABLE_DEPRECATION_WARNINGS
432 #endif
433  if(frame->pkt_dts!=AV_NOPTS_VALUE)
434  frame->pkt_dts += diff_ts;
435  if (frame->pkt_duration >= diff_ts)
436  frame->pkt_duration -= diff_ts;
437  } else {
438  av_log(avctx, AV_LOG_WARNING, "Could not update timestamps for skipped samples.\n");
439  }
440  av_log(avctx, AV_LOG_DEBUG, "skip %d/%d samples\n",
441  avci->skip_samples, frame->nb_samples);
442  *discarded_samples += avci->skip_samples;
443  frame->nb_samples -= avci->skip_samples;
444  avci->skip_samples = 0;
445  }
446  }
447 
448  if (discard_padding > 0 && discard_padding <= frame->nb_samples && got_frame &&
449  !(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
450  if (discard_padding == frame->nb_samples) {
451  *discarded_samples += frame->nb_samples;
452  got_frame = 0;
453  } else {
454  if(avctx->pkt_timebase.num && avctx->sample_rate) {
455  int64_t diff_ts = av_rescale_q(frame->nb_samples - discard_padding,
456  (AVRational){1, avctx->sample_rate},
457  avctx->pkt_timebase);
458  frame->pkt_duration = diff_ts;
459  } else {
460  av_log(avctx, AV_LOG_WARNING, "Could not update timestamps for discarded samples.\n");
461  }
462  av_log(avctx, AV_LOG_DEBUG, "discard %d/%d samples\n",
463  (int)discard_padding, frame->nb_samples);
464  frame->nb_samples -= discard_padding;
465  }
466  }
467 
468  if ((avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL) && got_frame) {
469  AVFrameSideData *fside = av_frame_new_side_data(frame, AV_FRAME_DATA_SKIP_SAMPLES, 10);
470  if (fside) {
471  AV_WL32(fside->data, avci->skip_samples);
472  AV_WL32(fside->data + 4, discard_padding);
473  AV_WL8(fside->data + 8, skip_reason);
474  AV_WL8(fside->data + 9, discard_reason);
475  avci->skip_samples = 0;
476  }
477  }
478  }
479 
480  if (avctx->codec->type == AVMEDIA_TYPE_AUDIO &&
481  !avci->showed_multi_packet_warning &&
482  ret >= 0 && ret != pkt->size && !(avctx->codec->capabilities & AV_CODEC_CAP_SUBFRAMES)) {
483  av_log(avctx, AV_LOG_WARNING, "Multiple frames in a packet.\n");
484  avci->showed_multi_packet_warning = 1;
485  }
486 
487  if (!got_frame)
488  av_frame_unref(frame);
489 
490  if (ret >= 0 && avctx->codec->type == AVMEDIA_TYPE_VIDEO && !(avctx->flags & AV_CODEC_FLAG_TRUNCATED))
491  ret = pkt->size;
492 
493 #if FF_API_AVCTX_TIMEBASE
494  if (avctx->framerate.num > 0 && avctx->framerate.den > 0)
495  avctx->time_base = av_inv_q(av_mul_q(avctx->framerate, (AVRational){avctx->ticks_per_frame, 1}));
496 #endif
497 
498  /* do not stop draining when actual_got_frame != 0 or ret < 0 */
499  /* got_frame == 0 but actual_got_frame != 0 when frame is discarded */
500  if (avci->draining && !actual_got_frame) {
501  if (ret < 0) {
502  /* prevent an infinite loop if a decoder wrongly keeps returning errors while draining */
503  /* reasonable nb_errors_max = maximum b frames + thread count */
504  int nb_errors_max = 20 + (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME ?
505  avctx->thread_count : 1);
506 
507  if (avci->nb_draining_errors++ >= nb_errors_max) {
508  av_log(avctx, AV_LOG_ERROR, "Too many errors when draining, this is a bug. "
509  "Stop draining and force EOF.\n");
510  avci->draining_done = 1;
511  ret = AVERROR_BUG;
512  }
513  } else {
514  avci->draining_done = 1;
515  }
516  }
517 
518  avci->compat_decode_consumed += ret;
519 
520  if (ret >= pkt->size || ret < 0) {
521  av_packet_unref(pkt);
522  av_packet_unref(avci->last_pkt_props);
523  } else {
524  int consumed = ret;
525 
526  pkt->data += consumed;
527  pkt->size -= consumed;
528  avci->last_pkt_props->size -= consumed; // See extract_packet_props() comment.
529  pkt->pts = AV_NOPTS_VALUE;
530  pkt->dts = AV_NOPTS_VALUE;
531  avci->last_pkt_props->pts = AV_NOPTS_VALUE;
532  avci->last_pkt_props->dts = AV_NOPTS_VALUE;
533  }
534 
535  if (got_frame)
536  av_assert0(frame->buf[0]);
537 
538  return ret < 0 ? ret : 0;
539 }
540 
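/* Call decode_simple_internal() in a loop until it produces a frame, fails,
 * or the number of discarded samples exceeds avctx->max_samples. */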
541 static int decode_simple_receive_frame(AVCodecContext *avctx, AVFrame *frame)
542 {
543  int ret;
544  int64_t discarded_samples = 0;
545 
546  while (!frame->buf[0]) {
547  if (discarded_samples > avctx->max_samples)
548  return AVERROR(EAGAIN);
549  ret = decode_simple_internal(avctx, frame, &discarded_samples);
550  if (ret < 0)
551  return ret;
552  }
553 
554  return 0;
555 }
556 
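/* Common receive-frame path: use the codec's receive_frame() callback when
 * present, otherwise the simple decode API, then derive the best effort
 * timestamp and run any per-frame post-processing. */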
557 static int decode_receive_frame_internal(AVCodecContext *avctx, AVFrame *frame)
558 {
559  AVCodecInternal *avci = avctx->internal;
560  int ret;
561 
562  av_assert0(!frame->buf[0]);
563 
564  if (avctx->codec->receive_frame) {
565  ret = avctx->codec->receive_frame(avctx, frame);
566  if (ret != AVERROR(EAGAIN))
567  av_packet_unref(avci->last_pkt_props);
568  } else
569  ret = decode_simple_receive_frame(avctx, frame);
570 
571  if (ret == AVERROR_EOF)
572  avci->draining_done = 1;
573 
574  if (!ret) {
575  frame->best_effort_timestamp = guess_correct_pts(avctx,
576  frame->pts,
577  frame->pkt_dts);
578 
579  /* the only case where decode data is not set should be decoders
580  * that do not call ff_get_buffer() */
581  av_assert0((frame->private_ref && frame->private_ref->size == sizeof(FrameDecodeData)) ||
582  !(avctx->codec->capabilities & AV_CODEC_CAP_DR1));
583 
584  if (frame->private_ref) {
585  FrameDecodeData *fdd = (FrameDecodeData*)frame->private_ref->data;
586 
587  if (fdd->post_process) {
588  ret = fdd->post_process(avctx, frame);
589  if (ret < 0) {
590  av_frame_unref(frame);
591  return ret;
592  }
593  }
594  }
595  }
596 
597  /* free the per-frame decode data */
598  av_buffer_unref(&frame->private_ref);
599 
600  return ret;
601 }
602 
603 int attribute_align_arg avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
604 {
605  AVCodecInternal *avci = avctx->internal;
606  int ret;
607 
608  if (!avcodec_is_open(avctx) || !av_codec_is_decoder(avctx->codec))
609  return AVERROR(EINVAL);
610 
611  if (avctx->internal->draining)
612  return AVERROR_EOF;
613 
614  if (avpkt && !avpkt->size && avpkt->data)
615  return AVERROR(EINVAL);
616 
617  av_packet_unref(avci->buffer_pkt);
618  if (avpkt && (avpkt->data || avpkt->side_data_elems)) {
619  ret = av_packet_ref(avci->buffer_pkt, avpkt);
620  if (ret < 0)
621  return ret;
622  }
623 
624  ret = av_bsf_send_packet(avci->bsf, avci->buffer_pkt);
625  if (ret < 0) {
626  av_packet_unref(avci->buffer_pkt);
627  return ret;
628  }
629 
630  if (!avci->buffer_frame->buf[0]) {
631  ret = decode_receive_frame_internal(avctx, avci->buffer_frame);
632  if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
633  return ret;
634  }
635 
636  return 0;
637 }
638 
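/* Validate the cropping fields set by the decoder and, if
 * avctx->apply_cropping is enabled, crop the frame in place. */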
639 static int apply_cropping(AVCodecContext *avctx, AVFrame *frame)
640 {
641  /* make sure we are noisy about decoders returning invalid cropping data */
642  if (frame->crop_left >= INT_MAX - frame->crop_right ||
643  frame->crop_top >= INT_MAX - frame->crop_bottom ||
644  (frame->crop_left + frame->crop_right) >= frame->width ||
645  (frame->crop_top + frame->crop_bottom) >= frame->height) {
646  av_log(avctx, AV_LOG_WARNING,
647  "Invalid cropping information set by a decoder: "
648  "%d/%d/%d/%d "
649  "(frame size %dx%d). This is a bug, please report it\n",
650  frame->crop_left, frame->crop_right, frame->crop_top, frame->crop_bottom,
651  frame->width, frame->height);
652  frame->crop_left = 0;
653  frame->crop_right = 0;
654  frame->crop_top = 0;
655  frame->crop_bottom = 0;
656  return 0;
657  }
658 
659  if (!avctx->apply_cropping)
660  return 0;
661 
662  return av_frame_apply_cropping(frame, avctx->flags & AV_CODEC_FLAG_UNALIGNED ?
663  AV_FRAME_CROP_UNALIGNED : 0);
664 }
665 
666 int attribute_align_arg avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
667 {
668  AVCodecInternal *avci = avctx->internal;
669  int ret, changed;
670 
671  av_frame_unref(frame);
672 
673  if (!avcodec_is_open(avctx) || !av_codec_is_decoder(avctx->codec))
674  return AVERROR(EINVAL);
675 
676  if (avci->buffer_frame->buf[0]) {
677  av_frame_move_ref(frame, avci->buffer_frame);
678  } else {
679  ret = decode_receive_frame_internal(avctx, frame);
680  if (ret < 0)
681  return ret;
682  }
683 
684  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
685  ret = apply_cropping(avctx, frame);
686  if (ret < 0) {
687  av_frame_unref(frame);
688  return ret;
689  }
690  }
691 
692  avctx->frame_number++;
693 
694  if (avctx->flags & AV_CODEC_FLAG_DROPCHANGED) {
695 
696  if (avctx->frame_number == 1) {
697  avci->initial_format = frame->format;
698  switch(avctx->codec_type) {
699  case AVMEDIA_TYPE_VIDEO:
700  avci->initial_width = frame->width;
701  avci->initial_height = frame->height;
702  break;
703  case AVMEDIA_TYPE_AUDIO:
704  avci->initial_sample_rate = frame->sample_rate ? frame->sample_rate :
705  avctx->sample_rate;
706  avci->initial_channels = frame->channels;
707  avci->initial_channel_layout = frame->channel_layout;
708  break;
709  }
710  }
711 
712  if (avctx->frame_number > 1) {
713  changed = avci->initial_format != frame->format;
714 
715  switch(avctx->codec_type) {
716  case AVMEDIA_TYPE_VIDEO:
717  changed |= avci->initial_width != frame->width ||
718  avci->initial_height != frame->height;
719  break;
720  case AVMEDIA_TYPE_AUDIO:
721  changed |= avci->initial_sample_rate != frame->sample_rate ||
722  avci->initial_sample_rate != avctx->sample_rate ||
723  avci->initial_channels != frame->channels ||
724  avci->initial_channel_layout != frame->channel_layout;
725  break;
726  }
727 
728  if (changed) {
729  avci->changed_frames_dropped++;
730  av_log(avctx, AV_LOG_INFO, "dropped changed frame #%d pts %"PRId64
731  " drop count: %d \n",
732  avctx->frame_number, frame->pts,
733  avci->changed_frames_dropped);
734  av_frame_unref(frame);
735  return AVERROR_INPUT_CHANGED;
736  }
737  }
738  }
739  return 0;
740 }
741 
742 static int compat_decode(AVCodecContext *avctx, AVFrame *frame,
743  int *got_frame, const AVPacket *pkt)
744 {
745  AVCodecInternal *avci = avctx->internal;
746  int ret = 0;
747 
748  av_assert0(avci->compat_decode_consumed == 0);
749 
750  if (avci->draining_done && pkt && pkt->size != 0) {
751  av_log(avctx, AV_LOG_WARNING, "Got unexpected packet after EOF\n");
752  avcodec_flush_buffers(avctx);
753  }
754 
755  *got_frame = 0;
756 
757  if (avci->compat_decode_partial_size > 0 &&
758  avci->compat_decode_partial_size != pkt->size) {
759  av_log(avctx, AV_LOG_ERROR,
760  "Got unexpected packet size after a partial decode\n");
761  ret = AVERROR(EINVAL);
762  goto finish;
763  }
764 
765  if (!avci->compat_decode_partial_size) {
766  ret = avcodec_send_packet(avctx, pkt);
767  if (ret == AVERROR_EOF)
768  ret = 0;
769  else if (ret == AVERROR(EAGAIN)) {
770  /* we fully drain all the output in each decode call, so this should not
771  * ever happen */
772  ret = AVERROR_BUG;
773  goto finish;
774  } else if (ret < 0)
775  goto finish;
776  }
777 
778  while (ret >= 0) {
779  ret = avcodec_receive_frame(avctx, frame);
780  if (ret < 0) {
781  if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
782  ret = 0;
783  goto finish;
784  }
785 
786  if (frame != avci->compat_decode_frame) {
787  if (!avctx->refcounted_frames) {
788  ret = unrefcount_frame(avci, frame);
789  if (ret < 0)
790  goto finish;
791  }
792 
793  *got_frame = 1;
794  frame = avci->compat_decode_frame;
795  } else {
796  if (!avci->compat_decode_warned) {
797  av_log(avctx, AV_LOG_WARNING, "The deprecated avcodec_decode_* "
798  "API cannot return all the frames for this decoder. "
799  "Some frames will be dropped. Update your code to the "
800  "new decoding API to fix this.\n");
801  avci->compat_decode_warned = 1;
802  }
803  }
804 
805  if (avci->draining || (!avctx->codec->bsfs && avci->compat_decode_consumed < pkt->size))
806  break;
807  }
808 
809 finish:
810  if (ret == 0) {
811  /* if there are any bsfs then assume full packet is always consumed */
812  if (avctx->codec->bsfs)
813  ret = pkt->size;
814  else
815  ret = FFMIN(avci->compat_decode_consumed, pkt->size);
816  }
817  avci->compat_decode_consumed = 0;
818  avci->compat_decode_partial_size = (ret >= 0) ? pkt->size - ret : 0;
819 
820  return ret;
821 }
822 
823 int attribute_align_arg avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture,
824  int *got_picture_ptr,
825  const AVPacket *avpkt)
826 {
827  return compat_decode(avctx, picture, got_picture_ptr, avpkt);
828 }
829 
830 int attribute_align_arg avcodec_decode_audio4(AVCodecContext *avctx,
831  AVFrame *frame,
832  int *got_frame_ptr,
833  const AVPacket *avpkt)
834 {
835  return compat_decode(avctx, frame, got_frame_ptr, avpkt);
836 }
837 
838 static void get_subtitle_defaults(AVSubtitle *sub)
839 {
840  memset(sub, 0, sizeof(*sub));
841  sub->pts = AV_NOPTS_VALUE;
842 }
843 
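/* Recode a subtitle packet to UTF-8 with iconv when
 * sub_charenc_mode == FF_SUB_CHARENC_MODE_PRE_DECODER; without iconv
 * support this is an error. */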
844 #define UTF8_MAX_BYTES 4 /* 5 and 6 bytes sequences should not be used */
845 static int recode_subtitle(AVCodecContext *avctx,
846  AVPacket *outpkt, const AVPacket *inpkt)
847 {
848 #if CONFIG_ICONV
849  iconv_t cd = (iconv_t)-1;
850  int ret = 0;
851  char *inb, *outb;
852  size_t inl, outl;
853  AVPacket tmp;
854 #endif
855 
856  if (avctx->sub_charenc_mode != FF_SUB_CHARENC_MODE_PRE_DECODER || inpkt->size == 0)
857  return 0;
858 
859 #if CONFIG_ICONV
860  cd = iconv_open("UTF-8", avctx->sub_charenc);
861  av_assert0(cd != (iconv_t)-1);
862 
863  inb = inpkt->data;
864  inl = inpkt->size;
865 
866  if (inl >= INT_MAX / UTF8_MAX_BYTES - AV_INPUT_BUFFER_PADDING_SIZE) {
867  av_log(avctx, AV_LOG_ERROR, "Subtitles packet is too big for recoding\n");
868  ret = AVERROR(ENOMEM);
869  goto end;
870  }
871 
872  ret = av_new_packet(&tmp, inl * UTF8_MAX_BYTES);
873  if (ret < 0)
874  goto end;
875  outpkt->buf = tmp.buf;
876  outpkt->data = tmp.data;
877  outpkt->size = tmp.size;
878  outb = outpkt->data;
879  outl = outpkt->size;
880 
881  if (iconv(cd, &inb, &inl, &outb, &outl) == (size_t)-1 ||
882  iconv(cd, NULL, NULL, &outb, &outl) == (size_t)-1 ||
883  outl >= outpkt->size || inl != 0) {
884  ret = FFMIN(AVERROR(errno), -1);
885  av_log(avctx, AV_LOG_ERROR, "Unable to recode subtitle event \"%s\" "
886  "from %s to UTF-8\n", inpkt->data, avctx->sub_charenc);
887  av_packet_unref(&tmp);
888  goto end;
889  }
890  outpkt->size -= outl;
891  memset(outpkt->data + outpkt->size, 0, outl);
892 
893 end:
894  if (cd != (iconv_t)-1)
895  iconv_close(cd);
896  return ret;
897 #else
898  av_log(avctx, AV_LOG_ERROR, "requesting subtitles recoding without iconv");
899  return AVERROR(EINVAL);
900 #endif
901 }
902 
903 static int utf8_check(const uint8_t *str)
904 {
905  const uint8_t *byte;
906  uint32_t codepoint, min;
907 
908  while (*str) {
909  byte = str;
910  GET_UTF8(codepoint, *(byte++), return 0;);
911  min = byte - str == 1 ? 0 : byte - str == 2 ? 0x80 :
912  1 << (5 * (byte - str) - 4);
913  if (codepoint < min || codepoint >= 0x110000 ||
914  codepoint == 0xFFFE /* BOM */ ||
915  codepoint >= 0xD800 && codepoint <= 0xDFFF /* surrogates */)
916  return 0;
917  str = byte;
918  }
919  return 1;
920 }
921 
922 #if FF_API_ASS_TIMING
923 static void insert_ts(AVBPrint *buf, int ts)
924 {
925  if (ts == -1) {
926  av_bprintf(buf, "9:59:59.99,");
927  } else {
928  int h, m, s;
929 
930  h = ts/360000; ts -= 360000*h;
931  m = ts/ 6000; ts -= 6000*m;
932  s = ts/ 100; ts -= 100*s;
933  av_bprintf(buf, "%d:%02d:%02d.%02d,", h, m, s, ts);
934  }
935 }
936 
937 static int convert_sub_to_old_ass_form(AVSubtitle *sub, const AVPacket *pkt, AVRational tb)
938 {
939  int i;
940  AVBPrint buf;
941 
942  av_bprint_init(&buf, 0, AV_BPRINT_SIZE_UNLIMITED);
943 
944  for (i = 0; i < sub->num_rects; i++) {
945  char *final_dialog;
946  const char *dialog;
947  AVSubtitleRect *rect = sub->rects[i];
948  int ts_start, ts_duration = -1;
949  long int layer;
950 
951  if (rect->type != SUBTITLE_ASS || !strncmp(rect->ass, "Dialogue: ", 10))
952  continue;
953 
954  av_bprint_clear(&buf);
955 
956  /* skip ReadOrder */
957  dialog = strchr(rect->ass, ',');
958  if (!dialog)
959  continue;
960  dialog++;
961 
962  /* extract Layer or Marked */
963  layer = strtol(dialog, (char**)&dialog, 10);
964  if (*dialog != ',')
965  continue;
966  dialog++;
967 
968  /* rescale timing to ASS time base (ms) */
969  ts_start = av_rescale_q(pkt->pts, tb, av_make_q(1, 100));
970  if (pkt->duration != -1)
971  ts_duration = av_rescale_q(pkt->duration, tb, av_make_q(1, 100));
972  sub->end_display_time = FFMAX(sub->end_display_time, 10 * ts_duration);
973 
974  /* construct ASS (standalone file form with timestamps) string */
975  av_bprintf(&buf, "Dialogue: %ld,", layer);
976  insert_ts(&buf, ts_start);
977  insert_ts(&buf, ts_duration == -1 ? -1 : ts_start + ts_duration);
978  av_bprintf(&buf, "%s\r\n", dialog);
979 
980  final_dialog = av_strdup(buf.str);
981  if (!av_bprint_is_complete(&buf) || !final_dialog) {
982  av_freep(&final_dialog);
983  av_bprint_finalize(&buf, NULL);
984  return AVERROR(ENOMEM);
985  }
986  av_freep(&rect->ass);
987  rect->ass = final_dialog;
988  }
989 
990  av_bprint_finalize(&buf, NULL);
991  return 0;
992 }
993 #endif
994 
995 int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub,
996  int *got_sub_ptr,
997  AVPacket *avpkt)
998 {
999  int i, ret = 0;
1000 
1001  if (!avpkt->data && avpkt->size) {
1002  av_log(avctx, AV_LOG_ERROR, "invalid packet: NULL data, size != 0\n");
1003  return AVERROR(EINVAL);
1004  }
1005  if (!avctx->codec)
1006  return AVERROR(EINVAL);
1007  if (avctx->codec->type != AVMEDIA_TYPE_SUBTITLE) {
1008  av_log(avctx, AV_LOG_ERROR, "Invalid media type for subtitles\n");
1009  return AVERROR(EINVAL);
1010  }
1011 
1012  *got_sub_ptr = 0;
1013  get_subtitle_defaults(sub);
1014 
1015  if ((avctx->codec->capabilities & AV_CODEC_CAP_DELAY) || avpkt->size) {
1016  AVPacket pkt_recoded = *avpkt;
1017 
1018  ret = recode_subtitle(avctx, &pkt_recoded, avpkt);
1019  if (ret < 0) {
1020  *got_sub_ptr = 0;
1021  } else {
1022  ret = extract_packet_props(avctx->internal, &pkt_recoded);
1023  if (ret < 0)
1024  return ret;
1025 
1026  if (avctx->pkt_timebase.num && avpkt->pts != AV_NOPTS_VALUE)
1027  sub->pts = av_rescale_q(avpkt->pts,
1028  avctx->pkt_timebase, AV_TIME_BASE_Q);
1029  ret = avctx->codec->decode(avctx, sub, got_sub_ptr, &pkt_recoded);
1030  av_assert1((ret >= 0) >= !!*got_sub_ptr &&
1031  !!*got_sub_ptr >= !!sub->num_rects);
1032 
1033 #if FF_API_ASS_TIMING
1034  if (avctx->sub_text_format == FF_SUB_TEXT_FMT_ASS_WITH_TIMINGS
1035  && *got_sub_ptr && sub->num_rects) {
1036  const AVRational tb = avctx->pkt_timebase.num ? avctx->pkt_timebase
1037  : avctx->time_base;
1038  int err = convert_sub_to_old_ass_form(sub, avpkt, tb);
1039  if (err < 0)
1040  ret = err;
1041  }
1042 #endif
1043 
1044  if (sub->num_rects && !sub->end_display_time && avpkt->duration &&
1045  avctx->pkt_timebase.num) {
1046  AVRational ms = { 1, 1000 };
1047  sub->end_display_time = av_rescale_q(avpkt->duration,
1048  avctx->pkt_timebase, ms);
1049  }
1050 
1051  if (avctx->codec_descriptor->props & AV_CODEC_PROP_BITMAP_SUB)
1052  sub->format = 0;
1053  else if (avctx->codec_descriptor->props & AV_CODEC_PROP_TEXT_SUB)
1054  sub->format = 1;
1055 
1056  for (i = 0; i < sub->num_rects; i++) {
1057  if (avctx->sub_charenc_mode != FF_SUB_CHARENC_MODE_IGNORE &&
1058  sub->rects[i]->ass && !utf8_check(sub->rects[i]->ass)) {
1059  av_log(avctx, AV_LOG_ERROR,
1060  "Invalid UTF-8 in decoded subtitles text; "
1061  "maybe missing -sub_charenc option\n");
1062  avsubtitle_free(sub);
1063  ret = AVERROR_INVALIDDATA;
1064  break;
1065  }
1066  }
1067 
1068  if (avpkt->data != pkt_recoded.data) { // did we recode?
1069  /* prevent from destroying side data from original packet */
1070  pkt_recoded.side_data = NULL;
1071  pkt_recoded.side_data_elems = 0;
1072 
1073  av_packet_unref(&pkt_recoded);
1074  }
1075  }
1076 
1077  if (*got_sub_ptr)
1078  avctx->frame_number++;
1079  }
1080 
1081  return ret;
1082 }
1083 
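/* Default pixel format negotiation: prefer a format matching an already
 * opened hardware device, otherwise fall back to the software format at the
 * end of the list or the first entry that needs no external setup. */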
1084 enum AVPixelFormat avcodec_default_get_format(struct AVCodecContext *avctx,
1085  const enum AVPixelFormat *fmt)
1086 {
1087  const AVPixFmtDescriptor *desc;
1088  const AVCodecHWConfig *config;
1089  int i, n;
1090 
1091  // If a device was supplied when the codec was opened, assume that the
1092  // user wants to use it.
1093  if (avctx->hw_device_ctx && avctx->codec->hw_configs) {
1094  AVHWDeviceContext *device_ctx =
1095  (AVHWDeviceContext*)avctx->hw_device_ctx->data;
1096  for (i = 0;; i++) {
1097  config = &avctx->codec->hw_configs[i]->public;
1098  if (!config)
1099  break;
1100  if (!(config->methods &
1101  AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX))
1102  continue;
1103  if (device_ctx->type != config->device_type)
1104  continue;
1105  for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++) {
1106  if (config->pix_fmt == fmt[n])
1107  return fmt[n];
1108  }
1109  }
1110  }
1111  // No device or other setup, so we have to choose from things which
1112  // don't need any other external information.
1113 
1114  // If the last element of the list is a software format, choose it
1115  // (this should be best software format if any exist).
1116  for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++);
1117  desc = av_pix_fmt_desc_get(fmt[n - 1]);
1118  if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
1119  return fmt[n - 1];
1120 
1121  // Finally, traverse the list in order and choose the first entry
1122  // with no external dependencies (if there is no hardware configuration
1123  // information available then this just picks the first entry).
1124  for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++) {
1125  for (i = 0;; i++) {
1126  config = avcodec_get_hw_config(avctx->codec, i);
1127  if (!config)
1128  break;
1129  if (config->pix_fmt == fmt[n])
1130  break;
1131  }
1132  if (!config) {
1133  // No specific config available, so the decoder must be able
1134  // to handle this format without any additional setup.
1135  return fmt[n];
1136  }
1137  if (config->methods & AV_CODEC_HW_CONFIG_METHOD_INTERNAL) {
1138  // Usable with only internal setup.
1139  return fmt[n];
1140  }
1141  }
1142 
1143  // Nothing is usable, give up.
1144  return AV_PIX_FMT_NONE;
1145 }
1146 
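/* Create avctx->hw_frames_ctx from avctx->hw_device_ctx for the active
 * hwaccel, adding a few extra surfaces to the initial pool. */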
1147 int ff_decode_get_hw_frames_ctx(AVCodecContext *avctx,
1148  enum AVHWDeviceType dev_type)
1149 {
1150  AVHWDeviceContext *device_ctx;
1151  AVHWFramesContext *frames_ctx;
1152  int ret;
1153 
1154  if (!avctx->hwaccel)
1155  return AVERROR(ENOSYS);
1156 
1157  if (avctx->hw_frames_ctx)
1158  return 0;
1159  if (!avctx->hw_device_ctx) {
1160  av_log(avctx, AV_LOG_ERROR, "A hardware frames or device context is "
1161  "required for hardware accelerated decoding.\n");
1162  return AVERROR(EINVAL);
1163  }
1164 
1165  device_ctx = (AVHWDeviceContext *)avctx->hw_device_ctx->data;
1166  if (device_ctx->type != dev_type) {
1167  av_log(avctx, AV_LOG_ERROR, "Device type %s expected for hardware "
1168  "decoding, but got %s.\n", av_hwdevice_get_type_name(dev_type),
1169  av_hwdevice_get_type_name(device_ctx->type));
1170  return AVERROR(EINVAL);
1171  }
1172 
1173  ret = avcodec_get_hw_frames_parameters(avctx,
1174  avctx->hw_device_ctx,
1175  avctx->hwaccel->pix_fmt,
1176  &avctx->hw_frames_ctx);
1177  if (ret < 0)
1178  return ret;
1179 
1180  frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
1181 
1182 
1183  if (frames_ctx->initial_pool_size) {
1184  // We guarantee 4 base work surfaces. The function above guarantees 1
1185  // (the absolute minimum), so add the missing count.
1186  frames_ctx->initial_pool_size += 3;
1187  }
1188 
1189  ret = av_hwframe_ctx_init(avctx->hw_frames_ctx);
1190  if (ret < 0) {
1191  av_buffer_unref(&avctx->hw_frames_ctx);
1192  return ret;
1193  }
1194 
1195  return 0;
1196 }
1197 
1198 int avcodec_get_hw_frames_parameters(AVCodecContext *avctx,
1199  AVBufferRef *device_ref,
1200  enum AVPixelFormat hw_pix_fmt,
1201  AVBufferRef **out_frames_ref)
1202 {
1203  AVBufferRef *frames_ref = NULL;
1204  const AVCodecHWConfigInternal *hw_config;
1205  const AVHWAccel *hwa;
1206  int i, ret;
1207 
1208  for (i = 0;; i++) {
1209  hw_config = avctx->codec->hw_configs[i];
1210  if (!hw_config)
1211  return AVERROR(ENOENT);
1212  if (hw_config->public.pix_fmt == hw_pix_fmt)
1213  break;
1214  }
1215 
1216  hwa = hw_config->hwaccel;
1217  if (!hwa || !hwa->frame_params)
1218  return AVERROR(ENOENT);
1219 
1220  frames_ref = av_hwframe_ctx_alloc(device_ref);
1221  if (!frames_ref)
1222  return AVERROR(ENOMEM);
1223 
1224  ret = hwa->frame_params(avctx, frames_ref);
1225  if (ret >= 0) {
1226  AVHWFramesContext *frames_ctx = (AVHWFramesContext*)frames_ref->data;
1227 
1228  if (frames_ctx->initial_pool_size) {
1229  // If the user has requested that extra output surfaces be
1230  // available then add them here.
1231  if (avctx->extra_hw_frames > 0)
1232  frames_ctx->initial_pool_size += avctx->extra_hw_frames;
1233 
1234  // If frame threading is enabled then an extra surface per thread
1235  // is also required.
1236  if (avctx->active_thread_type & FF_THREAD_FRAME)
1237  frames_ctx->initial_pool_size += avctx->thread_count;
1238  }
1239 
1240  *out_frames_ref = frames_ref;
1241  } else {
1242  av_buffer_unref(&frames_ref);
1243  }
1244  return ret;
1245 }
1246 
1247 static int hwaccel_init(AVCodecContext *avctx,
1248  const AVCodecHWConfigInternal *hw_config)
1249 {
1250  const AVHWAccel *hwaccel;
1251  int err;
1252 
1253  hwaccel = hw_config->hwaccel;
1254  if (hwaccel->capabilities & AV_HWACCEL_CODEC_CAP_EXPERIMENTAL &&
1255  avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
1256  av_log(avctx, AV_LOG_WARNING, "Ignoring experimental hwaccel: %s\n",
1257  hwaccel->name);
1258  return AVERROR_PATCHWELCOME;
1259  }
1260 
1261  if (hwaccel->priv_data_size) {
1262  avctx->internal->hwaccel_priv_data =
1263  av_mallocz(hwaccel->priv_data_size);
1264  if (!avctx->internal->hwaccel_priv_data)
1265  return AVERROR(ENOMEM);
1266  }
1267 
1268  avctx->hwaccel = hwaccel;
1269  if (hwaccel->init) {
1270  err = hwaccel->init(avctx);
1271  if (err < 0) {
1272  av_log(avctx, AV_LOG_ERROR, "Failed setup for format %s: "
1273  "hwaccel initialisation returned error.\n",
1274  av_get_pix_fmt_name(hw_config->public.pix_fmt));
1275  av_freep(&avctx->internal->hwaccel_priv_data);
1276  avctx->hwaccel = NULL;
1277  return err;
1278  }
1279  }
1280 
1281  return 0;
1282 }
1283 
1284 static void hwaccel_uninit(AVCodecContext *avctx)
1285 {
1286  if (avctx->hwaccel && avctx->hwaccel->uninit)
1287  avctx->hwaccel->uninit(avctx);
1288 
1289  av_freep(&avctx->internal->hwaccel_priv_data);
1290 
1291  avctx->hwaccel = NULL;
1292 
1293  av_buffer_unref(&avctx->hw_frames_ctx);
1294 }
1295 
1296 int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
1297 {
1298  const AVPixFmtDescriptor *desc;
1299  enum AVPixelFormat *choices;
1300  enum AVPixelFormat ret, user_choice;
1301  const AVCodecHWConfigInternal *hw_config;
1302  const AVCodecHWConfig *config;
1303  int i, n, err;
1304 
1305  // Find end of list.
1306  for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++);
1307  // Must contain at least one entry.
1308  av_assert0(n >= 1);
1309  // If a software format is available, it must be the last entry.
1310  desc = av_pix_fmt_desc_get(fmt[n - 1]);
1311  if (desc->flags & AV_PIX_FMT_FLAG_HWACCEL) {
1312  // No software format is available.
1313  } else {
1314  avctx->sw_pix_fmt = fmt[n - 1];
1315  }
1316 
1317  choices = av_malloc_array(n + 1, sizeof(*choices));
1318  if (!choices)
1319  return AV_PIX_FMT_NONE;
1320 
1321  memcpy(choices, fmt, (n + 1) * sizeof(*choices));
1322 
1323  for (;;) {
1324  // Remove the previous hwaccel, if there was one.
1325  hwaccel_uninit(avctx);
1326 
1327  user_choice = avctx->get_format(avctx, choices);
1328  if (user_choice == AV_PIX_FMT_NONE) {
1329  // Explicitly chose nothing, give up.
1330  ret = AV_PIX_FMT_NONE;
1331  break;
1332  }
1333 
1334  desc = av_pix_fmt_desc_get(user_choice);
1335  if (!desc) {
1336  av_log(avctx, AV_LOG_ERROR, "Invalid format returned by "
1337  "get_format() callback.\n");
1338  ret = AV_PIX_FMT_NONE;
1339  break;
1340  }
1341  av_log(avctx, AV_LOG_DEBUG, "Format %s chosen by get_format().\n",
1342  desc->name);
1343 
1344  for (i = 0; i < n; i++) {
1345  if (choices[i] == user_choice)
1346  break;
1347  }
1348  if (i == n) {
1349  av_log(avctx, AV_LOG_ERROR, "Invalid return from get_format(): "
1350  "%s not in possible list.\n", desc->name);
1351  ret = AV_PIX_FMT_NONE;
1352  break;
1353  }
1354 
1355  if (avctx->codec->hw_configs) {
1356  for (i = 0;; i++) {
1357  hw_config = avctx->codec->hw_configs[i];
1358  if (!hw_config)
1359  break;
1360  if (hw_config->public.pix_fmt == user_choice)
1361  break;
1362  }
1363  } else {
1364  hw_config = NULL;
1365  }
1366 
1367  if (!hw_config) {
1368  // No config available, so no extra setup required.
1369  ret = user_choice;
1370  break;
1371  }
1372  config = &hw_config->public;
1373 
1374  if (config->methods &
1375  AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX &&
1376  avctx->hw_frames_ctx) {
1377  const AVHWFramesContext *frames_ctx =
1378  (AVHWFramesContext*)avctx->hw_frames_ctx->data;
1379  if (frames_ctx->format != user_choice) {
1380  av_log(avctx, AV_LOG_ERROR, "Invalid setup for format %s: "
1381  "does not match the format of the provided frames "
1382  "context.\n", desc->name);
1383  goto try_again;
1384  }
1385  } else if (config->methods &
1386  AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX &&
1387  avctx->hw_device_ctx) {
1388  const AVHWDeviceContext *device_ctx =
1389  (AVHWDeviceContext*)avctx->hw_device_ctx->data;
1390  if (device_ctx->type != config->device_type) {
1391  av_log(avctx, AV_LOG_ERROR, "Invalid setup for format %s: "
1392  "does not match the type of the provided device "
1393  "context.\n", desc->name);
1394  goto try_again;
1395  }
1396  } else if (config->methods &
1397  AV_CODEC_HW_CONFIG_METHOD_INTERNAL) {
1398  // Internal-only setup, no additional configuration.
1399  } else if (config->methods &
1400  AV_CODEC_HW_CONFIG_METHOD_AD_HOC) {
1401  // Some ad-hoc configuration we can't see and can't check.
1402  } else {
1403  av_log(avctx, AV_LOG_ERROR, "Invalid setup for format %s: "
1404  "missing configuration.\n", desc->name);
1405  goto try_again;
1406  }
1407  if (hw_config->hwaccel) {
1408  av_log(avctx, AV_LOG_DEBUG, "Format %s requires hwaccel "
1409  "initialisation.\n", desc->name);
1410  err = hwaccel_init(avctx, hw_config);
1411  if (err < 0)
1412  goto try_again;
1413  }
1414  ret = user_choice;
1415  break;
1416 
1417  try_again:
1418  av_log(avctx, AV_LOG_DEBUG, "Format %s not usable, retrying "
1419  "get_format() without it.\n", desc->name);
1420  for (i = 0; i < n; i++) {
1421  if (choices[i] == user_choice)
1422  break;
1423  }
1424  for (; i + 1 < n; i++)
1425  choices[i] = choices[i + 1];
1426  --n;
1427  }
1428 
1429  av_freep(&choices);
1430  return ret;
1431 }
1432 
1433 static void frame_pool_free(void *opaque, uint8_t *data)
1434 {
1435  FramePool *pool = (FramePool*)data;
1436  int i;
1437 
1438  for (i = 0; i < FF_ARRAY_ELEMS(pool->pools); i++)
1439  av_buffer_pool_uninit(&pool->pools[i]);
1440 
1441  av_freep(&data);
1442 }
1443 
1444 static AVBufferRef *frame_pool_alloc(void)
1445 {
1446  FramePool *pool = av_mallocz(sizeof(*pool));
1447  AVBufferRef *buf;
1448 
1449  if (!pool)
1450  return NULL;
1451 
1452  buf = av_buffer_create((uint8_t*)pool, sizeof(*pool),
1453  frame_pool_free, NULL, 0);
1454  if (!buf) {
1455  av_freep(&pool);
1456  return NULL;
1457  }
1458 
1459  return buf;
1460 }
1461 
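/* (Re)build the internal buffer pools when the frame parameters (format,
 * dimensions, channel count, sample count) no longer match the existing
 * pool. */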
1462 static int update_frame_pool(AVCodecContext *avctx, AVFrame *frame)
1463 {
1464  FramePool *pool = avctx->internal->pool ?
1465  (FramePool*)avctx->internal->pool->data : NULL;
1466  AVBufferRef *pool_buf;
1467  int i, ret, ch, planes;
1468 
1469  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
1470  int planar = av_sample_fmt_is_planar(frame->format);
1471  ch = frame->channels;
1472  planes = planar ? ch : 1;
1473  }
1474 
1475  if (pool && pool->format == frame->format) {
1476  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO &&
1477  pool->width == frame->width && pool->height == frame->height)
1478  return 0;
1479  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && pool->planes == planes &&
1480  pool->channels == ch && frame->nb_samples == pool->samples)
1481  return 0;
1482  }
1483 
1484  pool_buf = frame_pool_alloc();
1485  if (!pool_buf)
1486  return AVERROR(ENOMEM);
1487  pool = (FramePool*)pool_buf->data;
1488 
1489  switch (avctx->codec_type) {
1490  case AVMEDIA_TYPE_VIDEO: {
1491  int linesize[4];
1492  int w = frame->width;
1493  int h = frame->height;
1494  int unaligned;
1495  ptrdiff_t linesize1[4];
1496  size_t size[4];
1497 
1498  avcodec_align_dimensions2(avctx, &w, &h, pool->stride_align);
1499 
1500  do {
1501  // NOTE: do not align linesizes individually, this breaks e.g. assumptions
1502  // that linesize[0] == 2*linesize[1] in the MPEG-encoder for 4:2:2
1503  ret = av_image_fill_linesizes(linesize, avctx->pix_fmt, w);
1504  if (ret < 0)
1505  goto fail;
1506  // increase alignment of w for next try (rhs gives the lowest bit set in w)
1507  w += w & ~(w - 1);
1508 
1509  unaligned = 0;
1510  for (i = 0; i < 4; i++)
1511  unaligned |= linesize[i] % pool->stride_align[i];
1512  } while (unaligned);
1513 
1514  for (i = 0; i < 4; i++)
1515  linesize1[i] = linesize[i];
1516  ret = av_image_fill_plane_sizes(size, avctx->pix_fmt, h, linesize1);
1517  if (ret < 0)
1518  goto fail;
1519 
1520  for (i = 0; i < 4; i++) {
1521  pool->linesize[i] = linesize[i];
1522  if (size[i]) {
1523  if (size[i] > INT_MAX - (16 + STRIDE_ALIGN - 1)) {
1524  ret = AVERROR(EINVAL);
1525  goto fail;
1526  }
1527  pool->pools[i] = av_buffer_pool_init(size[i] + 16 + STRIDE_ALIGN - 1,
1528  CONFIG_MEMORY_POISONING ?
1529  NULL :
1530  av_buffer_allocz);
1531  if (!pool->pools[i]) {
1532  ret = AVERROR(ENOMEM);
1533  goto fail;
1534  }
1535  }
1536  }
1537  pool->format = frame->format;
1538  pool->width = frame->width;
1539  pool->height = frame->height;
1540 
1541  break;
1542  }
1543  case AVMEDIA_TYPE_AUDIO: {
1544  ret = av_samples_get_buffer_size(&pool->linesize[0], ch,
1545  frame->nb_samples, frame->format, 0);
1546  if (ret < 0)
1547  goto fail;
1548 
1549  pool->pools[0] = av_buffer_pool_init(pool->linesize[0], NULL);
1550  if (!pool->pools[0]) {
1551  ret = AVERROR(ENOMEM);
1552  goto fail;
1553  }
1554 
1555  pool->format = frame->format;
1556  pool->planes = planes;
1557  pool->channels = ch;
1558  pool->samples = frame->nb_samples;
1559  break;
1560  }
1561  default: av_assert0(0);
1562  }
1563 
1564  av_buffer_unref(&avctx->internal->pool);
1565  avctx->internal->pool = pool_buf;
1566 
1567  return 0;
1568 fail:
1569  av_buffer_unref(&pool_buf);
1570  return ret;
1571 }
1572 
1573 static int audio_get_buffer(AVCodecContext *avctx, AVFrame *frame)
1574 {
1575  FramePool *pool = (FramePool*)avctx->internal->pool->data;
1576  int planes = pool->planes;
1577  int i;
1578 
1579  frame->linesize[0] = pool->linesize[0];
1580 
1582  frame->extended_data = av_mallocz_array(planes, sizeof(*frame->extended_data));
1585  sizeof(*frame->extended_buf));
1586  if (!frame->extended_data || !frame->extended_buf) {
1587  av_freep(&frame->extended_data);
1588  av_freep(&frame->extended_buf);
1589  return AVERROR(ENOMEM);
1590  }
1591  } else {
1592  frame->extended_data = frame->data;
1593  av_assert0(frame->nb_extended_buf == 0);
1594  }
1595 
1596  for (i = 0; i < FFMIN(planes, AV_NUM_DATA_POINTERS); i++) {
1597  frame->buf[i] = av_buffer_pool_get(pool->pools[0]);
1598  if (!frame->buf[i])
1599  goto fail;
1600  frame->extended_data[i] = frame->data[i] = frame->buf[i]->data;
1601  }
1602  for (i = 0; i < frame->nb_extended_buf; i++) {
1603  frame->extended_buf[i] = av_buffer_pool_get(pool->pools[0]);
1604  if (!frame->extended_buf[i])
1605  goto fail;
1606  frame->extended_data[i + AV_NUM_DATA_POINTERS] = frame->extended_buf[i]->data;
1607  }
1608 
1609  if (avctx->debug & FF_DEBUG_BUFFERS)
1610  av_log(avctx, AV_LOG_DEBUG, "default_get_buffer called on frame %p", frame);
1611 
1612  return 0;
1613 fail:
1614  av_frame_unref(frame);
1615  return AVERROR(ENOMEM);
1616 }
1617 
1618 static int video_get_buffer(AVCodecContext *s, AVFrame *pic)
1619 {
1620  FramePool *pool = (FramePool*)s->internal->pool->data;
1621  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pic->format);
1622  int i;
1623 
1624  if (pic->data[0] || pic->data[1] || pic->data[2] || pic->data[3]) {
1625  av_log(s, AV_LOG_ERROR, "pic->data[*]!=NULL in avcodec_default_get_buffer\n");
1626  return -1;
1627  }
1628 
1629  if (!desc) {
1630  av_log(s, AV_LOG_ERROR,
1631  "Unable to get pixel format descriptor for format %s\n",
1632  av_get_pix_fmt_name(pic->format));
1633  return AVERROR(EINVAL);
1634  }
1635 
1636  memset(pic->data, 0, sizeof(pic->data));
1637  pic->extended_data = pic->data;
1638 
1639  for (i = 0; i < 4 && pool->pools[i]; i++) {
1640  pic->linesize[i] = pool->linesize[i];
1641 
1642  pic->buf[i] = av_buffer_pool_get(pool->pools[i]);
1643  if (!pic->buf[i])
1644  goto fail;
1645 
1646  pic->data[i] = pic->buf[i]->data;
1647  }
1648  for (; i < AV_NUM_DATA_POINTERS; i++) {
1649  pic->data[i] = NULL;
1650  pic->linesize[i] = 0;
1651  }
1652  if (desc->flags & AV_PIX_FMT_FLAG_PAL ||
1653  ((desc->flags & FF_PSEUDOPAL) && pic->data[1]))
1654  avpriv_set_systematic_pal2((uint32_t *)pic->data[1], pic->format);
1655 
1656  if (s->debug & FF_DEBUG_BUFFERS)
1657  av_log(s, AV_LOG_DEBUG, "default_get_buffer called on pic %p\n", pic);
1658 
1659  return 0;
1660 fail:
1661  av_frame_unref(pic);
1662  return AVERROR(ENOMEM);
1663 }
1664 
1665 int avcodec_default_get_buffer2(AVCodecContext *avctx, AVFrame *frame, int flags)
1666 {
1667  int ret;
1668 
1669  if (avctx->hw_frames_ctx) {
1670  ret = av_hwframe_get_buffer(avctx->hw_frames_ctx, frame, 0);
1671  frame->width = avctx->coded_width;
1672  frame->height = avctx->coded_height;
1673  return ret;
1674  }
1675 
1676  if ((ret = update_frame_pool(avctx, frame)) < 0)
1677  return ret;
1678 
1679  switch (avctx->codec_type) {
1680  case AVMEDIA_TYPE_VIDEO:
1681  return video_get_buffer(avctx, frame);
1682  case AVMEDIA_TYPE_AUDIO:
1683  return audio_get_buffer(avctx, frame);
1684  default:
1685  return -1;
1686  }
1687 }
1688 
1689 static int add_metadata_from_side_data(const AVPacket *avpkt, AVFrame *frame)
1690 {
1691  int size;
1692  const uint8_t *side_metadata;
1693 
1694  AVDictionary **frame_md = &frame->metadata;
1695 
1696  side_metadata = av_packet_get_side_data(avpkt,
1697  AV_PKT_DATA_STRINGS_METADATA, &size);
1698  return av_packet_unpack_dictionary(side_metadata, size, frame_md);
1699 }
1700 
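/* Fill the frame with properties from the packet that produced it
 * (timestamps, side data, metadata, discard flag) and with defaults taken
 * from the codec context. */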
1701 int ff_decode_frame_props(AVCodecContext *avctx, AVFrame *frame)
1702 {
1703  AVPacket *pkt = avctx->internal->last_pkt_props;
1704  int i;
1705  static const struct {
1706  enum AVPacketSideDataType packet;
1707  enum AVFrameSideDataType frame;
1708  } sd[] = {
1709  { AV_PKT_DATA_REPLAYGAIN, AV_FRAME_DATA_REPLAYGAIN },
1710  { AV_PKT_DATA_DISPLAYMATRIX, AV_FRAME_DATA_DISPLAYMATRIX },
1711  { AV_PKT_DATA_SPHERICAL, AV_FRAME_DATA_SPHERICAL },
1712  { AV_PKT_DATA_STEREO3D, AV_FRAME_DATA_STEREO3D },
1713  { AV_PKT_DATA_AUDIO_SERVICE_TYPE, AV_FRAME_DATA_AUDIO_SERVICE_TYPE },
1714  { AV_PKT_DATA_MASTERING_DISPLAY_METADATA, AV_FRAME_DATA_MASTERING_DISPLAY_METADATA },
1715  { AV_PKT_DATA_CONTENT_LIGHT_LEVEL, AV_FRAME_DATA_CONTENT_LIGHT_LEVEL },
1716  { AV_PKT_DATA_A53_CC, AV_FRAME_DATA_A53_CC },
1717  { AV_PKT_DATA_ICC_PROFILE, AV_FRAME_DATA_ICC_PROFILE },
1718  { AV_PKT_DATA_S12M_TIMECODE, AV_FRAME_DATA_S12M_TIMECODE },
1719  };
1720 
1721  if (IS_EMPTY(pkt))
1722  avpriv_packet_list_get(&avctx->internal->pkt_props,
1723  &avctx->internal->pkt_props_tail,
1724  pkt);
1725 
1726  if (pkt) {
1727  frame->pts = pkt->pts;
1728 #if FF_API_PKT_PTS
1729 FF_DISABLE_DEPRECATION_WARNINGS
1730  frame->pkt_pts = pkt->pts;
1731 FF_ENABLE_DEPRECATION_WARNINGS
1732 #endif
1733  frame->pkt_pos = pkt->pos;
1734  frame->pkt_duration = pkt->duration;
1735  frame->pkt_size = pkt->size;
1736 
1737  for (i = 0; i < FF_ARRAY_ELEMS(sd); i++) {
1738  int size;
1739  uint8_t *packet_sd = av_packet_get_side_data(pkt, sd[i].packet, &size);
1740  if (packet_sd) {
1741  AVFrameSideData *frame_sd = av_frame_new_side_data(frame,
1742  sd[i].frame,
1743  size);
1744  if (!frame_sd)
1745  return AVERROR(ENOMEM);
1746 
1747  memcpy(frame_sd->data, packet_sd, size);
1748  }
1749  }
1750  add_metadata_from_side_data(pkt, frame);
1751 
1752  if (pkt->flags & AV_PKT_FLAG_DISCARD) {
1753  frame->flags |= AV_FRAME_FLAG_DISCARD;
1754  } else {
1755  frame->flags = (frame->flags & ~AV_FRAME_FLAG_DISCARD);
1756  }
1757  }
1758  frame->reordered_opaque = avctx->reordered_opaque;
1759 
1760  if (frame->color_primaries == AVCOL_PRI_UNSPECIFIED)
1761  frame->color_primaries = avctx->color_primaries;
1762  if (frame->color_trc == AVCOL_TRC_UNSPECIFIED)
1763  frame->color_trc = avctx->color_trc;
1764  if (frame->colorspace == AVCOL_SPC_UNSPECIFIED)
1765  frame->colorspace = avctx->colorspace;
1766  if (frame->color_range == AVCOL_RANGE_UNSPECIFIED)
1767  frame->color_range = avctx->color_range;
1768  if (frame->chroma_location == AVCHROMA_LOC_UNSPECIFIED)
1769  frame->chroma_location = avctx->chroma_sample_location;
1770 
1771  switch (avctx->codec->type) {
1772  case AVMEDIA_TYPE_VIDEO:
1773  frame->format = avctx->pix_fmt;
1774  if (!frame->sample_aspect_ratio.num)
1775  frame->sample_aspect_ratio = avctx->sample_aspect_ratio;
1776 
1777  if (frame->width && frame->height &&
1778  av_image_check_sar(frame->width, frame->height,
1779  frame->sample_aspect_ratio) < 0) {
1780  av_log(avctx, AV_LOG_WARNING, "ignoring invalid SAR: %u/%u\n",
1781  frame->sample_aspect_ratio.num,
1782  frame->sample_aspect_ratio.den);
1783  frame->sample_aspect_ratio = (AVRational){ 0, 1 };
1784  }
1785 
1786  break;
1787  case AVMEDIA_TYPE_AUDIO:
1788  if (!frame->sample_rate)
1789  frame->sample_rate = avctx->sample_rate;
1790  if (frame->format < 0)
1791  frame->format = avctx->sample_fmt;
1792  if (!frame->channel_layout) {
1793  if (avctx->channel_layout) {
1794  if (av_get_channel_layout_nb_channels(avctx->channel_layout) !=
1795  avctx->channels) {
1796  av_log(avctx, AV_LOG_ERROR, "Inconsistent channel "
1797  "configuration.\n");
1798  return AVERROR(EINVAL);
1799  }
1800 
1801  frame->channel_layout = avctx->channel_layout;
1802  } else {
1803  if (avctx->channels > FF_SANE_NB_CHANNELS) {
1804  av_log(avctx, AV_LOG_ERROR, "Too many channels: %d.\n",
1805  avctx->channels);
1806  return AVERROR(ENOSYS);
1807  }
1808  }
1809  }
1810  frame->channels = avctx->channels;
1811  break;
1812  }
1813  return 0;
1814 }
1815 
1816 static void validate_avframe_allocation(AVCodecContext *avctx, AVFrame *frame)
1817 {
1818  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
1819  int i;
1820  int num_planes = av_pix_fmt_count_planes(frame->format);
1821  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
1822  int flags = desc ? desc->flags : 0;
1823  if (num_planes == 1 && (flags & AV_PIX_FMT_FLAG_PAL))
1824  num_planes = 2;
1825  if ((flags & FF_PSEUDOPAL) && frame->data[1])
1826  num_planes = 2;
1827  for (i = 0; i < num_planes; i++) {
1828  av_assert0(frame->data[i]);
1829  }
1830  // For formats without data like hwaccel allow unused pointers to be non-NULL.
1831  for (i = num_planes; num_planes > 0 && i < FF_ARRAY_ELEMS(frame->data); i++) {
1832  if (frame->data[i])
1833  av_log(avctx, AV_LOG_ERROR, "Buffer returned by get_buffer2() did not zero unused plane pointers\n");
1834  frame->data[i] = NULL;
1835  }
1836  }
1837 }
1838 
1839 static void decode_data_free(void *opaque, uint8_t *data)
1840 {
1841  FrameDecodeData *fdd = (FrameDecodeData*)data;
1842 
1843  if (fdd->post_process_opaque_free)
1844  fdd->post_process_opaque_free(fdd->post_process_opaque);
1845 
1846  if (fdd->hwaccel_priv_free)
1847  fdd->hwaccel_priv_free(fdd->hwaccel_priv);
1848 
1849  av_freep(&fdd);
1850 }
1851 
1852 int ff_attach_decode_data(AVFrame *frame)
1853 {
1854  AVBufferRef *fdd_buf;
1855  FrameDecodeData *fdd;
1856 
1857  av_assert1(!frame->private_ref);
1858  av_buffer_unref(&frame->private_ref);
1859 
1860  fdd = av_mallocz(sizeof(*fdd));
1861  if (!fdd)
1862  return AVERROR(ENOMEM);
1863 
1864  fdd_buf = av_buffer_create((uint8_t*)fdd, sizeof(*fdd), decode_data_free,
1865  NULL, AV_BUFFER_FLAG_READONLY);
1866  if (!fdd_buf) {
1867  av_freep(&fdd);
1868  return AVERROR(ENOMEM);
1869  }
1870 
1871  frame->private_ref = fdd_buf;
1872 
1873  return 0;
1874 }
1875 
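/* Common implementation behind ff_get_buffer(): validates the frame
 * parameters, fills in frame properties, allocates the buffers via the
 * hwaccel or get_buffer2() and attaches the per-frame decode data. */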
1876 static int get_buffer_internal(AVCodecContext *avctx, AVFrame *frame, int flags)
1877 {
1878  const AVHWAccel *hwaccel = avctx->hwaccel;
1879  int override_dimensions = 1;
1880  int ret;
1881 
1882  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
1883  if ((unsigned)avctx->width > INT_MAX - STRIDE_ALIGN ||
1884  (ret = av_image_check_size2(FFALIGN(avctx->width, STRIDE_ALIGN), avctx->height, avctx->max_pixels, AV_PIX_FMT_NONE, 0, avctx)) < 0 || avctx->pix_fmt<0) {
1885  av_log(avctx, AV_LOG_ERROR, "video_get_buffer: image parameters invalid\n");
1886  ret = AVERROR(EINVAL);
1887  goto fail;
1888  }
1889 
1890  if (frame->width <= 0 || frame->height <= 0) {
1891  frame->width = FFMAX(avctx->width, AV_CEIL_RSHIFT(avctx->coded_width, avctx->lowres));
1892  frame->height = FFMAX(avctx->height, AV_CEIL_RSHIFT(avctx->coded_height, avctx->lowres));
1893  override_dimensions = 0;
1894  }
1895 
1896  if (frame->data[0] || frame->data[1] || frame->data[2] || frame->data[3]) {
1897  av_log(avctx, AV_LOG_ERROR, "pic->data[*]!=NULL in get_buffer_internal\n");
1898  ret = AVERROR(EINVAL);
1899  goto fail;
1900  }
1901  } else if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
1902  if (frame->nb_samples * (int64_t)avctx->channels > avctx->max_samples) {
1903  av_log(avctx, AV_LOG_ERROR, "samples per frame %d, exceeds max_samples %"PRId64"\n", frame->nb_samples, avctx->max_samples);
1904  ret = AVERROR(EINVAL);
1905  goto fail;
1906  }
1907  }
1908  ret = ff_decode_frame_props(avctx, frame);
1909  if (ret < 0)
1910  goto fail;
1911 
1912  if (hwaccel) {
1913  if (hwaccel->alloc_frame) {
1914  ret = hwaccel->alloc_frame(avctx, frame);
1915  goto end;
1916  }
1917  } else
1918  avctx->sw_pix_fmt = avctx->pix_fmt;
1919 
1920  ret = avctx->get_buffer2(avctx, frame, flags);
1921  if (ret < 0)
1922  goto fail;
1923 
1924  validate_avframe_allocation(avctx, frame);
1925 
1926  ret = ff_attach_decode_data(frame);
1927  if (ret < 0)
1928  goto fail;
1929 
1930 end:
1931  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO && !override_dimensions &&
1932  !(avctx->codec->caps_internal & FF_CODEC_CAP_EXPORTS_CROPPING)) {
1933  frame->width = avctx->width;
1934  frame->height = avctx->height;
1935  }
1936 
1937 fail:
1938  if (ret < 0) {
1939  av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
1940  av_frame_unref(frame);
1941  }
1942 
1943  return ret;
1944 }
1945 
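/* Reallocate a frame buffer if the video parameters changed; otherwise
 * reuse the existing buffer, copying the data to a new writable buffer when
 * needed. */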
1946 static int reget_buffer_internal(AVCodecContext *avctx, AVFrame *frame, int flags)
1947 {
1948  AVFrame *tmp;
1949  int ret;
1950 
1951  av_assert0(avctx->codec_type == AVMEDIA_TYPE_VIDEO);
1952 
1953  if (frame->data[0] && (frame->width != avctx->width || frame->height != avctx->height || frame->format != avctx->pix_fmt)) {
1954  av_log(avctx, AV_LOG_WARNING, "Picture changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s in reget buffer()\n",
1955  frame->width, frame->height, av_get_pix_fmt_name(frame->format), avctx->width, avctx->height, av_get_pix_fmt_name(avctx->pix_fmt));
1956  av_frame_unref(frame);
1957  }
1958 
1959  if (!frame->data[0])
1960  return ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF);
1961 
1962  if ((flags & FF_REGET_BUFFER_FLAG_READONLY) || av_frame_is_writable(frame))
1963  return ff_decode_frame_props(avctx, frame);
1964 
1965  tmp = av_frame_alloc();
1966  if (!tmp)
1967  return AVERROR(ENOMEM);
1968 
1969  av_frame_move_ref(tmp, frame);
1970 
1971  ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF);
1972  if (ret < 0) {
1973  av_frame_free(&tmp);
1974  return ret;
1975  }
1976 
1977  av_frame_copy(frame, tmp);
1978  av_frame_free(&tmp);
1979 
1980  return 0;
1981 }
1982 
1983 int ff_reget_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
1984 {
1985  int ret = reget_buffer_internal(avctx, frame, flags);
1986  if (ret < 0)
1987  av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
1988  return ret;
1989 }
AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S64),};static void cpy1(uint8_t **dst, const uint8_t **src, int len){memcpy(*dst,*src, len);}static void cpy2(uint8_t **dst, const uint8_t **src, int len){memcpy(*dst,*src, 2 *len);}static void cpy4(uint8_t **dst, const uint8_t **src, int len){memcpy(*dst,*src, 4 *len);}static void cpy8(uint8_t **dst, const uint8_t **src, int len){memcpy(*dst,*src, 8 *len);}AudioConvert *swri_audio_convert_alloc(enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, const int *ch_map, int flags){AudioConvert *ctx;conv_func_type *f=fmt_pair_to_conv_functions[av_get_packed_sample_fmt(out_fmt)+AV_SAMPLE_FMT_NB *av_get_packed_sample_fmt(in_fmt)];if(!f) return NULL;ctx=av_mallocz(sizeof(*ctx));if(!ctx) return NULL;if(channels==1){in_fmt=av_get_planar_sample_fmt(in_fmt);out_fmt=av_get_planar_sample_fmt(out_fmt);}ctx->channels=channels;ctx->conv_f=f;ctx->ch_map=ch_map;if(in_fmt==AV_SAMPLE_FMT_U8||in_fmt==AV_SAMPLE_FMT_U8P) memset(ctx->silence, 0x80, sizeof(ctx->silence));if(out_fmt==in_fmt &&!ch_map){switch(av_get_bytes_per_sample(in_fmt)){case 1:ctx->simd_f=cpy1;break;case 2:ctx->simd_f=cpy2;break;case 4:ctx->simd_f=cpy4;break;case 8:ctx->simd_f=cpy8;break;}}if(HAVE_X86ASM &&1) swri_audio_convert_init_x86(ctx, out_fmt, in_fmt, channels);if(ARCH_ARM) swri_audio_convert_init_arm(ctx, out_fmt, in_fmt, channels);if(ARCH_AARCH64) swri_audio_convert_init_aarch64(ctx, out_fmt, in_fmt, channels);return ctx;}void swri_audio_convert_free(AudioConvert **ctx){av_freep(ctx);}int swri_audio_convert(AudioConvert *ctx, AudioData *out, AudioData *in, int len){int ch;int off=0;const int os=(out->planar?1:out->ch_count)*out->bps;unsigned misaligned=0;av_assert0(ctx->channels==out->ch_count);if(ctx->in_simd_align_mask){int planes=in->planar?in->ch_count:1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) in->ch[ch];misaligned|=m &ctx->in_simd_align_mask;}if(ctx->out_simd_align_mask){int planes=out->planar?out->ch_count:1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) out->ch[ch];misaligned|=m &ctx->out_simd_align_mask;}if(ctx->simd_f &&!ctx->ch_map &&!misaligned){off=len &~15;av_assert1(off >=0);av_assert1(off<=len);av_assert2(ctx->channels==SWR_CH_MAX||!in->ch[ctx->channels]);if(off >0){if(out->planar==in->planar){int planes=out->planar?out->ch_count:1;for(ch=0;ch< planes;ch++){ctx->simd_f(out->ch+ch,(const uint8_t **) in->ch+ch, off *(out-> planar
Definition: audioconvert.c:56
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:505
static int decode_simple_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:541
int width
Definition: decode.c:60
int(* receive_frame)(struct AVCodecContext *avctx, struct AVFrame *frame)
Decode API with decoupled packet/frame dataflow.
Definition: codec.h:298
This side data should be associated with a video stream and contains Stereoscopic 3D information in f...
Definition: packet.h:114
ATSC A53 Part 4 Closed Captions.
Definition: packet.h:242
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:1166
int nb_extended_buf
Number of elements in extended_buf.
Definition: frame.h:523
int ff_decode_bsfs_init(AVCodecContext *avctx)
Called during avcodec_open2() to initialize avctx->internal->bsf.
Definition: decode.c:214
Content light level (based on CTA-861.3).
Definition: frame.h:136
int num
Numerator.
Definition: rational.h:59
Timecode which conforms to SMPTE ST 12-1.
Definition: frame.h:168
int size
Definition: packet.h:364
int av_codec_is_decoder(const AVCodec *codec)
Definition: utils.c:101
int initial_channels
Definition: internal.h:204
enum AVPixelFormat pix_fmt
Supported pixel format.
Definition: avcodec.h:2452
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel...
Definition: avcodec.h:910
void(* hwaccel_priv_free)(void *priv)
Definition: decode.h:53
#define AV_CODEC_PROP_TEXT_SUB
Subtitle codec is text based.
Definition: codec_desc.h:102
static void frame_pool_free(void *opaque, uint8_t *data)
Definition: decode.c:1433
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:741
int samples
Definition: decode.c:65
int attribute_align_arg avcodec_decode_audio4(AVCodecContext *avctx, AVFrame *frame, int *got_frame_ptr, const AVPacket *avpkt)
Decode the audio frame of size avpkt->size from avpkt->data into frame.
Definition: decode.c:830
enum AVPixelFormat format
The pixel format identifying the underlying HW surface type.
Definition: hwcontext.h:209
Mastering display metadata associated with a video frame.
Definition: frame.h:119
unsigned num_rects
Definition: avcodec.h:2713
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:582
enum AVMediaType type
Definition: codec.h:203
static int apply_param_change(AVCodecContext *avctx, const AVPacket *avpkt)
Definition: decode.c:68
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:237
static int recode_subtitle(AVCodecContext *avctx, AVPacket *outpkt, const AVPacket *inpkt)
Definition: decode.c:845
AVBufferPool * pools[4]
Pools for each data plane.
Definition: decode.c:54
int ff_decode_frame_props(AVCodecContext *avctx, AVFrame *frame)
Set various frame properties from the codec context / packet data.
Definition: decode.c:1701
size_t crop_bottom
Definition: frame.h:675
static AVPacket pkt
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, AVPacket *avpkt)
Decode a subtitle message.
Definition: decode.c:995
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1695
int(* alloc_frame)(AVCodecContext *avctx, AVFrame *frame)
Allocate a custom buffer.
Definition: avcodec.h:2471
static int utf8_check(const uint8_t *str)
Definition: decode.c:903
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:235
static int apply_cropping(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:639
Mastering display metadata (based on SMPTE-2086:2014).
Definition: packet.h:222
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avcodec.h:654
int av_get_channel_layout_nb_channels(uint64_t channel_layout)
Return the number of channels in the channel layout.
Apply the maximum possible cropping, even if it requires setting the AVFrame.data[] entries to unalig...
Definition: frame.h:959
AVSubtitleRect ** rects
Definition: avcodec.h:2714
int(* uninit)(AVCodecContext *avctx)
Uninitialize the hwaccel private data.
Definition: avcodec.h:2563
static AVBufferRef * frame_pool_alloc(void)
Definition: decode.c:1444
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: codec.h:75
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
int height
Definition: decode.c:60
enum AVPixelFormat avcodec_default_get_format(struct AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Definition: decode.c:1084
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:1199
uint8_t
#define av_malloc(s)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:190
int ff_attach_decode_data(AVFrame *frame)
Definition: decode.c:1852
int av_packet_unpack_dictionary(const uint8_t *data, int size, AVDictionary **dict)
Unpack a dictionary from side_data.
Definition: avpacket.c:530
AVOptions.
static int64_t guess_correct_pts(AVCodecContext *ctx, int64_t reordered_pts, int64_t dts)
Attempt to guess proper monotonic timestamps for decoded video frames which might have incorrect time...
Definition: decode.c:289
size_t crop_left
Definition: frame.h:676
AVPacket pkt
Definition: packet.h:397
int avpriv_set_systematic_pal2(uint32_t pal[256], enum AVPixelFormat pix_fmt)
Definition: imgutils.c:176
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:92
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: packet.h:381
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: packet.h:108
#define AV_CODEC_FLAG_UNALIGNED
Allow decoders to produce frames with data planes that are not aligned to CPU requirements (e...
Definition: avcodec.h:271
#define AV_WL8(p, d)
Definition: intreadwrite.h:399
Multithreading support functions.
AVBufferRef * private_ref
AVBufferRef for internal use by a single libav* library.
Definition: frame.h:693
int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt)
Called by decoders to get the next packet for decoding.
Definition: decode.c:248
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:407
enum AVPixelFormat pix_fmt
For decoders, a hardware pixel format which that decoder may be able to decode to if suitable hardwar...
Definition: codec.h:432
int planes
Definition: decode.c:63
Structure to hold side data for an AVFrame.
Definition: frame.h:220
int av_image_check_sar(unsigned int w, unsigned int h, AVRational sar)
Check if the given sample aspect ratio of an image is valid.
Definition: imgutils.c:322
size_t compat_decode_consumed
Definition: internal.h:185
static void finish(void)
Definition: movenc.c:345
uint8_t * data
Definition: packet.h:363
#define FF_REGET_BUFFER_FLAG_READONLY
the returned buffer does not need to be writable
Definition: internal.h:298
#define AVERROR_EOF
End of file.
Definition: error.h:55
AVDictionary * metadata
metadata.
Definition: frame.h:600
int lowres
low resolution decoding, 1-> 1/2 size, 2->1/4 size
Definition: avcodec.h:1770
#define AV_BUFFER_FLAG_READONLY
Always treat the buffer as read-only, even when it has only one reference.
Definition: buffer.h:113
int(* init)(AVCodecContext *avctx)
Initialize the hwaccel private data.
Definition: avcodec.h:2555
ptrdiff_t size
Definition: opengl_enc.c:100
int initial_height
Definition: internal.h:202
int initial_format
Definition: internal.h:201
The data represents the AVSphericalMapping structure defined in libavutil/spherical.h.
Definition: frame.h:130
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:1173
#define FFALIGN(x, a)
Definition: macros.h:48
The codec supports this format via the hw_device_ctx interface.
Definition: codec.h:391
#define av_log(a,...)
The buffer pool.
const char * name
Definition: pixdesc.h:82
int av_packet_ref(AVPacket *dst, const AVPacket *src)
Setup a new reference to the data described by a given packet.
Definition: avpacket.c:615
int av_sample_fmt_is_planar(enum AVSampleFormat sample_fmt)
Check if the sample format is planar.
Definition: samplefmt.c:112
void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height, int linesize_align[AV_NUM_DATA_POINTERS])
Modify width and height values so that they will result in a memory buffer that is acceptable for the...
Definition: utils.c:156
int ff_thread_decode_frame(AVCodecContext *avctx, AVFrame *picture, int *got_picture_ptr, AVPacket *avpkt)
Submit a new frame to a decoding thread.
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
#define AV_RL8(x)
Definition: intreadwrite.h:398
AVPacketList * pkt_props_tail
Definition: internal.h:149
int av_new_packet(AVPacket *pkt, int size)
Allocate the payload of a packet and initialize its fields with default values.
Definition: avpacket.c:88
AVRational pkt_timebase
Timebase in which pkt_dts/pts and AVPacket.dts/pts are.
Definition: avcodec.h:2098
AVBSFContext * bsf
Definition: internal.h:141
int width
Definition: frame.h:372
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:194
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:821
void * post_process_opaque
Definition: decode.h:46
#define AV_BPRINT_SIZE_UNLIMITED
const struct AVCodecHWConfigInternal *const * hw_configs
Array of pointers to hardware configurations supported by the codec, or NULL if no hardware supported...
Definition: codec.h:323
static int hwaccel_init(AVCodecContext *avctx, const AVCodecHWConfigInternal *hw_config)
Definition: decode.c:1247
static void validate_avframe_allocation(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:1816
An AV_PKT_DATA_PARAM_CHANGE side data packet is laid out as follows:
Definition: packet.h:72
int av_frame_apply_cropping(AVFrame *frame, int flags)
Crop the given video AVFrame according to its crop_left/crop_top/crop_right/ crop_bottom fields...
Definition: frame.c:891
uint8_t * av_packet_get_side_data(const AVPacket *pkt, enum AVPacketSideDataType type, int *size)
Get side information from packet.
Definition: avpacket.c:353
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:203
ICC profile data consisting of an opaque octet buffer following the format described by ISO 15076-1...
Definition: packet.h:274
int64_t pts_correction_last_pts
Number of incorrect DTS values so far.
Definition: avcodec.h:2123
int ff_reget_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Identical in function to ff_get_buffer(), except it reuses the existing buffer if available...
Definition: decode.c:1983
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:1809
int methods
Bit set of AV_CODEC_HW_CONFIG_METHOD_* flags, describing the possible setup methods which can be used...
Definition: codec.h:437
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:69
int avcodec_is_open(AVCodecContext *s)
Definition: utils.c:1973
int attribute_align_arg avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Return decoded output data from a decoder.
Definition: decode.c:666
AVFrame * buffer_frame
Definition: internal.h:180
int capabilities
Codec capabilities.
Definition: codec.h:209
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: frame.h:558
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:215
ATSC A53 Part 4 Closed Captions.
Definition: frame.h:58
AVBufferRef * buf
A reference to the reference-counted buffer where the packet data is stored.
Definition: packet.h:346
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:611
enum AVColorSpace colorspace
YUV colorspace type.
Definition: frame.h:569
AVRational time_base_in
The timebase used for the timestamps of the input packets.
Definition: bsf.h:89
#define AV_PIX_FMT_FLAG_HWACCEL
Pixel format is an HW accelerated format.
Definition: pixdesc.h:140
simple assert() macros that are a bit more flexible than ISO C assert().
int side_data_elems
Definition: packet.h:375
AVBufferRef * av_buffer_create(uint8_t *data, int size, void(*free)(void *opaque, uint8_t *data), void *opaque, int flags)
Create an AVBuffer from an existing array.
Definition: buffer.c:29
int64_t max_pixels
The number of pixels per image to maximally accept.
Definition: avcodec.h:2271
int av_hwframe_ctx_init(AVBufferRef *ref)
Finalize the context before use.
Definition: hwcontext.c:333
enum AVHWDeviceType type
This field identifies the underlying API used for hardware access.
Definition: hwcontext.h:79
#define FFMAX(a, b)
Definition: common.h:94
int av_hwframe_get_buffer(AVBufferRef *hwframe_ref, AVFrame *frame, int flags)
Allocate a new frame attached to the given AVHWFramesContext.
Definition: hwcontext.c:502
#define fail()
Definition: checkasm.h:123
int av_frame_copy(AVFrame *dst, const AVFrame *src)
Copy the frame data from src to dst.
Definition: frame.c:799
const AVHWAccel * hwaccel
If this configuration uses a hwaccel, a pointer to it.
Definition: hwconfig.h:39
#define FF_CODEC_CAP_EXPORTS_CROPPING
The decoder sets the cropping fields in the output frames manually.
Definition: internal.h:66
int priv_data_size
Size of the private data to allocate in AVCodecInternal.hwaccel_priv_data.
Definition: avcodec.h:2569
int flags
A combination of AV_PKT_FLAG values.
Definition: packet.h:369
int av_bsf_init(AVBSFContext *ctx)
Prepare the filter for use, after all the parameters and options have been set.
Definition: bsf.c:147
reference-counted frame API
uint64_t channel_layout
Audio channel layout.
Definition: avcodec.h:1242
uint32_t end_display_time
Definition: avcodec.h:2712
int64_t pts
Same as packet pts, in AV_TIME_BASE.
Definition: avcodec.h:2715
uint64_t channel_layout
Channel layout of the audio data.
Definition: frame.h:491
int props
Codec properties, a combination of AV_CODEC_PROP_* flags.
Definition: codec_desc.h:54
size_t crop_top
Definition: frame.h:674
common internal API header
uint64_t flags
Combination of AV_PIX_FMT_FLAG_...
Definition: pixdesc.h:106
int initial_pool_size
Initial size of the frame pool.
Definition: hwcontext.h:199
int av_bsf_list_parse_str(const char *str, AVBSFContext **bsf_lst)
Parse string describing list of bitstream filters and create single AVBSFContext describing the whole...
Definition: bsf.c:522
int av_packet_copy_props(AVPacket *dst, const AVPacket *src)
Copy only "properties" fields from src to dst.
Definition: avpacket.c:572
int channels
number of audio channels, only used for audio.
Definition: frame.h:620
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:551
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:1660
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:1801
const char * name
Name of the hardware accelerated codec.
Definition: avcodec.h:2431
#define FFMIN(a, b)
Definition: common.h:96
AVBufferRef ** extended_buf
For planar audio which requires more than AV_NUM_DATA_POINTERS AVBufferRef pointers, this array will hold all the references which cannot fit into AVFrame.buf.
Definition: frame.h:519
int channels
Definition: decode.c:64
AVFrame * compat_decode_frame
Definition: internal.h:189
int width
picture width / height.
Definition: avcodec.h:704
uint8_t w
Definition: llviddspenc.c:39
AVBufferRef * hw_frames_ctx
A reference to the AVHWFramesContext describing the input (for encoding) or output (decoding) frames...
Definition: avcodec.h:2241
static int add_metadata_from_side_data(const AVPacket *avpkt, AVFrame *frame)
Definition: decode.c:1689
static int compat_decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, const AVPacket *pkt)
Definition: decode.c:742
AVPacket * in_pkt
Definition: internal.h:112
This side data should be associated with a video stream and corresponds to the AVSphericalMapping str...
Definition: packet.h:228
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:119
AVFormatContext * ctx
Definition: movenc.c:48
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
Definition: avcodec.h:1145
AVFrameSideDataType
Definition: frame.h:48
#define AVERROR_INPUT_CHANGED
Input changed between calls. Reconfiguration is required. (can be OR-ed with AVERROR_OUTPUT_CHANGED) ...
Definition: error.h:73
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
uint16_t format
Definition: avcodec.h:2710
#define s(width, name)
Definition: cbs_vp9.c:257
The data contains an ICC profile as an opaque octet buffer following the format described by ISO 1507...
Definition: frame.h:143
#define FF_DEBUG_BUFFERS
Definition: avcodec.h:1638
int64_t reordered_opaque
opaque 64-bit number (generally a PTS) that will be reordered and output in AVFrame.reordered_opaque
Definition: avcodec.h:1688
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: avcodec.h:1671
static int av_bprint_is_complete(const AVBPrint *buf)
Test if the print buffer is complete (not truncated).
Definition: bprint.h:185
const char * bsfs
Decoding only, a comma-separated list of bitstream filters to apply to packets before decoding...
Definition: codec.h:314
DecodeSimpleContext ds
Definition: internal.h:140
int avpriv_packet_list_put(AVPacketList **packet_buffer, AVPacketList **plast_pktl, AVPacket *pkt, int(*copy)(AVPacket *dst, const AVPacket *src), int flags)
Append an AVPacket to the list.
Definition: avpacket.c:729
char * sub_charenc
DTS of the last frame.
Definition: avcodec.h:2131
static int audio_get_buffer(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:1573
int draining
checks API usage: after codec draining, flush is required to resume operation
Definition: internal.h:174
#define FF_ARRAY_ELEMS(a)
int thread_count
thread count is used to decide how many independent tasks should be passed to execute() ...
Definition: avcodec.h:1790
int linesize[4]
Definition: decode.c:62
int sub_charenc_mode
Subtitles character encoding mode.
Definition: avcodec.h:2139
if(ret)
void avcodec_flush_buffers(AVCodecContext *avctx)
Reset the internal codec state / flush internal buffers.
Definition: utils.c:1086
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
int(* post_process)(void *logctx, AVFrame *frame)
The callback to perform some delayed processing on the frame right before it is returned to the calle...
Definition: decode.h:45
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:387
Content light level (based on CTA-861.3).
Definition: packet.h:235
int attribute_align_arg avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
Supply raw packet data as input to a decoder.
Definition: decode.c:603
static int decode_simple_internal(AVCodecContext *avctx, AVFrame *frame, int64_t *discarded_samples)
Definition: decode.c:321
int(* decode)(struct AVCodecContext *, void *outdata, int *outdata_size, struct AVPacket *avpkt)
Definition: codec.h:284
AVPacketList * pkt_props
Definition: internal.h:148
void(* post_process_opaque_free)(void *opaque)
Definition: decode.h:47
#define AV_LOG_INFO
Standard information.
Definition: log.h:205
int av_bsf_send_packet(AVBSFContext *ctx, AVPacket *pkt)
Submit a packet for filtering.
Definition: bsf.c:200
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:253
int avcodec_default_get_buffer2(AVCodecContext *avctx, AVFrame *frame, int flags)
The default callback for AVCodecContext.get_buffer2().
Definition: decode.c:1665
Libavcodec external API header.
enum AVMediaType codec_type
Definition: avcodec.h:539
int compat_decode_warned
Definition: internal.h:182
int64_t pkt_duration
duration of the corresponding packet, expressed in AVStream->time_base units, 0 if unknown...
Definition: frame.h:593
A list of zero terminated key/value strings.
Definition: packet.h:172
int attribute_align_arg avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture, int *got_picture_ptr, const AVPacket *avpkt)
Decode the video frame of size avpkt->size from avpkt->data into picture.
Definition: decode.c:823
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: frame.h:84
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:260
int av_frame_is_writable(AVFrame *frame)
Check if the frame data is writable.
Definition: frame.c:594
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_WB32 unsigned int_TMPL AV_WB24 unsigned int_TMPL AV_WB16 unsigned int_TMPL byte
Definition: bytestream.h:91
int sample_rate
samples per second
Definition: avcodec.h:1191
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:345
int initial_sample_rate
Definition: internal.h:203
int debug
debug
Definition: avcodec.h:1616
static AVRational av_make_q(int num, int den)
Create an AVRational.
Definition: rational.h:71
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
const AVCodecHWConfig * avcodec_get_hw_config(const AVCodec *codec, int index)
Retrieve supported hardware configurations for a codec.
Definition: utils.c:1859
main external API structure.
Definition: avcodec.h:531
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:606
int skip_samples_multiplier
Definition: internal.h:194
uint8_t * data
The data buffer.
Definition: buffer.h:89
int av_bsf_receive_packet(AVBSFContext *ctx, AVPacket *pkt)
Retrieve a filtered packet.
Definition: bsf.c:226
static int reget_buffer_internal(AVCodecContext *avctx, AVFrame *frame, int flags)
Definition: decode.c:1946
void avsubtitle_free(AVSubtitle *sub)
Free all allocated data in the given subtitle struct.
Definition: utils.c:1135
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:402
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1876
uint8_t * data
Definition: frame.h:222
int avpriv_packet_list_get(AVPacketList **pkt_buffer, AVPacketList **pkt_buffer_end, AVPacket *pkt)
Remove the oldest AVPacket in the list and return it.
Definition: avpacket.c:766
#define AV_CODEC_PROP_BITMAP_SUB
Subtitle codec is bitmap based Decoded AVSubtitle data can be read from the AVSubtitleRect->pict fiel...
Definition: codec_desc.h:97
int av_samples_copy(uint8_t **dst, uint8_t *const *src, int dst_offset, int src_offset, int nb_samples, int nb_channels, enum AVSampleFormat sample_fmt)
Copy samples from src to dst.
Definition: samplefmt.c:213
size_t crop_right
Definition: frame.h:677
int64_t max_samples
The number of samples per frame to maximally accept.
Definition: avcodec.h:2359
AVBufferRef * av_buffer_allocz(int size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
Definition: buffer.c:83
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:50
int coded_height
Definition: avcodec.h:719
int64_t reordered_opaque
reordered opaque 64 bits (generally an integer or a double precision float PTS but can be anything)...
Definition: frame.h:481
int sample_rate
Sample rate of the audio data.
Definition: frame.h:486
int(* get_buffer2)(struct AVCodecContext *s, AVFrame *frame, int flags)
This callback is called at the beginning of each frame to get data buffer(s) for it.
Definition: avcodec.h:1346
int showed_multi_packet_warning
Definition: internal.h:192
Definition: f_ebur128.c:91
int av_image_fill_linesizes(int linesizes[4], enum AVPixelFormat pix_fmt, int width)
Fill plane linesizes for an image with pixel format pix_fmt and width width.
Definition: imgutils.c:89
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, int size)
Add a new side data to a frame.
Definition: frame.c:726
const char * av_hwdevice_get_type_name(enum AVHWDeviceType type)
Get the string name of an AVHWDeviceType.
Definition: hwcontext.c:92
#define AV_CODEC_CAP_SUBFRAMES
Codec can output multiple frames per AVPacket Normally demuxers return one frame at a time...
Definition: codec.h:93
void av_buffer_pool_uninit(AVBufferPool **ppool)
Mark the pool as being available for freeing.
Definition: buffer.c:303
int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples, enum AVSampleFormat sample_fmt, int align)
Get the required buffer size for the given audio parameters.
Definition: samplefmt.c:119
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:1159
Rational number (pair of numerator and denominator).
Definition: rational.h:58
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
Definition: avcodec.h:1152
int avcodec_parameters_from_context(AVCodecParameters *par, const AVCodecContext *codec)
Fill the parameters struct based on the values from the supplied codec context.
Definition: utils.c:2115
Recommmends skipping the specified number of samples.
Definition: packet.h:156
This struct describes a set or pool of "hardware" frames (i.e.
Definition: hwcontext.h:124
int sub_text_format
Control the form of AVSubtitle.rects[N]->ass.
Definition: avcodec.h:2248
int av_image_fill_plane_sizes(size_t sizes[4], enum AVPixelFormat pix_fmt, int height, const ptrdiff_t linesizes[4])
Fill plane sizes for an image with pixel format pix_fmt and height height.
Definition: imgutils.c:111
int skip_samples
Number of audio samples to skip at the start of the next decoded frame.
Definition: internal.h:164
#define STRIDE_ALIGN
Definition: internal.h:108
enum AVChromaLocation chroma_location
Definition: frame.h:571
int(* frame_params)(AVCodecContext *avctx, AVBufferRef *hw_frames_ctx)
Fill the given hw_frames context with current codec parameters.
Definition: avcodec.h:2584
int64_t best_effort_timestamp
frame timestamp estimated using various heuristics, in stream time base
Definition: frame.h:578
attribute_deprecated int refcounted_frames
If non-zero, the decoded audio and video frames returned from avcodec_decode_video2() and avcodec_dec...
Definition: avcodec.h:1362
AVBufferRef * pool
Definition: internal.h:136
#define AV_CODEC_FLAG_DROPCHANGED
Don&#39;t output frames whose parameters differ from first decoded frame in stream.
Definition: avcodec.h:292
int size
Size of data in bytes.
Definition: buffer.h:93
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:553
static int64_t pts
#define SIZE_SPECIFIER
Definition: internal.h:191
#define flags(name, subs,...)
Definition: cbs_av1.c:561
This side data should be associated with an audio stream and contains ReplayGain information in form ...
Definition: packet.h:99
The codec supports this format via the hw_frames_ctx interface.
Definition: codec.h:404
#define FF_CODEC_CAP_SETS_PKT_DTS
Decoders marked with FF_CODEC_CAP_SETS_PKT_DTS want to set AVFrame.pkt_dts manually.
Definition: internal.h:55
int ff_decode_get_hw_frames_ctx(AVCodecContext *avctx, enum AVHWDeviceType dev_type)
Make sure avctx.hw_frames_ctx is set.
Definition: decode.c:1147
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:328
static void decode_data_free(void *opaque, uint8_t *data)
Definition: decode.c:1839
#define UTF8_MAX_BYTES
Definition: decode.c:844
attribute_deprecated int64_t pkt_pts
PTS copied from the AVPacket that was decoded to produce this frame.
Definition: frame.h:415
Timecode which conforms to SMPTE ST 12-1:2014.
Definition: packet.h:291
void av_bprint_clear(AVBPrint *buf)
Reset the string to "" but keep internal allocated data.
Definition: bprint.c:227
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
AVPacket * buffer_pkt
buffers for using new encode/decode API through legacy API
Definition: internal.h:179
int64_t pkt_dts
DTS copied from the AVPacket that triggered returning this frame.
Definition: frame.h:423
A reference to a data buffer.
Definition: buffer.h:81
int extra_hw_frames
Definition: avcodec.h:2343
static int unrefcount_frame(AVCodecInternal *avci, AVFrame *frame)
Definition: decode.c:168
The codec supports this format by some ad-hoc method.
Definition: codec.h:420
AVPacketSideData * side_data
Additional packet data that can be provided by the container.
Definition: packet.h:374
This struct stores per-frame lavc-internal data and is attached to it via private_ref.
Definition: decode.h:34
int avcodec_get_hw_frames_parameters(AVCodecContext *avctx, AVBufferRef *device_ref, enum AVPixelFormat hw_pix_fmt, AVBufferRef **out_frames_ref)
Create and return a AVHWFramesContext with values adequate for hardware decoding. ...
Definition: decode.c:1198
static enum AVPixelFormat hw_pix_fmt
Definition: hw_decode.c:46
#define AV_PKT_FLAG_DISCARD
Flag is used to discard packets which are required to maintain valid decoder state but are not requir...
Definition: packet.h:408
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:83
common internal api header.
common internal and external API header
AVBufferPool * av_buffer_pool_init(int size, AVBufferRef *(*alloc)(int size))
Allocate and initialize a buffer pool.
Definition: buffer.c:266
#define AV_HWACCEL_CODEC_CAP_EXPERIMENTAL
HWAccel is experimental and is thus avoided in favor of non experimental codecs.
Definition: avcodec.h:2591
#define AV_CODEC_CAP_PARAM_CHANGE
Codec supports changed parameters at any point.
Definition: codec.h:114
AVBufferRef * av_hwframe_ctx_alloc(AVBufferRef *device_ref_in)
Allocate an AVHWFramesContext tied to a given device context.
Definition: hwcontext.c:247
static void insert_ts(AVBPrint *buf, int ts)
Definition: decode.c:923
void * hwaccel_priv_data
hwaccel-specific private data
Definition: internal.h:169
int caps_internal
Internal codec capabilities.
Definition: codec.h:308
int den
Denominator.
Definition: rational.h:60
int av_image_check_size2(unsigned int w, unsigned int h, int64_t max_pixels, enum AVPixelFormat pix_fmt, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of a plane of an image with...
Definition: imgutils.c:288
uint64_t initial_channel_layout
Definition: internal.h:205
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
Definition: avcodec.h:215
static int video_get_buffer(AVCodecContext *s, AVFrame *pic)
Definition: decode.c:1618
Formatted text, the ass field must be set by the decoder and is authoritative.
Definition: avcodec.h:2669
#define FF_PSEUDOPAL
Definition: internal.h:297
AVHWDeviceType
Definition: hwcontext.h:27
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:84
int channels
number of audio channels
Definition: avcodec.h:1192
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:566
char * ass
0 terminated ASS/SSA compatible event line.
Definition: avcodec.h:2704
#define AV_FRAME_FLAG_DISCARD
A flag to mark the frames which need to be decoded, but shouldn&#39;t be output.
Definition: frame.h:543
int flags2
AV_CODEC_FLAG2_*.
Definition: avcodec.h:618
enum AVColorPrimaries color_primaries
Definition: frame.h:560
static int extract_packet_props(AVCodecInternal *avci, AVPacket *pkt)
Definition: decode.c:148
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed...
Definition: packet.h:362
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
int64_t pts_correction_last_dts
PTS of the last frame.
Definition: avcodec.h:2124
size_t compat_decode_partial_size
Definition: internal.h:188
#define AV_CODEC_FLAG_TRUNCATED
Input bitstream might be truncated at a random location instead of only at frame boundaries.
Definition: avcodec.h:317
int frame_number
Frame counter, set by libavcodec.
Definition: avcodec.h:1222
#define IS_EMPTY(pkt)
Definition: decode.c:146
static int update_frame_pool(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:1462
int height
Definition: frame.h:372
#define av_freep(p)
int64_t pts_correction_num_faulty_pts
Current statistics for PTS correction.
Definition: avcodec.h:2121
enum AVColorTransferCharacteristic color_trc
Definition: frame.h:562
static float sub(float src0, float src1)
void av_bsf_free(AVBSFContext **pctx)
Free a bitstream filter context and everything associated with it; write NULL into the supplied point...
Definition: bsf.c:40
AVBufferRef * av_buffer_pool_get(AVBufferPool *pool)
Allocate a new AVBuffer, reusing an old buffer from the pool when available.
Definition: buffer.c:364
Recommmends skipping the specified number of samples.
Definition: frame.h:108
void * hwaccel_priv
Per-frame private data for hwaccels.
Definition: decode.h:52
#define av_malloc_array(a, b)
enum AVHWDeviceType device_type
The device type associated with the configuration.
Definition: codec.h:444
#define FF_SUB_TEXT_FMT_ASS_WITH_TIMINGS
Definition: avcodec.h:2251
#define FF_SUB_CHARENC_MODE_IGNORE
neither convert the subtitles, nor check them for valid UTF-8
Definition: avcodec.h:2143
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2489
ReplayGain information in the form of the AVReplayGain struct.
Definition: frame.h:76
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Frame references ownership and permissions
enum AVSubtitleType type
Definition: avcodec.h:2695
uint8_t ** extended_data
pointers to the data planes/channels.
Definition: frame.h:361
int format
Definition: decode.c:59
AVBufferRef * hw_device_ctx
A reference to the AVHWDeviceContext describing the device which will be used by a hardware encoder/d...
Definition: avcodec.h:2293
int pkt_size
size of the corresponding packet containing the compressed frame.
Definition: frame.h:629
float min
Stereoscopic 3d metadata.
Definition: frame.h:63
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:91
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
static double val(void *priv, double ch)
Definition: aeval.c:76
This structure stores compressed data.
Definition: packet.h:340
AVCodecParameters * par_in
Parameters of the input stream.
Definition: bsf.h:77
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:514
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:380
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
Definition: avcodec.h:1594
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:50
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:356
enum AVPixelFormat sw_pix_fmt
Nominal unaccelerated pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:2091
for(j=16;j >0;--j)
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:658
int i
Definition: input.c:407
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
static void hwaccel_uninit(AVCodecContext *avctx)
Definition: decode.c:1284
#define tb
Definition: regdef.h:68
#define AV_WL32(p, v)
Definition: intreadwrite.h:426
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:58
void * av_mallocz_array(size_t nmemb, size_t size)
Definition: mem.c:190
This side data should be associated with an audio stream and corresponds to enum AVAudioServiceType.
Definition: packet.h:120
static uint8_t tmp[11]
Definition: aes_ctr.c:27