FFmpeg
decode.c
1 /*
2  * generic decoding-related code
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <stdint.h>
22 #include <string.h>
23 
24 #include "config.h"
25 
26 #if CONFIG_ICONV
27 # include <iconv.h>
28 #endif
29 
30 #include "libavutil/avassert.h"
31 #include "libavutil/avstring.h"
32 #include "libavutil/bprint.h"
33 #include "libavutil/common.h"
34 #include "libavutil/frame.h"
35 #include "libavutil/hwcontext.h"
36 #include "libavutil/imgutils.h"
37 #include "libavutil/internal.h"
38 #include "libavutil/intmath.h"
39 #include "libavutil/opt.h"
40 
41 #include "avcodec.h"
42 #include "bytestream.h"
43 #include "decode.h"
44 #include "hwaccel.h"
45 #include "internal.h"
46 #include "thread.h"
47 
48 static int apply_param_change(AVCodecContext *avctx, const AVPacket *avpkt)
49 {
50  int size = 0, ret;
51  const uint8_t *data;
52  uint32_t flags;
53  int64_t val;
54 
55  data = av_packet_get_side_data(avpkt, AV_PKT_DATA_PARAM_CHANGE, &size);
56  if (!data)
57  return 0;
58 
59  if (!(avctx->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE)) {
60  av_log(avctx, AV_LOG_ERROR, "This decoder does not support parameter "
61  "changes, but PARAM_CHANGE side data was sent to it.\n");
62  ret = AVERROR(EINVAL);
63  goto fail2;
64  }
65 
66  if (size < 4)
67  goto fail;
68 
69  flags = bytestream_get_le32(&data);
70  size -= 4;
71 
72  if (flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT) {
73  if (size < 4)
74  goto fail;
75  val = bytestream_get_le32(&data);
76  if (val <= 0 || val > INT_MAX) {
77  av_log(avctx, AV_LOG_ERROR, "Invalid channel count");
78  ret = AVERROR_INVALIDDATA;
79  goto fail2;
80  }
81  avctx->channels = val;
82  size -= 4;
83  }
84  if (flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT) {
85  if (size < 8)
86  goto fail;
87  avctx->channel_layout = bytestream_get_le64(&data);
88  size -= 8;
89  }
90  if (flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE) {
91  if (size < 4)
92  goto fail;
93  val = bytestream_get_le32(&data);
94  if (val <= 0 || val > INT_MAX) {
95  av_log(avctx, AV_LOG_ERROR, "Invalid sample rate");
96  ret = AVERROR_INVALIDDATA;
97  goto fail2;
98  }
99  avctx->sample_rate = val;
100  size -= 4;
101  }
102  if (flags & AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS) {
103  if (size < 8)
104  goto fail;
105  avctx->width = bytestream_get_le32(&data);
106  avctx->height = bytestream_get_le32(&data);
107  size -= 8;
108  ret = ff_set_dimensions(avctx, avctx->width, avctx->height);
109  if (ret < 0)
110  goto fail2;
111  }
112 
113  return 0;
114 fail:
115  av_log(avctx, AV_LOG_ERROR, "PARAM_CHANGE side data too small.\n");
116  ret = AVERROR_INVALIDDATA;
117 fail2:
118  if (ret < 0) {
119  av_log(avctx, AV_LOG_ERROR, "Error applying parameter changes.\n");
120  if (avctx->err_recognition & AV_EF_EXPLODE)
121  return ret;
122  }
123  return 0;
124 }
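/* Summary of the parsing above: AV_PKT_DATA_PARAM_CHANGE side data is a
 * little-endian byte stream of
 *     u32 flags,
 *     [flags & CHANNEL_COUNT]  s32 channel count,
 *     [flags & CHANNEL_LAYOUT] u64 channel layout,
 *     [flags & SAMPLE_RATE]    s32 sample rate,
 *     [flags & DIMENSIONS]     s32 width, s32 height.
 * Every optional block is guarded by a size check, and a malformed blob is
 * only fatal when AV_EF_EXPLODE is set in err_recognition. */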
125 
126 static int extract_packet_props(AVCodecInternal *avci, const AVPacket *pkt)
127 {
128  int ret = 0;
129 
130  av_packet_unref(avci->last_pkt_props);
131  if (pkt) {
132  ret = av_packet_copy_props(avci->last_pkt_props, pkt);
133  if (!ret)
134  avci->last_pkt_props->size = pkt->size; // HACK: Needed for ff_decode_frame_props().
135  }
136  return ret;
137 }
138 
139 static int unrefcount_frame(AVCodecInternal *avci, AVFrame *frame)
140 {
141  int ret;
142 
143  /* move the original frame to our backup */
144  av_frame_unref(avci->to_free);
145  av_frame_move_ref(avci->to_free, frame);
146 
147  /* now copy everything except the AVBufferRefs back
148  * note that we make a COPY of the side data, so calling av_frame_free() on
149  * the caller's frame will work properly */
150  ret = av_frame_copy_props(frame, avci->to_free);
151  if (ret < 0)
152  return ret;
153 
154  memcpy(frame->data, avci->to_free->data, sizeof(frame->data));
155  memcpy(frame->linesize, avci->to_free->linesize, sizeof(frame->linesize));
156  if (avci->to_free->extended_data != avci->to_free->data) {
157  int planes = avci->to_free->channels;
158  int size = planes * sizeof(*frame->extended_data);
159 
160  if (!size) {
161  av_frame_unref(frame);
162  return AVERROR_BUG;
163  }
164 
165  frame->extended_data = av_malloc(size);
166  if (!frame->extended_data) {
167  av_frame_unref(frame);
168  return AVERROR(ENOMEM);
169  }
170  memcpy(frame->extended_data, avci->to_free->extended_data,
171  size);
172  } else
173  frame->extended_data = frame->data;
174 
175  frame->format = avci->to_free->format;
176  frame->width = avci->to_free->width;
177  frame->height = avci->to_free->height;
178  frame->channel_layout = avci->to_free->channel_layout;
179  frame->nb_samples = avci->to_free->nb_samples;
180  frame->channels = avci->to_free->channels;
181 
182  return 0;
183 }
184 
185 int ff_decode_bsfs_init(AVCodecContext *avctx)
186 {
187  AVCodecInternal *avci = avctx->internal;
188  DecodeFilterContext *s = &avci->filter;
189  const char *bsfs_str;
190  int ret;
191 
192  if (s->nb_bsfs)
193  return 0;
194 
195  bsfs_str = avctx->codec->bsfs ? avctx->codec->bsfs : "null";
196  while (bsfs_str && *bsfs_str) {
197  AVBSFContext **tmp;
198  const AVBitStreamFilter *filter;
199  char *bsf, *bsf_options_str, *bsf_name;
200 
201  bsf = av_get_token(&bsfs_str, ",");
202  if (!bsf) {
203  ret = AVERROR(ENOMEM);
204  goto fail;
205  }
206  bsf_name = av_strtok(bsf, "=", &bsf_options_str);
207  if (!bsf_name) {
208  av_freep(&bsf);
209  ret = AVERROR(ENOMEM);
210  goto fail;
211  }
212 
213  filter = av_bsf_get_by_name(bsf_name);
214  if (!filter) {
215  av_log(avctx, AV_LOG_ERROR, "A non-existing bitstream filter %s "
216  "requested by a decoder. This is a bug, please report it.\n",
217  bsf_name);
218  av_freep(&bsf);
219  ret = AVERROR_BUG;
220  goto fail;
221  }
222 
223  tmp = av_realloc_array(s->bsfs, s->nb_bsfs + 1, sizeof(*s->bsfs));
224  if (!tmp) {
225  av_freep(&bsf);
226  ret = AVERROR(ENOMEM);
227  goto fail;
228  }
229  s->bsfs = tmp;
230  s->nb_bsfs++;
231 
232  ret = av_bsf_alloc(filter, &s->bsfs[s->nb_bsfs - 1]);
233  if (ret < 0) {
234  av_freep(&bsf);
235  goto fail;
236  }
237 
238  if (s->nb_bsfs == 1) {
239  /* We do not currently have an API for passing the input timebase into decoders,
240  * but no filters used here should actually need it.
241  * So we make up some plausible-looking number (the MPEG 90kHz timebase) */
242  s->bsfs[s->nb_bsfs - 1]->time_base_in = (AVRational){ 1, 90000 };
243  ret = avcodec_parameters_from_context(s->bsfs[s->nb_bsfs - 1]->par_in,
244  avctx);
245  } else {
246  s->bsfs[s->nb_bsfs - 1]->time_base_in = s->bsfs[s->nb_bsfs - 2]->time_base_out;
247  ret = avcodec_parameters_copy(s->bsfs[s->nb_bsfs - 1]->par_in,
248  s->bsfs[s->nb_bsfs - 2]->par_out);
249  }
250  if (ret < 0) {
251  av_freep(&bsf);
252  goto fail;
253  }
254 
255  if (bsf_options_str && filter->priv_class) {
256  const AVOption *opt = av_opt_next(s->bsfs[s->nb_bsfs - 1]->priv_data, NULL);
257  const char * shorthand[2] = {NULL};
258 
259  if (opt)
260  shorthand[0] = opt->name;
261 
262  ret = av_opt_set_from_string(s->bsfs[s->nb_bsfs - 1]->priv_data, bsf_options_str, shorthand, "=", ":");
263  if (ret < 0) {
264  if (ret != AVERROR(ENOMEM)) {
265  av_log(avctx, AV_LOG_ERROR, "Invalid options for bitstream filter %s "
266  "requested by the decoder. This is a bug, please report it.\n",
267  bsf_name);
268  ret = AVERROR_BUG;
269  }
270  av_freep(&bsf);
271  goto fail;
272  }
273  }
274  av_freep(&bsf);
275 
276  ret = av_bsf_init(s->bsfs[s->nb_bsfs - 1]);
277  if (ret < 0)
278  goto fail;
279 
280  if (*bsfs_str)
281  bsfs_str++;
282  }
283 
284  return 0;
285 fail:
286  ff_decode_bsfs_uninit(avctx);
287  return ret;
288 }
289 
290 /* try to get one output packet from the filter chain */
291 static int bsfs_poll(AVCodecContext *avctx, AVPacket *pkt)
292 {
293  DecodeFilterContext *s = &avctx->internal->filter;
294  int idx, ret;
295 
296  /* start with the last filter in the chain */
297  idx = s->nb_bsfs - 1;
298  while (idx >= 0) {
299  /* request a packet from the currently selected filter */
300  ret = av_bsf_receive_packet(s->bsfs[idx], pkt);
301  if (ret == AVERROR(EAGAIN)) {
302  /* no packets available, try the next filter up the chain */
303  idx--;
304  continue;
305  } else if (ret < 0 && ret != AVERROR_EOF) {
306  return ret;
307  }
308 
309  /* got a packet or EOF -- pass it to the caller or to the next filter
310  * down the chain */
311  if (idx == s->nb_bsfs - 1) {
312  return ret;
313  } else {
314  idx++;
315  ret = av_bsf_send_packet(s->bsfs[idx], ret < 0 ? NULL : pkt);
316  if (ret < 0) {
317  av_log(avctx, AV_LOG_ERROR,
318  "Error pre-processing a packet before decoding\n");
319  av_packet_unref(pkt);
320  return ret;
321  }
322  }
323  }
324 
325  return AVERROR(EAGAIN);
326 }
327 
328 int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt)
329 {
330  AVCodecInternal *avci = avctx->internal;
331  int ret;
332 
333  if (avci->draining)
334  return AVERROR_EOF;
335 
336  ret = bsfs_poll(avctx, pkt);
337  if (ret == AVERROR_EOF)
338  avci->draining = 1;
339  if (ret < 0)
340  return ret;
341 
342  ret = extract_packet_props(avctx->internal, pkt);
343  if (ret < 0)
344  goto finish;
345 
346  ret = apply_param_change(avctx, pkt);
347  if (ret < 0)
348  goto finish;
349 
350  if (avctx->codec->receive_frame)
351  avci->compat_decode_consumed += pkt->size;
352 
353  return 0;
354 finish:
355  av_packet_unref(pkt);
356  return ret;
357 }
358 
359 /**
360  * Attempt to guess proper monotonic timestamps for decoded video frames
361  * which might have incorrect times. Input timestamps may wrap around, in
362  * which case the output will as well.
363  *
364  * @param pts the pts field of the decoded AVPacket, as passed through
365  * AVFrame.pts
366  * @param dts the dts field of the decoded AVPacket
367  * @return one of the input values, may be AV_NOPTS_VALUE
368  */
369 static int64_t guess_correct_pts(AVCodecContext *ctx,
370  int64_t reordered_pts, int64_t dts)
371 {
372  int64_t pts = AV_NOPTS_VALUE;
373 
374  if (dts != AV_NOPTS_VALUE) {
375  ctx->pts_correction_num_faulty_dts += dts <= ctx->pts_correction_last_dts;
376  ctx->pts_correction_last_dts = dts;
377  } else if (reordered_pts != AV_NOPTS_VALUE)
378  ctx->pts_correction_last_dts = reordered_pts;
379 
380  if (reordered_pts != AV_NOPTS_VALUE) {
381  ctx->pts_correction_num_faulty_pts += reordered_pts <= ctx->pts_correction_last_pts;
382  ctx->pts_correction_last_pts = reordered_pts;
383  } else if(dts != AV_NOPTS_VALUE)
384  ctx->pts_correction_last_pts = dts;
385 
386  if ((ctx->pts_correction_num_faulty_pts<=ctx->pts_correction_num_faulty_dts || dts == AV_NOPTS_VALUE)
387  && reordered_pts != AV_NOPTS_VALUE)
388  pts = reordered_pts;
389  else
390  pts = dts;
391 
392  return pts;
393 }
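/* Illustration of the heuristic above: the counters track how often pts and
 * dts each went backwards. The reordered pts is returned as long as it has
 * been faulty no more often than dts (or no dts exists); otherwise the
 * function falls back to dts. Either input may be AV_NOPTS_VALUE, in which
 * case the other value is used as-is. */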
394 
395 /*
396  * The core of the receive_frame_wrapper for the decoders implementing
397  * the simple API. Certain decoders might consume partial packets without
398  * returning any output, so this function needs to be called in a loop until it
399  * returns EAGAIN.
400  **/
401 static inline int decode_simple_internal(AVCodecContext *avctx, AVFrame *frame)
402 {
403  AVCodecInternal *avci = avctx->internal;
404  DecodeSimpleContext *ds = &avci->ds;
405  AVPacket *pkt = ds->in_pkt;
406  // copy to ensure we do not change pkt
407  int got_frame, actual_got_frame;
408  int ret;
409 
410  if (!pkt->data && !avci->draining) {
411  av_packet_unref(pkt);
412  ret = ff_decode_get_packet(avctx, pkt);
413  if (ret < 0 && ret != AVERROR_EOF)
414  return ret;
415  }
416 
417  // Some codecs (at least wma lossless) will crash when feeding drain packets
418  // after EOF was signaled.
419  if (avci->draining_done)
420  return AVERROR_EOF;
421 
422  if (!pkt->data &&
423  !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY ||
424  avctx->active_thread_type & FF_THREAD_FRAME))
425  return AVERROR_EOF;
426 
427  got_frame = 0;
428 
429  if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME) {
430  ret = ff_thread_decode_frame(avctx, frame, &got_frame, pkt);
431  } else {
432  ret = avctx->codec->decode(avctx, frame, &got_frame, pkt);
433 
434  if (!(avctx->codec->caps_internal & FF_CODEC_CAP_SETS_PKT_DTS))
435  frame->pkt_dts = pkt->dts;
436  if (avctx->codec->type == AVMEDIA_TYPE_VIDEO) {
437  if(!avctx->has_b_frames)
438  frame->pkt_pos = pkt->pos;
439  //FIXME these should be under if(!avctx->has_b_frames)
440  /* get_buffer is supposed to set frame parameters */
441  if (!(avctx->codec->capabilities & AV_CODEC_CAP_DR1)) {
442  if (!frame->sample_aspect_ratio.num) frame->sample_aspect_ratio = avctx->sample_aspect_ratio;
443  if (!frame->width) frame->width = avctx->width;
444  if (!frame->height) frame->height = avctx->height;
445  if (frame->format == AV_PIX_FMT_NONE) frame->format = avctx->pix_fmt;
446  }
447  }
448  }
449  emms_c();
450  actual_got_frame = got_frame;
451 
452  if (avctx->codec->type == AVMEDIA_TYPE_VIDEO) {
453  if (frame->flags & AV_FRAME_FLAG_DISCARD)
454  got_frame = 0;
455  if (got_frame)
456  frame->best_effort_timestamp = guess_correct_pts(avctx,
457  frame->pts,
458  frame->pkt_dts);
459  } else if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) {
460  uint8_t *side;
461  int side_size;
462  uint32_t discard_padding = 0;
463  uint8_t skip_reason = 0;
464  uint8_t discard_reason = 0;
465 
466  if (ret >= 0 && got_frame) {
467  frame->best_effort_timestamp = guess_correct_pts(avctx,
468  frame->pts,
469  frame->pkt_dts);
470  if (frame->format == AV_SAMPLE_FMT_NONE)
471  frame->format = avctx->sample_fmt;
472  if (!frame->channel_layout)
473  frame->channel_layout = avctx->channel_layout;
474  if (!frame->channels)
475  frame->channels = avctx->channels;
476  if (!frame->sample_rate)
477  frame->sample_rate = avctx->sample_rate;
478  }
479 
480  side = av_packet_get_side_data(avctx->internal->last_pkt_props, AV_PKT_DATA_SKIP_SAMPLES, &side_size);
481  if(side && side_size>=10) {
482  avctx->internal->skip_samples = FFMAX(0, AV_RL32(side));
483  discard_padding = AV_RL32(side + 4);
484  av_log(avctx, AV_LOG_DEBUG, "skip %d / discard %d samples due to side data\n",
485  avctx->internal->skip_samples, (int)discard_padding);
486  skip_reason = AV_RL8(side + 8);
487  discard_reason = AV_RL8(side + 9);
488  }
489 
490  if ((frame->flags & AV_FRAME_FLAG_DISCARD) && got_frame &&
491  !(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
492  avctx->internal->skip_samples = FFMAX(0, avctx->internal->skip_samples - frame->nb_samples);
493  got_frame = 0;
494  }
495 
496  if (avctx->internal->skip_samples > 0 && got_frame &&
497  !(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
498  if(frame->nb_samples <= avctx->internal->skip_samples){
499  got_frame = 0;
500  avctx->internal->skip_samples -= frame->nb_samples;
501  av_log(avctx, AV_LOG_DEBUG, "skip whole frame, skip left: %d\n",
502  avctx->internal->skip_samples);
503  } else {
504  av_samples_copy(frame->extended_data, frame->extended_data, 0, avctx->internal->skip_samples,
505  frame->nb_samples - avctx->internal->skip_samples, avctx->channels, frame->format);
506  if(avctx->pkt_timebase.num && avctx->sample_rate) {
507  int64_t diff_ts = av_rescale_q(avctx->internal->skip_samples,
508  (AVRational){1, avctx->sample_rate},
509  avctx->pkt_timebase);
510  if(frame->pts!=AV_NOPTS_VALUE)
511  frame->pts += diff_ts;
512 #if FF_API_PKT_PTS
513 FF_DISABLE_DEPRECATION_WARNINGS
514  if(frame->pkt_pts!=AV_NOPTS_VALUE)
515  frame->pkt_pts += diff_ts;
516 FF_ENABLE_DEPRECATION_WARNINGS
517 #endif
518  if(frame->pkt_dts!=AV_NOPTS_VALUE)
519  frame->pkt_dts += diff_ts;
520  if (frame->pkt_duration >= diff_ts)
521  frame->pkt_duration -= diff_ts;
522  } else {
523  av_log(avctx, AV_LOG_WARNING, "Could not update timestamps for skipped samples.\n");
524  }
525  av_log(avctx, AV_LOG_DEBUG, "skip %d/%d samples\n",
526  avctx->internal->skip_samples, frame->nb_samples);
527  frame->nb_samples -= avctx->internal->skip_samples;
528  avctx->internal->skip_samples = 0;
529  }
530  }
531 
532  if (discard_padding > 0 && discard_padding <= frame->nb_samples && got_frame &&
533  !(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
534  if (discard_padding == frame->nb_samples) {
535  got_frame = 0;
536  } else {
537  if(avctx->pkt_timebase.num && avctx->sample_rate) {
538  int64_t diff_ts = av_rescale_q(frame->nb_samples - discard_padding,
539  (AVRational){1, avctx->sample_rate},
540  avctx->pkt_timebase);
541  frame->pkt_duration = diff_ts;
542  } else {
543  av_log(avctx, AV_LOG_WARNING, "Could not update timestamps for discarded samples.\n");
544  }
545  av_log(avctx, AV_LOG_DEBUG, "discard %d/%d samples\n",
546  (int)discard_padding, frame->nb_samples);
547  frame->nb_samples -= discard_padding;
548  }
549  }
550 
551  if ((avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL) && got_frame) {
552  AVFrameSideData *fside = av_frame_new_side_data(frame, AV_FRAME_DATA_SKIP_SAMPLES, 10);
553  if (fside) {
554  AV_WL32(fside->data, avctx->internal->skip_samples);
555  AV_WL32(fside->data + 4, discard_padding);
556  AV_WL8(fside->data + 8, skip_reason);
557  AV_WL8(fside->data + 9, discard_reason);
558  avctx->internal->skip_samples = 0;
559  }
560  }
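/* The 10-byte AV_FRAME_DATA_SKIP_SAMPLES blob written above mirrors the
 * packet-level AV_PKT_DATA_SKIP_SAMPLES layout read earlier:
 *     bytes 0-3: u32le samples to skip from the start of the frame,
 *     bytes 4-7: u32le samples to discard from the end,
 *     byte 8: skip reason, byte 9: discard reason.
 * With AV_CODEC_FLAG2_SKIP_MANUAL set, the skip information is exported to
 * the caller instead of being applied to the frame here. */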
561  }
562 
563  if (avctx->codec->type == AVMEDIA_TYPE_AUDIO &&
564  !avci->showed_multi_packet_warning &&
565  ret >= 0 && ret != pkt->size && !(avctx->codec->capabilities & AV_CODEC_CAP_SUBFRAMES)) {
566  av_log(avctx, AV_LOG_WARNING, "Multiple frames in a packet.\n");
567  avci->showed_multi_packet_warning = 1;
568  }
569 
570  if (!got_frame)
571  av_frame_unref(frame);
572 
573  if (ret >= 0 && avctx->codec->type == AVMEDIA_TYPE_VIDEO && !(avctx->flags & AV_CODEC_FLAG_TRUNCATED))
574  ret = pkt->size;
575 
576 #if FF_API_AVCTX_TIMEBASE
577  if (avctx->framerate.num > 0 && avctx->framerate.den > 0)
578  avctx->time_base = av_inv_q(av_mul_q(avctx->framerate, (AVRational){avctx->ticks_per_frame, 1}));
579 #endif
580 
581  /* do not stop draining when actual_got_frame != 0 or ret < 0 */
582  /* got_frame == 0 but actual_got_frame != 0 when frame is discarded */
583  if (avctx->internal->draining && !actual_got_frame) {
584  if (ret < 0) {
585  /* prevent infinite loop if a decoder wrongly always return error on draining */
586  /* reasonable nb_errors_max = maximum b frames + thread count */
587  int nb_errors_max = 20 + (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME ?
588  avctx->thread_count : 1);
589 
590  if (avci->nb_draining_errors++ >= nb_errors_max) {
591  av_log(avctx, AV_LOG_ERROR, "Too many errors when draining, this is a bug. "
592  "Stop draining and force EOF.\n");
593  avci->draining_done = 1;
594  ret = AVERROR_BUG;
595  }
596  } else {
597  avci->draining_done = 1;
598  }
599  }
600 
601  avci->compat_decode_consumed += ret;
602 
603  if (ret >= pkt->size || ret < 0) {
604  av_packet_unref(pkt);
605  } else {
606  int consumed = ret;
607 
608  pkt->data += consumed;
609  pkt->size -= consumed;
610  avci->last_pkt_props->size -= consumed; // See extract_packet_props() comment.
611  pkt->pts = AV_NOPTS_VALUE;
612  pkt->dts = AV_NOPTS_VALUE;
613  avci->last_pkt_props->pts = AV_NOPTS_VALUE;
614  avci->last_pkt_props->dts = AV_NOPTS_VALUE;
615  }
616 
617  if (got_frame)
618  av_assert0(frame->buf[0]);
619 
620  return ret < 0 ? ret : 0;
621 }
622 
623 static int decode_simple_receive_frame(AVCodecContext *avctx, AVFrame *frame)
624 {
625  int ret;
626 
627  while (!frame->buf[0]) {
628  ret = decode_simple_internal(avctx, frame);
629  if (ret < 0)
630  return ret;
631  }
632 
633  return 0;
634 }
635 
636 static int decode_receive_frame_internal(AVCodecContext *avctx, AVFrame *frame)
637 {
638  AVCodecInternal *avci = avctx->internal;
639  int ret;
640 
641  av_assert0(!frame->buf[0]);
642 
643  if (avctx->codec->receive_frame)
644  ret = avctx->codec->receive_frame(avctx, frame);
645  else
646  ret = decode_simple_receive_frame(avctx, frame);
647 
648  if (ret == AVERROR_EOF)
649  avci->draining_done = 1;
650 
651  if (!ret) {
652  /* the only case where decode data is not set should be decoders
653  * that do not call ff_get_buffer() */
654  av_assert0((frame->private_ref && frame->private_ref->size == sizeof(FrameDecodeData)) ||
655  !(avctx->codec->capabilities & AV_CODEC_CAP_DR1));
656 
657  if (frame->private_ref) {
658  FrameDecodeData *fdd = (FrameDecodeData*)frame->private_ref->data;
659 
660  if (fdd->post_process) {
661  ret = fdd->post_process(avctx, frame);
662  if (ret < 0) {
663  av_frame_unref(frame);
664  return ret;
665  }
666  }
667  }
668  }
669 
670  /* free the per-frame decode data */
671  av_buffer_unref(&frame->private_ref);
672 
673  return ret;
674 }
675 
676 int attribute_align_arg avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
677 {
678  AVCodecInternal *avci = avctx->internal;
679  int ret;
680 
681  if (!avcodec_is_open(avctx) || !av_codec_is_decoder(avctx->codec))
682  return AVERROR(EINVAL);
683 
684  if (avctx->internal->draining)
685  return AVERROR_EOF;
686 
687  if (avpkt && !avpkt->size && avpkt->data)
688  return AVERROR(EINVAL);
689 
690  av_packet_unref(avci->buffer_pkt);
691  if (avpkt && (avpkt->data || avpkt->side_data_elems)) {
692  ret = av_packet_ref(avci->buffer_pkt, avpkt);
693  if (ret < 0)
694  return ret;
695  }
696 
697  ret = av_bsf_send_packet(avci->filter.bsfs[0], avci->buffer_pkt);
698  if (ret < 0) {
699  av_packet_unref(avci->buffer_pkt);
700  return ret;
701  }
702 
703  if (!avci->buffer_frame->buf[0]) {
704  ret = decode_receive_frame_internal(avctx, avci->buffer_frame);
705  if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
706  return ret;
707  }
708 
709  return 0;
710 }
711 
712 static int apply_cropping(AVCodecContext *avctx, AVFrame *frame)
713 {
714  /* make sure we are noisy about decoders returning invalid cropping data */
715  if (frame->crop_left >= INT_MAX - frame->crop_right ||
716  frame->crop_top >= INT_MAX - frame->crop_bottom ||
717  (frame->crop_left + frame->crop_right) >= frame->width ||
718  (frame->crop_top + frame->crop_bottom) >= frame->height) {
719  av_log(avctx, AV_LOG_WARNING,
720  "Invalid cropping information set by a decoder: "
722  "(frame size %dx%d). This is a bug, please report it\n",
723  frame->crop_left, frame->crop_right, frame->crop_top, frame->crop_bottom,
724  frame->width, frame->height);
725  frame->crop_left = 0;
726  frame->crop_right = 0;
727  frame->crop_top = 0;
728  frame->crop_bottom = 0;
729  return 0;
730  }
731 
732  if (!avctx->apply_cropping)
733  return 0;
734 
735  return av_frame_apply_cropping(frame, avctx->flags & AV_CODEC_FLAG_UNALIGNED ?
736  AV_FRAME_CROP_UNALIGNED : 0);
737 }
738 
739 int attribute_align_arg avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
740 {
741  AVCodecInternal *avci = avctx->internal;
742  int ret, changed;
743 
744  av_frame_unref(frame);
745 
746  if (!avcodec_is_open(avctx) || !av_codec_is_decoder(avctx->codec))
747  return AVERROR(EINVAL);
748 
749  if (avci->buffer_frame->buf[0]) {
750  av_frame_move_ref(frame, avci->buffer_frame);
751  } else {
752  ret = decode_receive_frame_internal(avctx, frame);
753  if (ret < 0)
754  return ret;
755  }
756 
757  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
758  ret = apply_cropping(avctx, frame);
759  if (ret < 0) {
760  av_frame_unref(frame);
761  return ret;
762  }
763  }
764 
765  avctx->frame_number++;
766 
767  if (avctx->flags & AV_CODEC_FLAG_DROPCHANGED) {
768 
769  if (avctx->frame_number == 1) {
770  avci->initial_format = frame->format;
771  switch(avctx->codec_type) {
772  case AVMEDIA_TYPE_VIDEO:
773  avci->initial_width = frame->width;
774  avci->initial_height = frame->height;
775  break;
776  case AVMEDIA_TYPE_AUDIO:
777  avci->initial_sample_rate = frame->sample_rate ? frame->sample_rate :
778  avctx->sample_rate;
779  avci->initial_channels = frame->channels;
780  avci->initial_channel_layout = frame->channel_layout;
781  break;
782  }
783  }
784 
785  if (avctx->frame_number > 1) {
786  changed = avci->initial_format != frame->format;
787 
788  switch(avctx->codec_type) {
789  case AVMEDIA_TYPE_VIDEO:
790  changed |= avci->initial_width != frame->width ||
791  avci->initial_height != frame->height;
792  break;
793  case AVMEDIA_TYPE_AUDIO:
794  changed |= avci->initial_sample_rate != frame->sample_rate ||
795  avci->initial_sample_rate != avctx->sample_rate ||
796  avci->initial_channels != frame->channels ||
797  avci->initial_channel_layout != frame->channel_layout;
798  break;
799  }
800 
801  if (changed) {
802  avci->changed_frames_dropped++;
803  av_log(avctx, AV_LOG_INFO, "dropped changed frame #%d pts %"PRId64
804  " drop count: %d \n",
805  avctx->frame_number, frame->pts,
806  avci->changed_frames_dropped);
807  av_frame_unref(frame);
808  return AVERROR_INPUT_CHANGED;
809  }
810  }
811  }
812  return 0;
813 }
814 
815 static int compat_decode(AVCodecContext *avctx, AVFrame *frame,
816  int *got_frame, const AVPacket *pkt)
817 {
818  AVCodecInternal *avci = avctx->internal;
819  int ret = 0;
820 
821  av_assert0(avci->compat_decode_consumed == 0);
822 
823  if (avci->draining_done && pkt && pkt->size != 0) {
824  av_log(avctx, AV_LOG_WARNING, "Got unexpected packet after EOF\n");
825  avcodec_flush_buffers(avctx);
826  }
827 
828  *got_frame = 0;
829  avci->compat_decode = 1;
830 
831  if (avci->compat_decode_partial_size > 0 &&
832  avci->compat_decode_partial_size != pkt->size) {
833  av_log(avctx, AV_LOG_ERROR,
834  "Got unexpected packet size after a partial decode\n");
835  ret = AVERROR(EINVAL);
836  goto finish;
837  }
838 
839  if (!avci->compat_decode_partial_size) {
840  ret = avcodec_send_packet(avctx, pkt);
841  if (ret == AVERROR_EOF)
842  ret = 0;
843  else if (ret == AVERROR(EAGAIN)) {
844  /* we fully drain all the output in each decode call, so this should not
845  * ever happen */
846  ret = AVERROR_BUG;
847  goto finish;
848  } else if (ret < 0)
849  goto finish;
850  }
851 
852  while (ret >= 0) {
853  ret = avcodec_receive_frame(avctx, frame);
854  if (ret < 0) {
855  if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
856  ret = 0;
857  goto finish;
858  }
859 
860  if (frame != avci->compat_decode_frame) {
861  if (!avctx->refcounted_frames) {
862  ret = unrefcount_frame(avci, frame);
863  if (ret < 0)
864  goto finish;
865  }
866 
867  *got_frame = 1;
868  frame = avci->compat_decode_frame;
869  } else {
870  if (!avci->compat_decode_warned) {
871  av_log(avctx, AV_LOG_WARNING, "The deprecated avcodec_decode_* "
872  "API cannot return all the frames for this decoder. "
873  "Some frames will be dropped. Update your code to the "
874  "new decoding API to fix this.\n");
875  avci->compat_decode_warned = 1;
876  }
877  }
878 
879  if (avci->draining || (!avctx->codec->bsfs && avci->compat_decode_consumed < pkt->size))
880  break;
881  }
882 
883 finish:
884  if (ret == 0) {
885  /* if there are any bsfs then assume full packet is always consumed */
886  if (avctx->codec->bsfs)
887  ret = pkt->size;
888  else
889  ret = FFMIN(avci->compat_decode_consumed, pkt->size);
890  }
891  avci->compat_decode_consumed = 0;
892  avci->compat_decode_partial_size = (ret >= 0) ? pkt->size - ret : 0;
893 
894  return ret;
895 }
896 
897 int attribute_align_arg avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture,
898  int *got_picture_ptr,
899  const AVPacket *avpkt)
900 {
901  return compat_decode(avctx, picture, got_picture_ptr, avpkt);
902 }
903 
904 int attribute_align_arg avcodec_decode_audio4(AVCodecContext *avctx,
905  AVFrame *frame,
906  int *got_frame_ptr,
907  const AVPacket *avpkt)
908 {
909  return compat_decode(avctx, frame, got_frame_ptr, avpkt);
910 }
911 
912 static void get_subtitle_defaults(AVSubtitle *sub)
913 {
914  memset(sub, 0, sizeof(*sub));
915  sub->pts = AV_NOPTS_VALUE;
916 }
917 
918 #define UTF8_MAX_BYTES 4 /* 5 and 6 bytes sequences should not be used */
919 static int recode_subtitle(AVCodecContext *avctx,
920  AVPacket *outpkt, const AVPacket *inpkt)
921 {
922 #if CONFIG_ICONV
923  iconv_t cd = (iconv_t)-1;
924  int ret = 0;
925  char *inb, *outb;
926  size_t inl, outl;
927  AVPacket tmp;
928 #endif
929 
930  if (avctx->sub_charenc_mode != FF_SUB_CHARENC_MODE_PRE_DECODER || inpkt->size == 0)
931  return 0;
932 
933 #if CONFIG_ICONV
934  cd = iconv_open("UTF-8", avctx->sub_charenc);
935  av_assert0(cd != (iconv_t)-1);
936 
937  inb = inpkt->data;
938  inl = inpkt->size;
939 
940  if (inl >= INT_MAX / UTF8_MAX_BYTES - AV_INPUT_BUFFER_PADDING_SIZE) {
941  av_log(avctx, AV_LOG_ERROR, "Subtitles packet is too big for recoding\n");
942  ret = AVERROR(ENOMEM);
943  goto end;
944  }
945 
946  ret = av_new_packet(&tmp, inl * UTF8_MAX_BYTES);
947  if (ret < 0)
948  goto end;
949  outpkt->buf = tmp.buf;
950  outpkt->data = tmp.data;
951  outpkt->size = tmp.size;
952  outb = outpkt->data;
953  outl = outpkt->size;
954 
955  if (iconv(cd, &inb, &inl, &outb, &outl) == (size_t)-1 ||
956  iconv(cd, NULL, NULL, &outb, &outl) == (size_t)-1 ||
957  outl >= outpkt->size || inl != 0) {
958  ret = FFMIN(AVERROR(errno), -1);
959  av_log(avctx, AV_LOG_ERROR, "Unable to recode subtitle event \"%s\" "
960  "from %s to UTF-8\n", inpkt->data, avctx->sub_charenc);
961  av_packet_unref(&tmp);
962  goto end;
963  }
964  outpkt->size -= outl;
965  memset(outpkt->data + outpkt->size, 0, outl);
966 
967 end:
968  if (cd != (iconv_t)-1)
969  iconv_close(cd);
970  return ret;
971 #else
972  av_log(avctx, AV_LOG_ERROR, "requesting subtitles recoding without iconv");
973  return AVERROR(EINVAL);
974 #endif
975 }
976 
977 static int utf8_check(const uint8_t *str)
978 {
979  const uint8_t *byte;
980  uint32_t codepoint, min;
981 
982  while (*str) {
983  byte = str;
984  GET_UTF8(codepoint, *(byte++), return 0;);
985  min = byte - str == 1 ? 0 : byte - str == 2 ? 0x80 :
986  1 << (5 * (byte - str) - 4);
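/* min is the smallest codepoint that legitimately needs this many bytes:
 * 2-byte sequences -> 0x80, 3-byte -> 1 << 11 = 0x800, 4-byte -> 1 << 16 = 0x10000;
 * anything below that is an overlong (invalid) encoding. */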
987  if (codepoint < min || codepoint >= 0x110000 ||
988  codepoint == 0xFFFE /* BOM */ ||
989  codepoint >= 0xD800 && codepoint <= 0xDFFF /* surrogates */)
990  return 0;
991  str = byte;
992  }
993  return 1;
994 }
995 
996 #if FF_API_ASS_TIMING
997 static void insert_ts(AVBPrint *buf, int ts)
998 {
999  if (ts == -1) {
1000  av_bprintf(buf, "9:59:59.99,");
1001  } else {
1002  int h, m, s;
1003 
1004  h = ts/360000; ts -= 360000*h;
1005  m = ts/ 6000; ts -= 6000*m;
1006  s = ts/ 100; ts -= 100*s;
1007  av_bprintf(buf, "%d:%02d:%02d.%02d,", h, m, s, ts);
1008  }
1009 }
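/* insert_ts() expects a timestamp in centiseconds (the 1/100 s ASS time base
 * used below): e.g. ts = 366105 prints as "1:01:01.05,". A value of -1
 * (unknown duration) prints the placeholder "9:59:59.99,". */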
1010 
1011 static int convert_sub_to_old_ass_form(AVSubtitle *sub, const AVPacket *pkt, AVRational tb)
1012 {
1013  int i;
1014  AVBPrint buf;
1015 
1016  av_bprint_init(&buf, 0, AV_BPRINT_SIZE_UNLIMITED);
1017 
1018  for (i = 0; i < sub->num_rects; i++) {
1019  char *final_dialog;
1020  const char *dialog;
1021  AVSubtitleRect *rect = sub->rects[i];
1022  int ts_start, ts_duration = -1;
1023  long int layer;
1024 
1025  if (rect->type != SUBTITLE_ASS || !strncmp(rect->ass, "Dialogue: ", 10))
1026  continue;
1027 
1028  av_bprint_clear(&buf);
1029 
1030  /* skip ReadOrder */
1031  dialog = strchr(rect->ass, ',');
1032  if (!dialog)
1033  continue;
1034  dialog++;
1035 
1036  /* extract Layer or Marked */
1037  layer = strtol(dialog, (char**)&dialog, 10);
1038  if (*dialog != ',')
1039  continue;
1040  dialog++;
1041 
1042  /* rescale timing to ASS time base (ms) */
1043  ts_start = av_rescale_q(pkt->pts, tb, av_make_q(1, 100));
1044  if (pkt->duration != -1)
1045  ts_duration = av_rescale_q(pkt->duration, tb, av_make_q(1, 100));
1046  sub->end_display_time = FFMAX(sub->end_display_time, 10 * ts_duration);
1047 
1048  /* construct ASS (standalone file form with timestamps) string */
1049  av_bprintf(&buf, "Dialogue: %ld,", layer);
1050  insert_ts(&buf, ts_start);
1051  insert_ts(&buf, ts_duration == -1 ? -1 : ts_start + ts_duration);
1052  av_bprintf(&buf, "%s\r\n", dialog);
1053 
1054  final_dialog = av_strdup(buf.str);
1055  if (!av_bprint_is_complete(&buf) || !final_dialog) {
1056  av_freep(&final_dialog);
1057  av_bprint_finalize(&buf, NULL);
1058  return AVERROR(ENOMEM);
1059  }
1060  av_freep(&rect->ass);
1061  rect->ass = final_dialog;
1062  }
1063 
1064  av_bprint_finalize(&buf, NULL);
1065  return 0;
1066 }
1067 #endif
1068 
1069 int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub,
1070  int *got_sub_ptr,
1071  AVPacket *avpkt)
1072 {
1073  int i, ret = 0;
1074 
1075  if (!avpkt->data && avpkt->size) {
1076  av_log(avctx, AV_LOG_ERROR, "invalid packet: NULL data, size != 0\n");
1077  return AVERROR(EINVAL);
1078  }
1079  if (!avctx->codec)
1080  return AVERROR(EINVAL);
1081  if (avctx->codec->type != AVMEDIA_TYPE_SUBTITLE) {
1082  av_log(avctx, AV_LOG_ERROR, "Invalid media type for subtitles\n");
1083  return AVERROR(EINVAL);
1084  }
1085 
1086  *got_sub_ptr = 0;
1087  get_subtitle_defaults(sub);
1088 
1089  if ((avctx->codec->capabilities & AV_CODEC_CAP_DELAY) || avpkt->size) {
1090  AVPacket pkt_recoded = *avpkt;
1091 
1092  ret = recode_subtitle(avctx, &pkt_recoded, avpkt);
1093  if (ret < 0) {
1094  *got_sub_ptr = 0;
1095  } else {
1096  ret = extract_packet_props(avctx->internal, &pkt_recoded);
1097  if (ret < 0)
1098  return ret;
1099 
1100  if (avctx->pkt_timebase.num && avpkt->pts != AV_NOPTS_VALUE)
1101  sub->pts = av_rescale_q(avpkt->pts,
1102  avctx->pkt_timebase, AV_TIME_BASE_Q);
1103  ret = avctx->codec->decode(avctx, sub, got_sub_ptr, &pkt_recoded);
1104  av_assert1((ret >= 0) >= !!*got_sub_ptr &&
1105  !!*got_sub_ptr >= !!sub->num_rects);
1106 
1107 #if FF_API_ASS_TIMING
1108  if (avctx->sub_text_format == FF_SUB_TEXT_FMT_ASS_WITH_TIMINGS
1109  && *got_sub_ptr && sub->num_rects) {
1110  const AVRational tb = avctx->pkt_timebase.num ? avctx->pkt_timebase
1111  : avctx->time_base;
1112  int err = convert_sub_to_old_ass_form(sub, avpkt, tb);
1113  if (err < 0)
1114  ret = err;
1115  }
1116 #endif
1117 
1118  if (sub->num_rects && !sub->end_display_time && avpkt->duration &&
1119  avctx->pkt_timebase.num) {
1120  AVRational ms = { 1, 1000 };
1121  sub->end_display_time = av_rescale_q(avpkt->duration,
1122  avctx->pkt_timebase, ms);
1123  }
1124 
1125  if (avctx->codec_descriptor->props & AV_CODEC_PROP_BITMAP_SUB)
1126  sub->format = 0;
1127  else if (avctx->codec_descriptor->props & AV_CODEC_PROP_TEXT_SUB)
1128  sub->format = 1;
1129 
1130  for (i = 0; i < sub->num_rects; i++) {
1131  if (avctx->sub_charenc_mode != FF_SUB_CHARENC_MODE_IGNORE &&
1132  sub->rects[i]->ass && !utf8_check(sub->rects[i]->ass)) {
1133  av_log(avctx, AV_LOG_ERROR,
1134  "Invalid UTF-8 in decoded subtitles text; "
1135  "maybe missing -sub_charenc option\n");
1136  avsubtitle_free(sub);
1137  ret = AVERROR_INVALIDDATA;
1138  break;
1139  }
1140  }
1141 
1142  if (avpkt->data != pkt_recoded.data) { // did we recode?
1143  /* prevent from destroying side data from original packet */
1144  pkt_recoded.side_data = NULL;
1145  pkt_recoded.side_data_elems = 0;
1146 
1147  av_packet_unref(&pkt_recoded);
1148  }
1149  }
1150 
1151  if (*got_sub_ptr)
1152  avctx->frame_number++;
1153  }
1154 
1155  return ret;
1156 }
1157 
1158 enum AVPixelFormat avcodec_default_get_format(struct AVCodecContext *avctx,
1159  const enum AVPixelFormat *fmt)
1160 {
1161  const AVPixFmtDescriptor *desc;
1162  const AVCodecHWConfig *config;
1163  int i, n;
1164 
1165  // If a device was supplied when the codec was opened, assume that the
1166  // user wants to use it.
1167  if (avctx->hw_device_ctx && avctx->codec->hw_configs) {
1168  AVHWDeviceContext *device_ctx =
1169  (AVHWDeviceContext*)avctx->hw_device_ctx->data;
1170  for (i = 0;; i++) {
1171  config = &avctx->codec->hw_configs[i]->public;
1172  if (!config)
1173  break;
1174  if (!(config->methods &
1175  AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX))
1176  continue;
1177  if (device_ctx->type != config->device_type)
1178  continue;
1179  for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++) {
1180  if (config->pix_fmt == fmt[n])
1181  return fmt[n];
1182  }
1183  }
1184  }
1185  // No device or other setup, so we have to choose from things which
1186  // don't need any other external information.
1187 
1188  // If the last element of the list is a software format, choose it
1189  // (this should be best software format if any exist).
1190  for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++);
1191  desc = av_pix_fmt_desc_get(fmt[n - 1]);
1192  if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
1193  return fmt[n - 1];
1194 
1195  // Finally, traverse the list in order and choose the first entry
1196  // with no external dependencies (if there is no hardware configuration
1197  // information available then this just picks the first entry).
1198  for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++) {
1199  for (i = 0;; i++) {
1200  config = avcodec_get_hw_config(avctx->codec, i);
1201  if (!config)
1202  break;
1203  if (config->pix_fmt == fmt[n])
1204  break;
1205  }
1206  if (!config) {
1207  // No specific config available, so the decoder must be able
1208  // to handle this format without any additional setup.
1209  return fmt[n];
1210  }
1211  if (config->methods & AV_CODEC_HW_CONFIG_METHOD_INTERNAL) {
1212  // Usable with only internal setup.
1213  return fmt[n];
1214  }
1215  }
1216 
1217  // Nothing is usable, give up.
1218  return AV_PIX_FMT_NONE;
1219 }
1220 
1221 int ff_decode_get_hw_frames_ctx(AVCodecContext *avctx,
1222  enum AVHWDeviceType dev_type)
1223 {
1224  AVHWDeviceContext *device_ctx;
1225  AVHWFramesContext *frames_ctx;
1226  int ret;
1227 
1228  if (!avctx->hwaccel)
1229  return AVERROR(ENOSYS);
1230 
1231  if (avctx->hw_frames_ctx)
1232  return 0;
1233  if (!avctx->hw_device_ctx) {
1234  av_log(avctx, AV_LOG_ERROR, "A hardware frames or device context is "
1235  "required for hardware accelerated decoding.\n");
1236  return AVERROR(EINVAL);
1237  }
1238 
1239  device_ctx = (AVHWDeviceContext *)avctx->hw_device_ctx->data;
1240  if (device_ctx->type != dev_type) {
1241  av_log(avctx, AV_LOG_ERROR, "Device type %s expected for hardware "
1242  "decoding, but got %s.\n", av_hwdevice_get_type_name(dev_type),
1243  av_hwdevice_get_type_name(device_ctx->type));
1244  return AVERROR(EINVAL);
1245  }
1246 
1247  ret = avcodec_get_hw_frames_parameters(avctx,
1248  avctx->hw_device_ctx,
1249  avctx->hwaccel->pix_fmt,
1250  &avctx->hw_frames_ctx);
1251  if (ret < 0)
1252  return ret;
1253 
1254  frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
1255 
1256 
1257  if (frames_ctx->initial_pool_size) {
1258  // We guarantee 4 base work surfaces. The function above guarantees 1
1259  // (the absolute minimum), so add the missing count.
1260  frames_ctx->initial_pool_size += 3;
1261  }
1262 
1263  ret = av_hwframe_ctx_init(avctx->hw_frames_ctx);
1264  if (ret < 0) {
1265  av_buffer_unref(&avctx->hw_frames_ctx);
1266  return ret;
1267  }
1268 
1269  return 0;
1270 }
1271 
1272 int avcodec_get_hw_frames_parameters(AVCodecContext *avctx,
1273  AVBufferRef *device_ref,
1274  enum AVPixelFormat hw_pix_fmt,
1275  AVBufferRef **out_frames_ref)
1276 {
1277  AVBufferRef *frames_ref = NULL;
1278  const AVCodecHWConfigInternal *hw_config;
1279  const AVHWAccel *hwa;
1280  int i, ret;
1281 
1282  for (i = 0;; i++) {
1283  hw_config = avctx->codec->hw_configs[i];
1284  if (!hw_config)
1285  return AVERROR(ENOENT);
1286  if (hw_config->public.pix_fmt == hw_pix_fmt)
1287  break;
1288  }
1289 
1290  hwa = hw_config->hwaccel;
1291  if (!hwa || !hwa->frame_params)
1292  return AVERROR(ENOENT);
1293 
1294  frames_ref = av_hwframe_ctx_alloc(device_ref);
1295  if (!frames_ref)
1296  return AVERROR(ENOMEM);
1297 
1298  ret = hwa->frame_params(avctx, frames_ref);
1299  if (ret >= 0) {
1300  AVHWFramesContext *frames_ctx = (AVHWFramesContext*)frames_ref->data;
1301 
1302  if (frames_ctx->initial_pool_size) {
1303  // If the user has requested that extra output surfaces be
1304  // available then add them here.
1305  if (avctx->extra_hw_frames > 0)
1306  frames_ctx->initial_pool_size += avctx->extra_hw_frames;
1307 
1308  // If frame threading is enabled then an extra surface per thread
1309  // is also required.
1310  if (avctx->active_thread_type & FF_THREAD_FRAME)
1311  frames_ctx->initial_pool_size += avctx->thread_count;
1312  }
1313 
1314  *out_frames_ref = frames_ref;
1315  } else {
1316  av_buffer_unref(&frames_ref);
1317  }
1318  return ret;
1319 }
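/* A minimal caller-side sketch (hypothetical, not part of this file): a
 * get_format() callback can use the function above to pre-create the frames
 * context that ff_decode_get_hw_frames_ctx() would otherwise allocate;
 * hw_pix_fmt stands for the offered hardware pixel format.
 *
 *     AVBufferRef *frames_ref = NULL;
 *     if (avcodec_get_hw_frames_parameters(avctx, avctx->hw_device_ctx,
 *                                          hw_pix_fmt, &frames_ref) >= 0) {
 *         // tune AVHWFramesContext fields (e.g. initial_pool_size) here
 *         if (av_hwframe_ctx_init(frames_ref) >= 0)
 *             avctx->hw_frames_ctx = frames_ref;
 *         else
 *             av_buffer_unref(&frames_ref);
 *     }
 */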
1320 
1321 static int hwaccel_init(AVCodecContext *avctx,
1322  const AVCodecHWConfigInternal *hw_config)
1323 {
1324  const AVHWAccel *hwaccel;
1325  int err;
1326 
1327  hwaccel = hw_config->hwaccel;
1328  if (hwaccel->capabilities & AV_HWACCEL_CODEC_CAP_EXPERIMENTAL &&
1329  avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
1330  av_log(avctx, AV_LOG_WARNING, "Ignoring experimental hwaccel: %s\n",
1331  hwaccel->name);
1332  return AVERROR_PATCHWELCOME;
1333  }
1334 
1335  if (hwaccel->priv_data_size) {
1336  avctx->internal->hwaccel_priv_data =
1337  av_mallocz(hwaccel->priv_data_size);
1338  if (!avctx->internal->hwaccel_priv_data)
1339  return AVERROR(ENOMEM);
1340  }
1341 
1342  avctx->hwaccel = hwaccel;
1343  if (hwaccel->init) {
1344  err = hwaccel->init(avctx);
1345  if (err < 0) {
1346  av_log(avctx, AV_LOG_ERROR, "Failed setup for format %s: "
1347  "hwaccel initialisation returned error.\n",
1348  av_get_pix_fmt_name(hw_config->public.pix_fmt));
1349  av_freep(&avctx->internal->hwaccel_priv_data);
1350  avctx->hwaccel = NULL;
1351  return err;
1352  }
1353  }
1354 
1355  return 0;
1356 }
1357 
1358 static void hwaccel_uninit(AVCodecContext *avctx)
1359 {
1360  if (avctx->hwaccel && avctx->hwaccel->uninit)
1361  avctx->hwaccel->uninit(avctx);
1362 
1363  av_freep(&avctx->internal->hwaccel_priv_data);
1364 
1365  avctx->hwaccel = NULL;
1366 
1367  av_buffer_unref(&avctx->hw_frames_ctx);
1368 }
1369 
1370 int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
1371 {
1372  const AVPixFmtDescriptor *desc;
1373  enum AVPixelFormat *choices;
1374  enum AVPixelFormat ret, user_choice;
1375  const AVCodecHWConfigInternal *hw_config;
1376  const AVCodecHWConfig *config;
1377  int i, n, err;
1378 
1379  // Find end of list.
1380  for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++);
1381  // Must contain at least one entry.
1382  av_assert0(n >= 1);
1383  // If a software format is available, it must be the last entry.
1384  desc = av_pix_fmt_desc_get(fmt[n - 1]);
1385  if (desc->flags & AV_PIX_FMT_FLAG_HWACCEL) {
1386  // No software format is available.
1387  } else {
1388  avctx->sw_pix_fmt = fmt[n - 1];
1389  }
1390 
1391  choices = av_malloc_array(n + 1, sizeof(*choices));
1392  if (!choices)
1393  return AV_PIX_FMT_NONE;
1394 
1395  memcpy(choices, fmt, (n + 1) * sizeof(*choices));
1396 
1397  for (;;) {
1398  // Remove the previous hwaccel, if there was one.
1399  hwaccel_uninit(avctx);
1400 
1401  user_choice = avctx->get_format(avctx, choices);
1402  if (user_choice == AV_PIX_FMT_NONE) {
1403  // Explicitly chose nothing, give up.
1404  ret = AV_PIX_FMT_NONE;
1405  break;
1406  }
1407 
1408  desc = av_pix_fmt_desc_get(user_choice);
1409  if (!desc) {
1410  av_log(avctx, AV_LOG_ERROR, "Invalid format returned by "
1411  "get_format() callback.\n");
1412  ret = AV_PIX_FMT_NONE;
1413  break;
1414  }
1415  av_log(avctx, AV_LOG_DEBUG, "Format %s chosen by get_format().\n",
1416  desc->name);
1417 
1418  for (i = 0; i < n; i++) {
1419  if (choices[i] == user_choice)
1420  break;
1421  }
1422  if (i == n) {
1423  av_log(avctx, AV_LOG_ERROR, "Invalid return from get_format(): "
1424  "%s not in possible list.\n", desc->name);
1425  ret = AV_PIX_FMT_NONE;
1426  break;
1427  }
1428 
1429  if (avctx->codec->hw_configs) {
1430  for (i = 0;; i++) {
1431  hw_config = avctx->codec->hw_configs[i];
1432  if (!hw_config)
1433  break;
1434  if (hw_config->public.pix_fmt == user_choice)
1435  break;
1436  }
1437  } else {
1438  hw_config = NULL;
1439  }
1440 
1441  if (!hw_config) {
1442  // No config available, so no extra setup required.
1443  ret = user_choice;
1444  break;
1445  }
1446  config = &hw_config->public;
1447 
1448  if (config->methods &
1449  AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX &&
1450  avctx->hw_frames_ctx) {
1451  const AVHWFramesContext *frames_ctx =
1452  (AVHWFramesContext*)avctx->hw_frames_ctx->data;
1453  if (frames_ctx->format != user_choice) {
1454  av_log(avctx, AV_LOG_ERROR, "Invalid setup for format %s: "
1455  "does not match the format of the provided frames "
1456  "context.\n", desc->name);
1457  goto try_again;
1458  }
1459  } else if (config->methods &
1460  AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX &&
1461  avctx->hw_device_ctx) {
1462  const AVHWDeviceContext *device_ctx =
1463  (AVHWDeviceContext*)avctx->hw_device_ctx->data;
1464  if (device_ctx->type != config->device_type) {
1465  av_log(avctx, AV_LOG_ERROR, "Invalid setup for format %s: "
1466  "does not match the type of the provided device "
1467  "context.\n", desc->name);
1468  goto try_again;
1469  }
1470  } else if (config->methods &
1471  AV_CODEC_HW_CONFIG_METHOD_INTERNAL) {
1472  // Internal-only setup, no additional configuration.
1473  } else if (config->methods &
1474  AV_CODEC_HW_CONFIG_METHOD_AD_HOC) {
1475  // Some ad-hoc configuration we can't see and can't check.
1476  } else {
1477  av_log(avctx, AV_LOG_ERROR, "Invalid setup for format %s: "
1478  "missing configuration.\n", desc->name);
1479  goto try_again;
1480  }
1481  if (hw_config->hwaccel) {
1482  av_log(avctx, AV_LOG_DEBUG, "Format %s requires hwaccel "
1483  "initialisation.\n", desc->name);
1484  err = hwaccel_init(avctx, hw_config);
1485  if (err < 0)
1486  goto try_again;
1487  }
1488  ret = user_choice;
1489  break;
1490 
1491  try_again:
1492  av_log(avctx, AV_LOG_DEBUG, "Format %s not usable, retrying "
1493  "get_format() without it.\n", desc->name);
1494  for (i = 0; i < n; i++) {
1495  if (choices[i] == user_choice)
1496  break;
1497  }
1498  for (; i + 1 < n; i++)
1499  choices[i] = choices[i + 1];
1500  --n;
1501  }
1502 
1503  av_freep(&choices);
1504  return ret;
1505 }
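/* A minimal user-side sketch (hypothetical, not part of this file) of the
 * get_format() callback invoked by ff_get_format() above: scan the offered
 * list for a preferred hardware format and otherwise fall back to the
 * default selection; AV_PIX_FMT_VAAPI is just an assumed target.
 *
 *     static enum AVPixelFormat pick_fmt(AVCodecContext *avctx,
 *                                        const enum AVPixelFormat *fmt)
 *     {
 *         for (int i = 0; fmt[i] != AV_PIX_FMT_NONE; i++)
 *             if (fmt[i] == AV_PIX_FMT_VAAPI)
 *                 return fmt[i];
 *         return avcodec_default_get_format(avctx, fmt);
 *     }
 */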
1506 
1507 static int update_frame_pool(AVCodecContext *avctx, AVFrame *frame)
1508 {
1509  FramePool *pool = avctx->internal->pool;
1510  int i, ret;
1511 
1512  switch (avctx->codec_type) {
1513  case AVMEDIA_TYPE_VIDEO: {
1514  uint8_t *data[4];
1515  int linesize[4];
1516  int size[4] = { 0 };
1517  int w = frame->width;
1518  int h = frame->height;
1519  int tmpsize, unaligned;
1520 
1521  if (pool->format == frame->format &&
1522  pool->width == frame->width && pool->height == frame->height)
1523  return 0;
1524 
1525  avcodec_align_dimensions2(avctx, &w, &h, pool->stride_align);
1526 
1527  do {
1528  // NOTE: do not align linesizes individually, this breaks e.g. assumptions
1529  // that linesize[0] == 2*linesize[1] in the MPEG-encoder for 4:2:2
1530  ret = av_image_fill_linesizes(linesize, avctx->pix_fmt, w);
1531  if (ret < 0)
1532  return ret;
1533  // increase alignment of w for next try (rhs gives the lowest bit set in w)
1534  w += w & ~(w - 1);
1535 
1536  unaligned = 0;
1537  for (i = 0; i < 4; i++)
1538  unaligned |= linesize[i] % pool->stride_align[i];
1539  } while (unaligned);
1540 
1541  tmpsize = av_image_fill_pointers(data, avctx->pix_fmt, h,
1542  NULL, linesize);
1543  if (tmpsize < 0)
1544  return tmpsize;
1545 
1546  for (i = 0; i < 3 && data[i + 1]; i++)
1547  size[i] = data[i + 1] - data[i];
1548  size[i] = tmpsize - (data[i] - data[0]);
1549 
1550  for (i = 0; i < 4; i++) {
1551  av_buffer_pool_uninit(&pool->pools[i]);
1552  pool->linesize[i] = linesize[i];
1553  if (size[i]) {
1554  pool->pools[i] = av_buffer_pool_init(size[i] + 16 + STRIDE_ALIGN - 1,
1555  CONFIG_MEMORY_POISONING ?
1556  NULL :
1557  av_buffer_allocz);
1558  if (!pool->pools[i]) {
1559  ret = AVERROR(ENOMEM);
1560  goto fail;
1561  }
1562  }
1563  }
1564  pool->format = frame->format;
1565  pool->width = frame->width;
1566  pool->height = frame->height;
1567 
1568  break;
1569  }
1570  case AVMEDIA_TYPE_AUDIO: {
1571  int ch = frame->channels; //av_get_channel_layout_nb_channels(frame->channel_layout);
1572  int planar = av_sample_fmt_is_planar(frame->format);
1573  int planes = planar ? ch : 1;
1574 
1575  if (pool->format == frame->format && pool->planes == planes &&
1576  pool->channels == ch && frame->nb_samples == pool->samples)
1577  return 0;
1578 
1579  av_buffer_pool_uninit(&pool->pools[0]);
1580  ret = av_samples_get_buffer_size(&pool->linesize[0], ch,
1581  frame->nb_samples, frame->format, 0);
1582  if (ret < 0)
1583  goto fail;
1584 
1585  pool->pools[0] = av_buffer_pool_init(pool->linesize[0], NULL);
1586  if (!pool->pools[0]) {
1587  ret = AVERROR(ENOMEM);
1588  goto fail;
1589  }
1590 
1591  pool->format = frame->format;
1592  pool->planes = planes;
1593  pool->channels = ch;
1594  pool->samples = frame->nb_samples;
1595  break;
1596  }
1597  default: av_assert0(0);
1598  }
1599  return 0;
1600 fail:
1601  for (i = 0; i < 4; i++)
1602  av_buffer_pool_uninit(&pool->pools[i]);
1603  pool->format = -1;
1604  pool->planes = pool->channels = pool->samples = 0;
1605  pool->width = pool->height = 0;
1606  return ret;
1607 }
1608 
1610 {
1611  FramePool *pool = avctx->internal->pool;
1612  int planes = pool->planes;
1613  int i;
1614 
1615  frame->linesize[0] = pool->linesize[0];
1616 
1617  if (planes > AV_NUM_DATA_POINTERS) {
1618  frame->extended_data = av_mallocz_array(planes, sizeof(*frame->extended_data));
1619  frame->nb_extended_buf = planes - AV_NUM_DATA_POINTERS;
1620  frame->extended_buf = av_mallocz_array(frame->nb_extended_buf,
1621  sizeof(*frame->extended_buf));
1622  if (!frame->extended_data || !frame->extended_buf) {
1623  av_freep(&frame->extended_data);
1624  av_freep(&frame->extended_buf);
1625  return AVERROR(ENOMEM);
1626  }
1627  } else {
1628  frame->extended_data = frame->data;
1629  av_assert0(frame->nb_extended_buf == 0);
1630  }
1631 
1632  for (i = 0; i < FFMIN(planes, AV_NUM_DATA_POINTERS); i++) {
1633  frame->buf[i] = av_buffer_pool_get(pool->pools[0]);
1634  if (!frame->buf[i])
1635  goto fail;
1636  frame->extended_data[i] = frame->data[i] = frame->buf[i]->data;
1637  }
1638  for (i = 0; i < frame->nb_extended_buf; i++) {
1639  frame->extended_buf[i] = av_buffer_pool_get(pool->pools[0]);
1640  if (!frame->extended_buf[i])
1641  goto fail;
1642  frame->extended_data[i + AV_NUM_DATA_POINTERS] = frame->extended_buf[i]->data;
1643  }
1644 
1645  if (avctx->debug & FF_DEBUG_BUFFERS)
1646  av_log(avctx, AV_LOG_DEBUG, "default_get_buffer called on frame %p", frame);
1647 
1648  return 0;
1649 fail:
1650  av_frame_unref(frame);
1651  return AVERROR(ENOMEM);
1652 }
1653 
1654 static int video_get_buffer(AVCodecContext *s, AVFrame *pic)
1655 {
1656  FramePool *pool = s->internal->pool;
1657  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pic->format);
1658  int i;
1659 
1660  if (pic->data[0] || pic->data[1] || pic->data[2] || pic->data[3]) {
1661  av_log(s, AV_LOG_ERROR, "pic->data[*]!=NULL in avcodec_default_get_buffer\n");
1662  return -1;
1663  }
1664 
1665  if (!desc) {
1666  av_log(s, AV_LOG_ERROR,
1667  "Unable to get pixel format descriptor for format %s\n",
1668  av_get_pix_fmt_name(pic->format));
1669  return AVERROR(EINVAL);
1670  }
1671 
1672  memset(pic->data, 0, sizeof(pic->data));
1673  pic->extended_data = pic->data;
1674 
1675  for (i = 0; i < 4 && pool->pools[i]; i++) {
1676  pic->linesize[i] = pool->linesize[i];
1677 
1678  pic->buf[i] = av_buffer_pool_get(pool->pools[i]);
1679  if (!pic->buf[i])
1680  goto fail;
1681 
1682  pic->data[i] = pic->buf[i]->data;
1683  }
1684  for (; i < AV_NUM_DATA_POINTERS; i++) {
1685  pic->data[i] = NULL;
1686  pic->linesize[i] = 0;
1687  }
1688  if (desc->flags & AV_PIX_FMT_FLAG_PAL ||
1689  ((desc->flags & FF_PSEUDOPAL) && pic->data[1]))
1690  avpriv_set_systematic_pal2((uint32_t *)pic->data[1], pic->format);
1691 
1692  if (s->debug & FF_DEBUG_BUFFERS)
1693  av_log(s, AV_LOG_DEBUG, "default_get_buffer called on pic %p\n", pic);
1694 
1695  return 0;
1696 fail:
1697  av_frame_unref(pic);
1698  return AVERROR(ENOMEM);
1699 }
1700 
1701 int avcodec_default_get_buffer2(AVCodecContext *avctx, AVFrame *frame, int flags)
1702 {
1703  int ret;
1704 
1705  if (avctx->hw_frames_ctx) {
1706  ret = av_hwframe_get_buffer(avctx->hw_frames_ctx, frame, 0);
1707  frame->width = avctx->coded_width;
1708  frame->height = avctx->coded_height;
1709  return ret;
1710  }
1711 
1712  if ((ret = update_frame_pool(avctx, frame)) < 0)
1713  return ret;
1714 
1715  switch (avctx->codec_type) {
1716  case AVMEDIA_TYPE_VIDEO:
1717  return video_get_buffer(avctx, frame);
1718  case AVMEDIA_TYPE_AUDIO:
1719  return audio_get_buffer(avctx, frame);
1720  default:
1721  return -1;
1722  }
1723 }
1724 
1725 static int add_metadata_from_side_data(const AVPacket *avpkt, AVFrame *frame)
1726 {
1727  int size;
1728  const uint8_t *side_metadata;
1729 
1730  AVDictionary **frame_md = &frame->metadata;
1731 
1732  side_metadata = av_packet_get_side_data(avpkt,
1733  AV_PKT_DATA_STRINGS_METADATA, &size);
1734  return av_packet_unpack_dictionary(side_metadata, size, frame_md);
1735 }
1736 
1737 int ff_decode_frame_props(AVCodecContext *avctx, AVFrame *frame)
1738 {
1739  const AVPacket *pkt = avctx->internal->last_pkt_props;
1740  int i;
1741  static const struct {
1742  enum AVPacketSideDataType packet;
1743  enum AVFrameSideDataType frame;
1744  } sd[] = {
1745  { AV_PKT_DATA_REPLAYGAIN, AV_FRAME_DATA_REPLAYGAIN },
1746  { AV_PKT_DATA_DISPLAYMATRIX, AV_FRAME_DATA_DISPLAYMATRIX },
1747  { AV_PKT_DATA_SPHERICAL, AV_FRAME_DATA_SPHERICAL },
1748  { AV_PKT_DATA_STEREO3D, AV_FRAME_DATA_STEREO3D },
1749  { AV_PKT_DATA_AUDIO_SERVICE_TYPE, AV_FRAME_DATA_AUDIO_SERVICE_TYPE },
1750  { AV_PKT_DATA_MASTERING_DISPLAY_METADATA, AV_FRAME_DATA_MASTERING_DISPLAY_METADATA },
1751  { AV_PKT_DATA_CONTENT_LIGHT_LEVEL, AV_FRAME_DATA_CONTENT_LIGHT_LEVEL },
1752  { AV_PKT_DATA_A53_CC, AV_FRAME_DATA_A53_CC },
1753  };
1754 
1755  if (pkt) {
1756  frame->pts = pkt->pts;
1757 #if FF_API_PKT_PTS
1758 FF_DISABLE_DEPRECATION_WARNINGS
1759  frame->pkt_pts = pkt->pts;
1760 FF_ENABLE_DEPRECATION_WARNINGS
1761 #endif
1762  frame->pkt_pos = pkt->pos;
1763  frame->pkt_duration = pkt->duration;
1764  frame->pkt_size = pkt->size;
1765 
1766  for (i = 0; i < FF_ARRAY_ELEMS(sd); i++) {
1767  int size;
1768  uint8_t *packet_sd = av_packet_get_side_data(pkt, sd[i].packet, &size);
1769  if (packet_sd) {
1770  AVFrameSideData *frame_sd = av_frame_new_side_data(frame,
1771  sd[i].frame,
1772  size);
1773  if (!frame_sd)
1774  return AVERROR(ENOMEM);
1775 
1776  memcpy(frame_sd->data, packet_sd, size);
1777  }
1778  }
1779  add_metadata_from_side_data(pkt, frame);
1780 
1781  if (pkt->flags & AV_PKT_FLAG_DISCARD) {
1782  frame->flags |= AV_FRAME_FLAG_DISCARD;
1783  } else {
1784  frame->flags = (frame->flags & ~AV_FRAME_FLAG_DISCARD);
1785  }
1786  }
1787  frame->reordered_opaque = avctx->reordered_opaque;
1788 
1789  if (frame->color_primaries == AVCOL_PRI_UNSPECIFIED)
1790  frame->color_primaries = avctx->color_primaries;
1791  if (frame->color_trc == AVCOL_TRC_UNSPECIFIED)
1792  frame->color_trc = avctx->color_trc;
1793  if (frame->colorspace == AVCOL_SPC_UNSPECIFIED)
1794  frame->colorspace = avctx->colorspace;
1795  if (frame->color_range == AVCOL_RANGE_UNSPECIFIED)
1796  frame->color_range = avctx->color_range;
1797  if (frame->chroma_location == AVCHROMA_LOC_UNSPECIFIED)
1798  frame->chroma_location = avctx->chroma_sample_location;
1799 
1800  switch (avctx->codec->type) {
1801  case AVMEDIA_TYPE_VIDEO:
1802  frame->format = avctx->pix_fmt;
1803  if (!frame->sample_aspect_ratio.num)
1804  frame->sample_aspect_ratio = avctx->sample_aspect_ratio;
1805 
1806  if (frame->width && frame->height &&
1807  av_image_check_sar(frame->width, frame->height,
1808  frame->sample_aspect_ratio) < 0) {
1809  av_log(avctx, AV_LOG_WARNING, "ignoring invalid SAR: %u/%u\n",
1810  frame->sample_aspect_ratio.num,
1811  frame->sample_aspect_ratio.den);
1812  frame->sample_aspect_ratio = (AVRational){ 0, 1 };
1813  }
1814 
1815  break;
1816  case AVMEDIA_TYPE_AUDIO:
1817  if (!frame->sample_rate)
1818  frame->sample_rate = avctx->sample_rate;
1819  if (frame->format < 0)
1820  frame->format = avctx->sample_fmt;
1821  if (!frame->channel_layout) {
1822  if (avctx->channel_layout) {
1823  if (av_get_channel_layout_nb_channels(avctx->channel_layout) !=
1824  avctx->channels) {
1825  av_log(avctx, AV_LOG_ERROR, "Inconsistent channel "
1826  "configuration.\n");
1827  return AVERROR(EINVAL);
1828  }
1829 
1830  frame->channel_layout = avctx->channel_layout;
1831  } else {
1832  if (avctx->channels > FF_SANE_NB_CHANNELS) {
1833  av_log(avctx, AV_LOG_ERROR, "Too many channels: %d.\n",
1834  avctx->channels);
1835  return AVERROR(ENOSYS);
1836  }
1837  }
1838  }
1839  frame->channels = avctx->channels;
1840  break;
1841  }
1842  return 0;
1843 }
1844 
1845 static void validate_avframe_allocation(AVCodecContext *avctx, AVFrame *frame)
1846 {
1847  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
1848  int i;
1849  int num_planes = av_pix_fmt_count_planes(frame->format);
1850  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
1851  int flags = desc ? desc->flags : 0;
1852  if (num_planes == 1 && (flags & AV_PIX_FMT_FLAG_PAL))
1853  num_planes = 2;
1854  if ((flags & FF_PSEUDOPAL) && frame->data[1])
1855  num_planes = 2;
1856  for (i = 0; i < num_planes; i++) {
1857  av_assert0(frame->data[i]);
1858  }
1859  // For formats without data like hwaccel allow unused pointers to be non-NULL.
1860  for (i = num_planes; num_planes > 0 && i < FF_ARRAY_ELEMS(frame->data); i++) {
1861  if (frame->data[i])
1862  av_log(avctx, AV_LOG_ERROR, "Buffer returned by get_buffer2() did not zero unused plane pointers\n");
1863  frame->data[i] = NULL;
1864  }
1865  }
1866 }
1867 
1868 static void decode_data_free(void *opaque, uint8_t *data)
1869 {
1870  FrameDecodeData *fdd = (FrameDecodeData*)data;
1871 
1872  if (fdd->post_process_opaque_free)
1873  fdd->post_process_opaque_free(fdd->post_process_opaque);
1874 
1875  if (fdd->hwaccel_priv_free)
1876  fdd->hwaccel_priv_free(fdd->hwaccel_priv);
1877 
1878  av_freep(&fdd);
1879 }
1880 
1881 int ff_attach_decode_data(AVFrame *frame)
1882 {
1883  AVBufferRef *fdd_buf;
1884  FrameDecodeData *fdd;
1885 
1886  av_assert1(!frame->private_ref);
1887  av_buffer_unref(&frame->private_ref);
1888 
1889  fdd = av_mallocz(sizeof(*fdd));
1890  if (!fdd)
1891  return AVERROR(ENOMEM);
1892 
1893  fdd_buf = av_buffer_create((uint8_t*)fdd, sizeof(*fdd), decode_data_free,
1894  NULL, AV_BUFFER_FLAG_READONLY);
1895  if (!fdd_buf) {
1896  av_freep(&fdd);
1897  return AVERROR(ENOMEM);
1898  }
1899 
1900  frame->private_ref = fdd_buf;
1901 
1902  return 0;
1903 }
1904 
1905 static int get_buffer_internal(AVCodecContext *avctx, AVFrame *frame, int flags)
1906 {
1907  const AVHWAccel *hwaccel = avctx->hwaccel;
1908  int override_dimensions = 1;
1909  int ret;
1910 
1911  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
1912  if ((ret = av_image_check_size2(FFALIGN(avctx->width, STRIDE_ALIGN), avctx->height, avctx->max_pixels, AV_PIX_FMT_NONE, 0, avctx)) < 0 || avctx->pix_fmt<0) {
1913  av_log(avctx, AV_LOG_ERROR, "video_get_buffer: image parameters invalid\n");
1914  return AVERROR(EINVAL);
1915  }
1916 
1917  if (frame->width <= 0 || frame->height <= 0) {
1918  frame->width = FFMAX(avctx->width, AV_CEIL_RSHIFT(avctx->coded_width, avctx->lowres));
1919  frame->height = FFMAX(avctx->height, AV_CEIL_RSHIFT(avctx->coded_height, avctx->lowres));
1920  override_dimensions = 0;
1921  }
1922 
1923  if (frame->data[0] || frame->data[1] || frame->data[2] || frame->data[3]) {
1924  av_log(avctx, AV_LOG_ERROR, "pic->data[*]!=NULL in get_buffer_internal\n");
1925  return AVERROR(EINVAL);
1926  }
1927  } else if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
1928  if (frame->nb_samples * avctx->channels > avctx->max_samples) {
1929  av_log(avctx, AV_LOG_ERROR, "samples per frame %d, exceeds max_samples %"PRId64"\n", frame->nb_samples, avctx->max_samples);
1930  return AVERROR(EINVAL);
1931  }
1932  }
1933  ret = ff_decode_frame_props(avctx, frame);
1934  if (ret < 0)
1935  return ret;
1936 
1937  if (hwaccel) {
1938  if (hwaccel->alloc_frame) {
1939  ret = hwaccel->alloc_frame(avctx, frame);
1940  goto end;
1941  }
1942  } else
1943  avctx->sw_pix_fmt = avctx->pix_fmt;
1944 
1945  ret = avctx->get_buffer2(avctx, frame, flags);
1946  if (ret < 0)
1947  goto end;
1948 
1949  validate_avframe_allocation(avctx, frame);
1950 
1951  ret = ff_attach_decode_data(frame);
1952  if (ret < 0)
1953  goto end;
1954 
1955 end:
1956  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO && !override_dimensions &&
1957  !(avctx->codec->caps_internal & FF_CODEC_CAP_EXPORTS_CROPPING)) {
1958  frame->width = avctx->width;
1959  frame->height = avctx->height;
1960  }
1961 
1962  if (ret < 0)
1963  av_frame_unref(frame);
1964 
1965  return ret;
1966 }
1967 
1968 int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
1969 {
1970  int ret = get_buffer_internal(avctx, frame, flags);
1971  if (ret < 0) {
1972  av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
1973  frame->width = frame->height = 0;
1974  }
1975  return ret;
1976 }
1977 
1978 static int reget_buffer_internal(AVCodecContext *avctx, AVFrame *frame, int flags)
1979 {
1980  AVFrame *tmp;
1981  int ret;
1982 
1983  av_assert0(avctx->codec_type == AVMEDIA_TYPE_VIDEO);
1984 
1985  if (frame->data[0] && (frame->width != avctx->width || frame->height != avctx->height || frame->format != avctx->pix_fmt)) {
1986  av_log(avctx, AV_LOG_WARNING, "Picture changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s in reget buffer()\n",
1987  frame->width, frame->height, av_get_pix_fmt_name(frame->format), avctx->width, avctx->height, av_get_pix_fmt_name(avctx->pix_fmt));
1988  av_frame_unref(frame);
1989  }
1990 
1991  if (!frame->data[0])
1992  return ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF);
1993 
1994  if ((flags & FF_REGET_BUFFER_FLAG_READONLY) || av_frame_is_writable(frame))
1995  return ff_decode_frame_props(avctx, frame);
1996 
1997  tmp = av_frame_alloc();
1998  if (!tmp)
1999  return AVERROR(ENOMEM);
2000 
2001  av_frame_move_ref(tmp, frame);
2002 
2003  ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF);
2004  if (ret < 0) {
2005  av_frame_free(&tmp);
2006  return ret;
2007  }
2008 
2009  av_frame_copy(frame, tmp);
2010  av_frame_free(&tmp);
2011 
2012  return 0;
2013 }
2014 
2015 int ff_reget_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
2016 {
2017  int ret = reget_buffer_internal(avctx, frame, flags);
2018  if (ret < 0)
2019  av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
2020  return ret;
2021 }
2022 
2023 static void bsfs_flush(AVCodecContext *avctx)
2024 {
2025  DecodeFilterContext *s = &avctx->internal->filter;
2026 
2027  for (int i = 0; i < s->nb_bsfs; i++)
2028  av_bsf_flush(s->bsfs[i]);
2029 }
2030 
2031 void avcodec_flush_buffers(AVCodecContext *avctx)
2032 {
2033  avctx->internal->draining = 0;
2034  avctx->internal->draining_done = 0;
2035  avctx->internal->nb_draining_errors = 0;
2036  av_frame_unref(avctx->internal->buffer_frame);
2037  av_frame_unref(avctx->internal->compat_decode_frame);
2038  av_packet_unref(avctx->internal->buffer_pkt);
2039  avctx->internal->buffer_pkt_valid = 0;
2040 
2041  av_packet_unref(avctx->internal->ds.in_pkt);
2042 
2043  if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME)
2044  ff_thread_flush(avctx);
2045  else if (avctx->codec->flush)
2046  avctx->codec->flush(avctx);
2047 
2048  avctx->pts_correction_last_pts =
2049  avctx->pts_correction_last_dts = INT64_MIN;
2050 
2051  bsfs_flush(avctx);
2052 
2053  if (!avctx->refcounted_frames)
2054  av_frame_unref(avctx->internal->to_free);
2055 }
2056 
2058 {
2059  DecodeFilterContext *s = &avctx->internal->filter;
2060  int i;
2061 
2062  for (i = 0; i < s->nb_bsfs; i++)
2063  av_bsf_free(&s->bsfs[i]);
2064  av_freep(&s->bsfs);
2065  s->nb_bsfs = 0;
2066 }
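
The listing ends with the internal buffer and bitstream-filter helpers; of these, avcodec_flush_buffers() (decode.c:2031) is the one entry point an application calls directly, typically to reset decoder state after a seek. The snippet below is a minimal usage sketch and is not part of decode.c: the format context, codec context and stream index are assumed to have been opened by the caller, and error handling is reduced to returning the failure code.

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>

/* Hypothetical helper: seek the demuxer, then drop everything still buffered
 * inside the decoder so that no pre-seek frames are returned afterwards. */
static int seek_and_flush(AVFormatContext *fmt_ctx, AVCodecContext *dec_ctx,
                          int stream_index, int64_t ts)
{
    int ret = av_seek_frame(fmt_ctx, stream_index, ts, AVSEEK_FLAG_BACKWARD);
    if (ret < 0)
        return ret;

    /* Resets draining/draining_done and the buffered packet/frame, as seen in
     * avcodec_flush_buffers() above, so avcodec_send_packet() and
     * avcodec_receive_frame() can be used again from a clean state. */
    avcodec_flush_buffers(dec_ctx);
    return 0;
}

Because the flush also clears the draining state, the same call lets a decoder that was drained to AVERROR_EOF accept new packets again without being reopened.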