decode.c
1 /*
2  * generic decoding-related code
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <stdint.h>
22 #include <string.h>
23 
24 #include "config.h"
25 
26 #if CONFIG_ICONV
27 # include <iconv.h>
28 #endif
29 
30 #include "libavutil/avassert.h"
31 #include "libavutil/avstring.h"
32 #include "libavutil/bprint.h"
33 #include "libavutil/common.h"
34 #include "libavutil/frame.h"
35 #include "libavutil/hwcontext.h"
36 #include "libavutil/imgutils.h"
37 #include "libavutil/internal.h"
38 #include "libavutil/intmath.h"
39 #include "libavutil/opt.h"
40 
41 #include "avcodec.h"
42 #include "bytestream.h"
43 #include "decode.h"
44 #include "hwaccel.h"
45 #include "internal.h"
46 #include "thread.h"
47 
48 static int apply_param_change(AVCodecContext *avctx, const AVPacket *avpkt)
49 {
50  int size = 0, ret;
51  const uint8_t *data;
52  uint32_t flags;
53  int64_t val;
54 
55  data = av_packet_get_side_data(avpkt, AV_PKT_DATA_PARAM_CHANGE, &size);
56  if (!data)
57  return 0;
58 
59  if (!(avctx->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE)) {
60  av_log(avctx, AV_LOG_ERROR, "This decoder does not support parameter "
61  "changes, but PARAM_CHANGE side data was sent to it.\n");
62  ret = AVERROR(EINVAL);
63  goto fail2;
64  }
65 
66  if (size < 4)
67  goto fail;
68 
69  flags = bytestream_get_le32(&data);
70  size -= 4;
71 
72  if (flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT) {
73  if (size < 4)
74  goto fail;
75  val = bytestream_get_le32(&data);
76  if (val <= 0 || val > INT_MAX) {
77  av_log(avctx, AV_LOG_ERROR, "Invalid channel count");
78  ret = AVERROR_INVALIDDATA;
79  goto fail2;
80  }
81  avctx->channels = val;
82  size -= 4;
83  }
84  if (flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT) {
85  if (size < 8)
86  goto fail;
87  avctx->channel_layout = bytestream_get_le64(&data);
88  size -= 8;
89  }
90  if (flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE) {
91  if (size < 4)
92  goto fail;
93  val = bytestream_get_le32(&data);
94  if (val <= 0 || val > INT_MAX) {
95  av_log(avctx, AV_LOG_ERROR, "Invalid sample rate");
96  ret = AVERROR_INVALIDDATA;
97  goto fail2;
98  }
99  avctx->sample_rate = val;
100  size -= 4;
101  }
102  if (flags & AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS) {
103  if (size < 8)
104  goto fail;
105  avctx->width = bytestream_get_le32(&data);
106  avctx->height = bytestream_get_le32(&data);
107  size -= 8;
108  ret = ff_set_dimensions(avctx, avctx->width, avctx->height);
109  if (ret < 0)
110  goto fail2;
111  }
112 
113  return 0;
114 fail:
115  av_log(avctx, AV_LOG_ERROR, "PARAM_CHANGE side data too small.\n");
116  ret = AVERROR_INVALIDDATA;
117 fail2:
118  if (ret < 0) {
119  av_log(avctx, AV_LOG_ERROR, "Error applying parameter changes.\n");
120  if (avctx->err_recognition & AV_EF_EXPLODE)
121  return ret;
122  }
123  return 0;
124 }
125 
126 static int extract_packet_props(AVCodecInternal *avci, const AVPacket *pkt)
127 {
128  int ret = 0;
129 
130  av_packet_unref(avci->last_pkt_props);
131  if (pkt) {
132  ret = av_packet_copy_props(avci->last_pkt_props, pkt);
133  if (!ret)
134  avci->last_pkt_props->size = pkt->size; // HACK: Needed for ff_decode_frame_props().
135  }
136  return ret;
137 }
138 
139 static int unrefcount_frame(AVCodecInternal *avci, AVFrame *frame)
140 {
141  int ret;
142 
143  /* move the original frame to our backup */
144  av_frame_unref(avci->to_free);
145  av_frame_move_ref(avci->to_free, frame);
146 
147  /* now copy everything except the AVBufferRefs back
148  * note that we make a COPY of the side data, so calling av_frame_free() on
149  * the caller's frame will work properly */
150  ret = av_frame_copy_props(frame, avci->to_free);
151  if (ret < 0)
152  return ret;
153 
154  memcpy(frame->data, avci->to_free->data, sizeof(frame->data));
155  memcpy(frame->linesize, avci->to_free->linesize, sizeof(frame->linesize));
156  if (avci->to_free->extended_data != avci->to_free->data) {
157  int planes = avci->to_free->channels;
158  int size = planes * sizeof(*frame->extended_data);
159 
160  if (!size) {
161  av_frame_unref(frame);
162  return AVERROR_BUG;
163  }
164 
165  frame->extended_data = av_malloc(size);
166  if (!frame->extended_data) {
167  av_frame_unref(frame);
168  return AVERROR(ENOMEM);
169  }
170  memcpy(frame->extended_data, avci->to_free->extended_data,
171  size);
172  } else
173  frame->extended_data = frame->data;
174 
175  frame->format = avci->to_free->format;
176  frame->width = avci->to_free->width;
177  frame->height = avci->to_free->height;
178  frame->channel_layout = avci->to_free->channel_layout;
179  frame->nb_samples = avci->to_free->nb_samples;
180  frame->channels = avci->to_free->channels;
181 
182  return 0;
183 }
184 
185 int ff_decode_bsfs_init(AVCodecContext *avctx)
186 {
187  AVCodecInternal *avci = avctx->internal;
188  DecodeFilterContext *s = &avci->filter;
189  const char *bsfs_str;
190  int ret;
191 
192  if (s->nb_bsfs)
193  return 0;
194 
195  bsfs_str = avctx->codec->bsfs ? avctx->codec->bsfs : "null";
196  while (bsfs_str && *bsfs_str) {
197  AVBSFContext **tmp;
198  const AVBitStreamFilter *filter;
199  char *bsf, *bsf_options_str, *bsf_name;
200 
201  bsf = av_get_token(&bsfs_str, ",");
202  if (!bsf) {
203  ret = AVERROR(ENOMEM);
204  goto fail;
205  }
206  bsf_name = av_strtok(bsf, "=", &bsf_options_str);
207  if (!bsf_name) {
208  av_freep(&bsf);
209  ret = AVERROR(ENOMEM);
210  goto fail;
211  }
212 
213  filter = av_bsf_get_by_name(bsf_name);
214  if (!filter) {
215  av_log(avctx, AV_LOG_ERROR, "A non-existing bitstream filter %s "
216  "requested by a decoder. This is a bug, please report it.\n",
217  bsf_name);
218  av_freep(&bsf);
219  ret = AVERROR_BUG;
220  goto fail;
221  }
222 
223  tmp = av_realloc_array(s->bsfs, s->nb_bsfs + 1, sizeof(*s->bsfs));
224  if (!tmp) {
225  av_freep(&bsf);
226  ret = AVERROR(ENOMEM);
227  goto fail;
228  }
229  s->bsfs = tmp;
230  s->nb_bsfs++;
231 
232  ret = av_bsf_alloc(filter, &s->bsfs[s->nb_bsfs - 1]);
233  if (ret < 0) {
234  av_freep(&bsf);
235  goto fail;
236  }
237 
238  if (s->nb_bsfs == 1) {
239  /* We do not currently have an API for passing the input timebase into decoders,
240  * but no filters used here should actually need it.
241  * So we make up some plausible-looking number (the MPEG 90kHz timebase) */
242  s->bsfs[s->nb_bsfs - 1]->time_base_in = (AVRational){ 1, 90000 };
243  ret = avcodec_parameters_from_context(s->bsfs[s->nb_bsfs - 1]->par_in,
244  avctx);
245  } else {
246  s->bsfs[s->nb_bsfs - 1]->time_base_in = s->bsfs[s->nb_bsfs - 2]->time_base_out;
247  ret = avcodec_parameters_copy(s->bsfs[s->nb_bsfs - 1]->par_in,
248  s->bsfs[s->nb_bsfs - 2]->par_out);
249  }
250  if (ret < 0) {
251  av_freep(&bsf);
252  goto fail;
253  }
254 
255  if (bsf_options_str && filter->priv_class) {
256  const AVOption *opt = av_opt_next(s->bsfs[s->nb_bsfs - 1]->priv_data, NULL);
257  const char * shorthand[2] = {NULL};
258 
259  if (opt)
260  shorthand[0] = opt->name;
261 
262  ret = av_opt_set_from_string(s->bsfs[s->nb_bsfs - 1]->priv_data, bsf_options_str, shorthand, "=", ":");
263  if (ret < 0) {
264  if (ret != AVERROR(ENOMEM)) {
265  av_log(avctx, AV_LOG_ERROR, "Invalid options for bitstream filter %s "
266  "requested by the decoder. This is a bug, please report it.\n",
267  bsf_name);
268  ret = AVERROR_BUG;
269  }
270  av_freep(&bsf);
271  goto fail;
272  }
273  }
274  av_freep(&bsf);
275 
276  ret = av_bsf_init(s->bsfs[s->nb_bsfs - 1]);
277  if (ret < 0)
278  goto fail;
279 
280  if (*bsfs_str)
281  bsfs_str++;
282  }
283 
284  return 0;
285 fail:
286  ff_decode_bsfs_uninit(avctx);
287  return ret;
288 }
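
For reference, a sketch of the string format the loop above parses: AVCodec.bsfs is a comma-separated list of bitstream filter names, each optionally followed by "=" and an option string that av_opt_set_from_string() splits on ":" and "=". The filter and option names below are invented placeholders, not taken from any real decoder.

/* Hypothetical AVCodec.bsfs value and how ff_decode_bsfs_init() would split it:
 *   "filter_a=threshold=3:mode=strict,filter_b"
 * -> av_get_token(..., ",")  yields "filter_a=threshold=3:mode=strict", then "filter_b"
 * -> av_strtok(bsf, "=", ...) yields the name "filter_a" and the option string
 *    "threshold=3:mode=strict", which is applied with av_opt_set_from_string(). */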
289 
290 /* try to get one output packet from the filter chain */
291 static int bsfs_poll(AVCodecContext *avctx, AVPacket *pkt)
292 {
293  DecodeFilterContext *s = &avctx->internal->filter;
294  int idx, ret;
295 
296  /* start with the last filter in the chain */
297  idx = s->nb_bsfs - 1;
298  while (idx >= 0) {
299  /* request a packet from the currently selected filter */
300  ret = av_bsf_receive_packet(s->bsfs[idx], pkt);
301  if (ret == AVERROR(EAGAIN)) {
302  /* no packets available, try the next filter up the chain */
303  ret = 0;
304  idx--;
305  continue;
306  } else if (ret < 0 && ret != AVERROR_EOF) {
307  return ret;
308  }
309 
310  /* got a packet or EOF -- pass it to the caller or to the next filter
311  * down the chain */
312  if (idx == s->nb_bsfs - 1) {
313  return ret;
314  } else {
315  idx++;
316  ret = av_bsf_send_packet(s->bsfs[idx], ret < 0 ? NULL : pkt);
317  if (ret < 0) {
318  av_log(avctx, AV_LOG_ERROR,
319  "Error pre-processing a packet before decoding\n");
320  av_packet_unref(pkt);
321  return ret;
322  }
323  }
324  }
325 
326  return AVERROR(EAGAIN);
327 }
328 
329 int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt)
330 {
331  AVCodecInternal *avci = avctx->internal;
332  int ret;
333 
334  if (avci->draining)
335  return AVERROR_EOF;
336 
337  ret = bsfs_poll(avctx, pkt);
338  if (ret == AVERROR_EOF)
339  avci->draining = 1;
340  if (ret < 0)
341  return ret;
342 
343  ret = extract_packet_props(avctx->internal, pkt);
344  if (ret < 0)
345  goto finish;
346 
347  ret = apply_param_change(avctx, pkt);
348  if (ret < 0)
349  goto finish;
350 
351  if (avctx->codec->receive_frame)
352  avci->compat_decode_consumed += pkt->size;
353 
354  return 0;
355 finish:
356  av_packet_unref(pkt);
357  return ret;
358 }
359 
360 /**
361  * Attempt to guess proper monotonic timestamps for decoded video frames
362  * which might have incorrect times. Input timestamps may wrap around, in
363  * which case the output will as well.
364  *
365  * @param pts the pts field of the decoded AVPacket, as passed through
366  * AVFrame.pts
367  * @param dts the dts field of the decoded AVPacket
368  * @return one of the input values, may be AV_NOPTS_VALUE
369  */
370 static int64_t guess_correct_pts(AVCodecContext *ctx,
371  int64_t reordered_pts, int64_t dts)
372 {
373  int64_t pts = AV_NOPTS_VALUE;
374 
375  if (dts != AV_NOPTS_VALUE) {
376  ctx->pts_correction_num_faulty_dts += dts <= ctx->pts_correction_last_dts;
377  ctx->pts_correction_last_dts = dts;
378  } else if (reordered_pts != AV_NOPTS_VALUE)
379  ctx->pts_correction_last_dts = reordered_pts;
380 
381  if (reordered_pts != AV_NOPTS_VALUE) {
382  ctx->pts_correction_num_faulty_pts += reordered_pts <= ctx->pts_correction_last_pts;
383  ctx->pts_correction_last_pts = reordered_pts;
384  } else if(dts != AV_NOPTS_VALUE)
385  ctx->pts_correction_last_pts = dts;
386 
387  if ((ctx->pts_correction_num_faulty_pts<=ctx->pts_correction_num_faulty_dts || dts == AV_NOPTS_VALUE)
388  && reordered_pts != AV_NOPTS_VALUE)
389  pts = reordered_pts;
390  else
391  pts = dts;
392 
393  return pts;
394 }
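
A small worked illustration of the correction rule above; the counts are invented, not from any real stream.

/* Illustration (hypothetical counts): if pts has gone backwards 5 times so far
 * (pts_correction_num_faulty_pts == 5) while dts never has
 * (pts_correction_num_faulty_dts == 0), then faulty_pts <= faulty_dts is false
 * and, with a valid dts, the helper returns dts; once pts is again no less
 * reliable than dts, it switches back to the reordered pts. */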
395 
396 /*
397  * The core of the receive_frame_wrapper for the decoders implementing
398  * the simple API. Certain decoders might consume partial packets without
399  * returning any output, so this function needs to be called in a loop until it
400  * returns EAGAIN.
401  **/
402 static inline int decode_simple_internal(AVCodecContext *avctx, AVFrame *frame)
403 {
404  AVCodecInternal *avci = avctx->internal;
405  DecodeSimpleContext *ds = &avci->ds;
406  AVPacket *pkt = ds->in_pkt;
407  // copy to ensure we do not change pkt
408  int got_frame, actual_got_frame;
409  int ret;
410 
411  if (!pkt->data && !avci->draining) {
412  av_packet_unref(pkt);
413  ret = ff_decode_get_packet(avctx, pkt);
414  if (ret < 0 && ret != AVERROR_EOF)
415  return ret;
416  }
417 
418  // Some codecs (at least wma lossless) will crash when feeding drain packets
419  // after EOF was signaled.
420  if (avci->draining_done)
421  return AVERROR_EOF;
422 
423  if (!pkt->data &&
424  !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY ||
425  avctx->active_thread_type & FF_THREAD_FRAME))
426  return AVERROR_EOF;
427 
428  got_frame = 0;
429 
430  if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME) {
431  ret = ff_thread_decode_frame(avctx, frame, &got_frame, pkt);
432  } else {
433  ret = avctx->codec->decode(avctx, frame, &got_frame, pkt);
434 
435  if (!(avctx->codec->caps_internal & FF_CODEC_CAP_SETS_PKT_DTS))
436  frame->pkt_dts = pkt->dts;
437  if (avctx->codec->type == AVMEDIA_TYPE_VIDEO) {
438  if(!avctx->has_b_frames)
439  frame->pkt_pos = pkt->pos;
440  //FIXME these should be under if(!avctx->has_b_frames)
441  /* get_buffer is supposed to set frame parameters */
442  if (!(avctx->codec->capabilities & AV_CODEC_CAP_DR1)) {
443  if (!frame->sample_aspect_ratio.num) frame->sample_aspect_ratio = avctx->sample_aspect_ratio;
444  if (!frame->width) frame->width = avctx->width;
445  if (!frame->height) frame->height = avctx->height;
446  if (frame->format == AV_PIX_FMT_NONE) frame->format = avctx->pix_fmt;
447  }
448  }
449  }
450  emms_c();
451  actual_got_frame = got_frame;
452 
453  if (avctx->codec->type == AVMEDIA_TYPE_VIDEO) {
454  if (frame->flags & AV_FRAME_FLAG_DISCARD)
455  got_frame = 0;
456  if (got_frame)
457  frame->best_effort_timestamp = guess_correct_pts(avctx,
458  frame->pts,
459  frame->pkt_dts);
460  } else if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) {
461  uint8_t *side;
462  int side_size;
463  uint32_t discard_padding = 0;
464  uint8_t skip_reason = 0;
465  uint8_t discard_reason = 0;
466 
467  if (ret >= 0 && got_frame) {
468  frame->best_effort_timestamp = guess_correct_pts(avctx,
469  frame->pts,
470  frame->pkt_dts);
471  if (frame->format == AV_SAMPLE_FMT_NONE)
472  frame->format = avctx->sample_fmt;
473  if (!frame->channel_layout)
474  frame->channel_layout = avctx->channel_layout;
475  if (!frame->channels)
476  frame->channels = avctx->channels;
477  if (!frame->sample_rate)
478  frame->sample_rate = avctx->sample_rate;
479  }
480 
481  side = av_packet_get_side_data(avctx->internal->last_pkt_props, AV_PKT_DATA_SKIP_SAMPLES, &side_size);
482  if(side && side_size>=10) {
483  avctx->internal->skip_samples = FFMAX(0, AV_RL32(side));
484  discard_padding = AV_RL32(side + 4);
485  av_log(avctx, AV_LOG_DEBUG, "skip %d / discard %d samples due to side data\n",
486  avctx->internal->skip_samples, (int)discard_padding);
487  skip_reason = AV_RL8(side + 8);
488  discard_reason = AV_RL8(side + 9);
489  }
490 
491  if ((frame->flags & AV_FRAME_FLAG_DISCARD) && got_frame &&
492  !(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
493  avctx->internal->skip_samples = FFMAX(0, avctx->internal->skip_samples - frame->nb_samples);
494  got_frame = 0;
495  }
496 
497  if (avctx->internal->skip_samples > 0 && got_frame &&
498  !(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
499  if(frame->nb_samples <= avctx->internal->skip_samples){
500  got_frame = 0;
501  avctx->internal->skip_samples -= frame->nb_samples;
502  av_log(avctx, AV_LOG_DEBUG, "skip whole frame, skip left: %d\n",
503  avctx->internal->skip_samples);
504  } else {
505  av_samples_copy(frame->extended_data, frame->extended_data, 0, avctx->internal->skip_samples,
506  frame->nb_samples - avctx->internal->skip_samples, avctx->channels, frame->format);
507  if(avctx->pkt_timebase.num && avctx->sample_rate) {
508  int64_t diff_ts = av_rescale_q(avctx->internal->skip_samples,
509  (AVRational){1, avctx->sample_rate},
510  avctx->pkt_timebase);
511  if(frame->pts!=AV_NOPTS_VALUE)
512  frame->pts += diff_ts;
513 #if FF_API_PKT_PTS
514 FF_DISABLE_DEPRECATION_WARNINGS
515  if(frame->pkt_pts!=AV_NOPTS_VALUE)
516  frame->pkt_pts += diff_ts;
517 FF_ENABLE_DEPRECATION_WARNINGS
518 #endif
519  if(frame->pkt_dts!=AV_NOPTS_VALUE)
520  frame->pkt_dts += diff_ts;
521  if (frame->pkt_duration >= diff_ts)
522  frame->pkt_duration -= diff_ts;
523  } else {
524  av_log(avctx, AV_LOG_WARNING, "Could not update timestamps for skipped samples.\n");
525  }
526  av_log(avctx, AV_LOG_DEBUG, "skip %d/%d samples\n",
527  avctx->internal->skip_samples, frame->nb_samples);
528  frame->nb_samples -= avctx->internal->skip_samples;
529  avctx->internal->skip_samples = 0;
530  }
531  }
532 
533  if (discard_padding > 0 && discard_padding <= frame->nb_samples && got_frame &&
534  !(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
535  if (discard_padding == frame->nb_samples) {
536  got_frame = 0;
537  } else {
538  if(avctx->pkt_timebase.num && avctx->sample_rate) {
539  int64_t diff_ts = av_rescale_q(frame->nb_samples - discard_padding,
540  (AVRational){1, avctx->sample_rate},
541  avctx->pkt_timebase);
542  frame->pkt_duration = diff_ts;
543  } else {
544  av_log(avctx, AV_LOG_WARNING, "Could not update timestamps for discarded samples.\n");
545  }
546  av_log(avctx, AV_LOG_DEBUG, "discard %d/%d samples\n",
547  (int)discard_padding, frame->nb_samples);
548  frame->nb_samples -= discard_padding;
549  }
550  }
551 
552  if ((avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL) && got_frame) {
553  AVFrameSideData *fside = av_frame_new_side_data(frame, AV_FRAME_DATA_SKIP_SAMPLES, 10);
554  if (fside) {
555  AV_WL32(fside->data, avctx->internal->skip_samples);
556  AV_WL32(fside->data + 4, discard_padding);
557  AV_WL8(fside->data + 8, skip_reason);
558  AV_WL8(fside->data + 9, discard_reason);
559  avctx->internal->skip_samples = 0;
560  }
561  }
562  }
563 
564  if (avctx->codec->type == AVMEDIA_TYPE_AUDIO &&
565  !avci->showed_multi_packet_warning &&
566  ret >= 0 && ret != pkt->size && !(avctx->codec->capabilities & AV_CODEC_CAP_SUBFRAMES)) {
567  av_log(avctx, AV_LOG_WARNING, "Multiple frames in a packet.\n");
568  avci->showed_multi_packet_warning = 1;
569  }
570 
571  if (!got_frame)
572  av_frame_unref(frame);
573 
574  if (ret >= 0 && avctx->codec->type == AVMEDIA_TYPE_VIDEO && !(avctx->flags & AV_CODEC_FLAG_TRUNCATED))
575  ret = pkt->size;
576 
577 #if FF_API_AVCTX_TIMEBASE
578  if (avctx->framerate.num > 0 && avctx->framerate.den > 0)
579  avctx->time_base = av_inv_q(av_mul_q(avctx->framerate, (AVRational){avctx->ticks_per_frame, 1}));
580 #endif
581 
582  /* do not stop draining when actual_got_frame != 0 or ret < 0 */
583  /* got_frame == 0 but actual_got_frame != 0 when frame is discarded */
584  if (avctx->internal->draining && !actual_got_frame) {
585  if (ret < 0) {
586  /* prevent an infinite loop if a decoder wrongly always returns an error on draining */
587  /* reasonable nb_errors_max = maximum b frames + thread count */
588  int nb_errors_max = 20 + (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME ?
589  avctx->thread_count : 1);
590 
591  if (avci->nb_draining_errors++ >= nb_errors_max) {
592  av_log(avctx, AV_LOG_ERROR, "Too many errors when draining, this is a bug. "
593  "Stop draining and force EOF.\n");
594  avci->draining_done = 1;
595  ret = AVERROR_BUG;
596  }
597  } else {
598  avci->draining_done = 1;
599  }
600  }
601 
602  avci->compat_decode_consumed += ret;
603 
604  if (ret >= pkt->size || ret < 0) {
605  av_packet_unref(pkt);
606  } else {
607  int consumed = ret;
608 
609  pkt->data += consumed;
610  pkt->size -= consumed;
611  avci->last_pkt_props->size -= consumed; // See extract_packet_props() comment.
612  pkt->pts = AV_NOPTS_VALUE;
613  pkt->dts = AV_NOPTS_VALUE;
614  avci->last_pkt_props->pts = AV_NOPTS_VALUE;
615  avci->last_pkt_props->dts = AV_NOPTS_VALUE;
616  }
617 
618  if (got_frame)
619  av_assert0(frame->buf[0]);
620 
621  return ret < 0 ? ret : 0;
622 }
623 
624 static int decode_simple_receive_frame(AVCodecContext *avctx, AVFrame *frame)
625 {
626  int ret;
627 
628  while (!frame->buf[0]) {
629  ret = decode_simple_internal(avctx, frame);
630  if (ret < 0)
631  return ret;
632  }
633 
634  return 0;
635 }
636 
637 static int decode_receive_frame_internal(AVCodecContext *avctx, AVFrame *frame)
638 {
639  AVCodecInternal *avci = avctx->internal;
640  int ret;
641 
642  av_assert0(!frame->buf[0]);
643 
644  if (avctx->codec->receive_frame)
645  ret = avctx->codec->receive_frame(avctx, frame);
646  else
647  ret = decode_simple_receive_frame(avctx, frame);
648 
649  if (ret == AVERROR_EOF)
650  avci->draining_done = 1;
651 
652  if (!ret) {
653  /* the only case where decode data is not set should be decoders
654  * that do not call ff_get_buffer() */
655  av_assert0((frame->private_ref && frame->private_ref->size == sizeof(FrameDecodeData)) ||
656  !(avctx->codec->capabilities & AV_CODEC_CAP_DR1));
657 
658  if (frame->private_ref) {
659  FrameDecodeData *fdd = (FrameDecodeData*)frame->private_ref->data;
660 
661  if (fdd->post_process) {
662  ret = fdd->post_process(avctx, frame);
663  if (ret < 0) {
664  av_frame_unref(frame);
665  return ret;
666  }
667  }
668  }
669  }
670 
671  /* free the per-frame decode data */
672  av_buffer_unref(&frame->private_ref);
673 
674  return ret;
675 }
676 
677 int attribute_align_arg avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
678 {
679  AVCodecInternal *avci = avctx->internal;
680  int ret;
681 
682  if (!avcodec_is_open(avctx) || !av_codec_is_decoder(avctx->codec))
683  return AVERROR(EINVAL);
684 
685  if (avctx->internal->draining)
686  return AVERROR_EOF;
687 
688  if (avpkt && !avpkt->size && avpkt->data)
689  return AVERROR(EINVAL);
690 
690 
691  av_packet_unref(avci->buffer_pkt);
692  if (avpkt && (avpkt->data || avpkt->side_data_elems)) {
693  ret = av_packet_ref(avci->buffer_pkt, avpkt);
694  if (ret < 0)
695  return ret;
696  }
697 
698  ret = av_bsf_send_packet(avci->filter.bsfs[0], avci->buffer_pkt);
699  if (ret < 0) {
700  av_packet_unref(avci->buffer_pkt);
701  return ret;
702  }
703 
704  if (!avci->buffer_frame->buf[0]) {
705  ret = decode_receive_frame_internal(avctx, avci->buffer_frame);
706  if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
707  return ret;
708  }
709 
710  return 0;
711 }
712 
713 static int apply_cropping(AVCodecContext *avctx, AVFrame *frame)
714 {
715  /* make sure we are noisy about decoders returning invalid cropping data */
716  if (frame->crop_left >= INT_MAX - frame->crop_right ||
717  frame->crop_top >= INT_MAX - frame->crop_bottom ||
718  (frame->crop_left + frame->crop_right) >= frame->width ||
719  (frame->crop_top + frame->crop_bottom) >= frame->height) {
720  av_log(avctx, AV_LOG_WARNING,
721  "Invalid cropping information set by a decoder: "
722  "%"SIZE_SPECIFIER"/%"SIZE_SPECIFIER"/%"SIZE_SPECIFIER"/%"SIZE_SPECIFIER" "
723  "(frame size %dx%d). This is a bug, please report it\n",
724  frame->crop_left, frame->crop_right, frame->crop_top, frame->crop_bottom,
725  frame->width, frame->height);
726  frame->crop_left = 0;
727  frame->crop_right = 0;
728  frame->crop_top = 0;
729  frame->crop_bottom = 0;
730  return 0;
731  }
732 
733  if (!avctx->apply_cropping)
734  return 0;
735 
736  return av_frame_apply_cropping(frame, avctx->flags & AV_CODEC_FLAG_UNALIGNED ?
737  AV_FRAME_CROP_UNALIGNED : 0);
738 }
739 
740 int attribute_align_arg avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
741 {
742  AVCodecInternal *avci = avctx->internal;
743  int ret, changed;
744 
745  av_frame_unref(frame);
746 
747  if (!avcodec_is_open(avctx) || !av_codec_is_decoder(avctx->codec))
748  return AVERROR(EINVAL);
749 
750  if (avci->buffer_frame->buf[0]) {
751  av_frame_move_ref(frame, avci->buffer_frame);
752  } else {
753  ret = decode_receive_frame_internal(avctx, frame);
754  if (ret < 0)
755  return ret;
756  }
757 
758  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
759  ret = apply_cropping(avctx, frame);
760  if (ret < 0) {
761  av_frame_unref(frame);
762  return ret;
763  }
764  }
765 
766  avctx->frame_number++;
767 
768  if (avctx->flags & AV_CODEC_FLAG_DROPCHANGED) {
769 
770  if (avctx->frame_number == 1) {
771  avci->initial_format = frame->format;
772  switch(avctx->codec_type) {
773  case AVMEDIA_TYPE_VIDEO:
774  avci->initial_width = frame->width;
775  avci->initial_height = frame->height;
776  break;
777  case AVMEDIA_TYPE_AUDIO:
778  avci->initial_sample_rate = frame->sample_rate ? frame->sample_rate :
779  avctx->sample_rate;
780  avci->initial_channels = frame->channels;
781  avci->initial_channel_layout = frame->channel_layout;
782  break;
783  }
784  }
785 
786  if (avctx->frame_number > 1) {
787  changed = avci->initial_format != frame->format;
788 
789  switch(avctx->codec_type) {
790  case AVMEDIA_TYPE_VIDEO:
791  changed |= avci->initial_width != frame->width ||
792  avci->initial_height != frame->height;
793  break;
794  case AVMEDIA_TYPE_AUDIO:
795  changed |= avci->initial_sample_rate != frame->sample_rate ||
796  avci->initial_sample_rate != avctx->sample_rate ||
797  avci->initial_channels != frame->channels ||
798  avci->initial_channel_layout != frame->channel_layout;
799  break;
800  }
801 
802  if (changed) {
803  avci->changed_frames_dropped++;
804  av_log(avctx, AV_LOG_INFO, "dropped changed frame #%d pts %"PRId64
805  " drop count: %d \n",
806  avctx->frame_number, frame->pts,
807  avci->changed_frames_dropped);
808  av_frame_unref(frame);
809  return AVERROR_INPUT_CHANGED;
810  }
811  }
812  }
813  return 0;
814 }
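
For context, a minimal sketch of how a caller typically drives the two public entry points above; "dec", "frame" and "decode_packet" are placeholder names and error handling is abbreviated.

static int decode_packet(AVCodecContext *dec, const AVPacket *pkt, AVFrame *frame)
{
    int ret = avcodec_send_packet(dec, pkt); /* pkt == NULL enters draining mode */
    if (ret < 0)
        return ret;
    for (;;) {
        ret = avcodec_receive_frame(dec, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0; /* need more input, or fully drained */
        if (ret < 0)
            return ret;
        /* ... consume the decoded frame ... */
        av_frame_unref(frame);
    }
}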
815 
816 static int compat_decode(AVCodecContext *avctx, AVFrame *frame,
817  int *got_frame, const AVPacket *pkt)
818 {
819  AVCodecInternal *avci = avctx->internal;
820  int ret = 0;
821 
822  av_assert0(avci->compat_decode_consumed == 0);
823 
824  if (avci->draining_done && pkt && pkt->size != 0) {
825  av_log(avctx, AV_LOG_WARNING, "Got unexpected packet after EOF\n");
826  avcodec_flush_buffers(avctx);
827  }
828 
829  *got_frame = 0;
830  avci->compat_decode = 1;
831 
832  if (avci->compat_decode_partial_size > 0 &&
833  avci->compat_decode_partial_size != pkt->size) {
834  av_log(avctx, AV_LOG_ERROR,
835  "Got unexpected packet size after a partial decode\n");
836  ret = AVERROR(EINVAL);
837  goto finish;
838  }
839 
840  if (!avci->compat_decode_partial_size) {
841  ret = avcodec_send_packet(avctx, pkt);
842  if (ret == AVERROR_EOF)
843  ret = 0;
844  else if (ret == AVERROR(EAGAIN)) {
845  /* we fully drain all the output in each decode call, so this should not
846  * ever happen */
847  ret = AVERROR_BUG;
848  goto finish;
849  } else if (ret < 0)
850  goto finish;
851  }
852 
853  while (ret >= 0) {
854  ret = avcodec_receive_frame(avctx, frame);
855  if (ret < 0) {
856  if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
857  ret = 0;
858  goto finish;
859  }
860 
861  if (frame != avci->compat_decode_frame) {
862  if (!avctx->refcounted_frames) {
863  ret = unrefcount_frame(avci, frame);
864  if (ret < 0)
865  goto finish;
866  }
867 
868  *got_frame = 1;
869  frame = avci->compat_decode_frame;
870  } else {
871  if (!avci->compat_decode_warned) {
872  av_log(avctx, AV_LOG_WARNING, "The deprecated avcodec_decode_* "
873  "API cannot return all the frames for this decoder. "
874  "Some frames will be dropped. Update your code to the "
875  "new decoding API to fix this.\n");
876  avci->compat_decode_warned = 1;
877  }
878  }
879 
880  if (avci->draining || (!avctx->codec->bsfs && avci->compat_decode_consumed < pkt->size))
881  break;
882  }
883 
884 finish:
885  if (ret == 0) {
886  /* if there are any bsfs then assume full packet is always consumed */
887  if (avctx->codec->bsfs)
888  ret = pkt->size;
889  else
890  ret = FFMIN(avci->compat_decode_consumed, pkt->size);
891  }
892  avci->compat_decode_consumed = 0;
893  avci->compat_decode_partial_size = (ret >= 0) ? pkt->size - ret : 0;
894 
895  return ret;
896 }
897 
898 int attribute_align_arg avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture,
899  int *got_picture_ptr,
900  const AVPacket *avpkt)
901 {
902  return compat_decode(avctx, picture, got_picture_ptr, avpkt);
903 }
904 
905 int attribute_align_arg avcodec_decode_audio4(AVCodecContext *avctx,
906  AVFrame *frame,
907  int *got_frame_ptr,
908  const AVPacket *avpkt)
909 {
910  return compat_decode(avctx, frame, got_frame_ptr, avpkt);
911 }
912 
913 static void get_subtitle_defaults(AVSubtitle *sub)
914 {
915  memset(sub, 0, sizeof(*sub));
916  sub->pts = AV_NOPTS_VALUE;
917 }
918 
919 #define UTF8_MAX_BYTES 4 /* 5 and 6 bytes sequences should not be used */
920 static int recode_subtitle(AVCodecContext *avctx,
921  AVPacket *outpkt, const AVPacket *inpkt)
922 {
923 #if CONFIG_ICONV
924  iconv_t cd = (iconv_t)-1;
925  int ret = 0;
926  char *inb, *outb;
927  size_t inl, outl;
928  AVPacket tmp;
929 #endif
930 
931  if (avctx->sub_charenc_mode != FF_SUB_CHARENC_MODE_PRE_DECODER || inpkt->size == 0)
932  return 0;
933 
934 #if CONFIG_ICONV
935  cd = iconv_open("UTF-8", avctx->sub_charenc);
936  av_assert0(cd != (iconv_t)-1);
937 
938  inb = inpkt->data;
939  inl = inpkt->size;
940 
941  if (inl >= INT_MAX / UTF8_MAX_BYTES - AV_INPUT_BUFFER_PADDING_SIZE) {
942  av_log(avctx, AV_LOG_ERROR, "Subtitles packet is too big for recoding\n");
943  ret = AVERROR(ENOMEM);
944  goto end;
945  }
946 
947  ret = av_new_packet(&tmp, inl * UTF8_MAX_BYTES);
948  if (ret < 0)
949  goto end;
950  outpkt->buf = tmp.buf;
951  outpkt->data = tmp.data;
952  outpkt->size = tmp.size;
953  outb = outpkt->data;
954  outl = outpkt->size;
955 
956  if (iconv(cd, &inb, &inl, &outb, &outl) == (size_t)-1 ||
957  iconv(cd, NULL, NULL, &outb, &outl) == (size_t)-1 ||
958  outl >= outpkt->size || inl != 0) {
959  ret = FFMIN(AVERROR(errno), -1);
960  av_log(avctx, AV_LOG_ERROR, "Unable to recode subtitle event \"%s\" "
961  "from %s to UTF-8\n", inpkt->data, avctx->sub_charenc);
962  av_packet_unref(&tmp);
963  goto end;
964  }
965  outpkt->size -= outl;
966  memset(outpkt->data + outpkt->size, 0, outl);
967 
968 end:
969  if (cd != (iconv_t)-1)
970  iconv_close(cd);
971  return ret;
972 #else
973  av_log(avctx, AV_LOG_ERROR, "requesting subtitles recoding without iconv");
974  return AVERROR(EINVAL);
975 #endif
976 }
977 
978 static int utf8_check(const uint8_t *str)
979 {
980  const uint8_t *byte;
981  uint32_t codepoint, min;
982 
983  while (*str) {
984  byte = str;
985  GET_UTF8(codepoint, *(byte++), return 0;);
986  min = byte - str == 1 ? 0 : byte - str == 2 ? 0x80 :
987  1 << (5 * (byte - str) - 4);
988  if (codepoint < min || codepoint >= 0x110000 ||
989  codepoint == 0xFFFE /* BOM */ ||
990  codepoint >= 0xD800 && codepoint <= 0xDFFF /* surrogates */)
991  return 0;
992  str = byte;
993  }
994  return 1;
995 }
996 
997 #if FF_API_ASS_TIMING
998 static void insert_ts(AVBPrint *buf, int ts)
999 {
1000  if (ts == -1) {
1001  av_bprintf(buf, "9:59:59.99,");
1002  } else {
1003  int h, m, s;
1004 
1005  h = ts/360000; ts -= 360000*h;
1006  m = ts/ 6000; ts -= 6000*m;
1007  s = ts/ 100; ts -= 100*s;
1008  av_bprintf(buf, "%d:%02d:%02d.%02d,", h, m, s, ts);
1009  }
1010 }
1011 
1012 static int convert_sub_to_old_ass_form(AVSubtitle *sub, const AVPacket *pkt, AVRational tb)
1013 {
1014  int i;
1015  AVBPrint buf;
1016 
1017  av_bprint_init(&buf, 0, AV_BPRINT_SIZE_UNLIMITED);
1018 
1019  for (i = 0; i < sub->num_rects; i++) {
1020  char *final_dialog;
1021  const char *dialog;
1022  AVSubtitleRect *rect = sub->rects[i];
1023  int ts_start, ts_duration = -1;
1024  long int layer;
1025 
1026  if (rect->type != SUBTITLE_ASS || !strncmp(rect->ass, "Dialogue: ", 10))
1027  continue;
1028 
1029  av_bprint_clear(&buf);
1030 
1031  /* skip ReadOrder */
1032  dialog = strchr(rect->ass, ',');
1033  if (!dialog)
1034  continue;
1035  dialog++;
1036 
1037  /* extract Layer or Marked */
1038  layer = strtol(dialog, (char**)&dialog, 10);
1039  if (*dialog != ',')
1040  continue;
1041  dialog++;
1042 
1043  /* rescale timing to ASS time base (ms) */
1044  ts_start = av_rescale_q(pkt->pts, tb, av_make_q(1, 100));
1045  if (pkt->duration != -1)
1046  ts_duration = av_rescale_q(pkt->duration, tb, av_make_q(1, 100));
1047  sub->end_display_time = FFMAX(sub->end_display_time, 10 * ts_duration);
1048 
1049  /* construct ASS (standalone file form with timestamps) string */
1050  av_bprintf(&buf, "Dialogue: %ld,", layer);
1051  insert_ts(&buf, ts_start);
1052  insert_ts(&buf, ts_duration == -1 ? -1 : ts_start + ts_duration);
1053  av_bprintf(&buf, "%s\r\n", dialog);
1054 
1055  final_dialog = av_strdup(buf.str);
1056  if (!av_bprint_is_complete(&buf) || !final_dialog) {
1057  av_freep(&final_dialog);
1058  av_bprint_finalize(&buf, NULL);
1059  return AVERROR(ENOMEM);
1060  }
1061  av_freep(&rect->ass);
1062  rect->ass = final_dialog;
1063  }
1064 
1065  av_bprint_finalize(&buf, NULL);
1066  return 0;
1067 }
1068 #endif
1069 
1070 int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub,
1071  int *got_sub_ptr,
1072  AVPacket *avpkt)
1073 {
1074  int i, ret = 0;
1075 
1076  if (!avpkt->data && avpkt->size) {
1077  av_log(avctx, AV_LOG_ERROR, "invalid packet: NULL data, size != 0\n");
1078  return AVERROR(EINVAL);
1079  }
1080  if (!avctx->codec)
1081  return AVERROR(EINVAL);
1082  if (avctx->codec->type != AVMEDIA_TYPE_SUBTITLE) {
1083  av_log(avctx, AV_LOG_ERROR, "Invalid media type for subtitles\n");
1084  return AVERROR(EINVAL);
1085  }
1086 
1087  *got_sub_ptr = 0;
1088  get_subtitle_defaults(sub);
1089 
1090  if ((avctx->codec->capabilities & AV_CODEC_CAP_DELAY) || avpkt->size) {
1091  AVPacket pkt_recoded = *avpkt;
1092 
1093  ret = recode_subtitle(avctx, &pkt_recoded, avpkt);
1094  if (ret < 0) {
1095  *got_sub_ptr = 0;
1096  } else {
1097  ret = extract_packet_props(avctx->internal, &pkt_recoded);
1098  if (ret < 0)
1099  return ret;
1100 
1101  if (avctx->pkt_timebase.num && avpkt->pts != AV_NOPTS_VALUE)
1102  sub->pts = av_rescale_q(avpkt->pts,
1103  avctx->pkt_timebase, AV_TIME_BASE_Q);
1104  ret = avctx->codec->decode(avctx, sub, got_sub_ptr, &pkt_recoded);
1105  av_assert1((ret >= 0) >= !!*got_sub_ptr &&
1106  !!*got_sub_ptr >= !!sub->num_rects);
1107 
1108 #if FF_API_ASS_TIMING
1109  if (avctx->sub_text_format == FF_SUB_TEXT_FMT_ASS_WITH_TIMINGS
1110  && *got_sub_ptr && sub->num_rects) {
1111  const AVRational tb = avctx->pkt_timebase.num ? avctx->pkt_timebase
1112  : avctx->time_base;
1113  int err = convert_sub_to_old_ass_form(sub, avpkt, tb);
1114  if (err < 0)
1115  ret = err;
1116  }
1117 #endif
1118 
1119  if (sub->num_rects && !sub->end_display_time && avpkt->duration &&
1120  avctx->pkt_timebase.num) {
1121  AVRational ms = { 1, 1000 };
1122  sub->end_display_time = av_rescale_q(avpkt->duration,
1123  avctx->pkt_timebase, ms);
1124  }
1125 
1126  if (avctx->codec_descriptor->props & AV_CODEC_PROP_BITMAP_SUB)
1127  sub->format = 0;
1128  else if (avctx->codec_descriptor->props & AV_CODEC_PROP_TEXT_SUB)
1129  sub->format = 1;
1130 
1131  for (i = 0; i < sub->num_rects; i++) {
1132  if (avctx->sub_charenc_mode != FF_SUB_CHARENC_MODE_IGNORE &&
1133  sub->rects[i]->ass && !utf8_check(sub->rects[i]->ass)) {
1134  av_log(avctx, AV_LOG_ERROR,
1135  "Invalid UTF-8 in decoded subtitles text; "
1136  "maybe missing -sub_charenc option\n");
1137  avsubtitle_free(sub);
1138  ret = AVERROR_INVALIDDATA;
1139  break;
1140  }
1141  }
1142 
1143  if (avpkt->data != pkt_recoded.data) { // did we recode?
1144  /* prevent from destroying side data from original packet */
1145  pkt_recoded.side_data = NULL;
1146  pkt_recoded.side_data_elems = 0;
1147 
1148  av_packet_unref(&pkt_recoded);
1149  }
1150  }
1151 
1152  if (*got_sub_ptr)
1153  avctx->frame_number++;
1154  }
1155 
1156  return ret;
1157 }
1158 
1159 enum AVPixelFormat avcodec_default_get_format(struct AVCodecContext *avctx,
1160  const enum AVPixelFormat *fmt)
1161 {
1162  const AVPixFmtDescriptor *desc;
1163  const AVCodecHWConfig *config;
1164  int i, n;
1165 
1166  // If a device was supplied when the codec was opened, assume that the
1167  // user wants to use it.
1168  if (avctx->hw_device_ctx && avctx->codec->hw_configs) {
1169  AVHWDeviceContext *device_ctx =
1170  (AVHWDeviceContext*)avctx->hw_device_ctx->data;
1171  for (i = 0;; i++) {
1172  config = &avctx->codec->hw_configs[i]->public;
1173  if (!config)
1174  break;
1175  if (!(config->methods &
1176  AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX))
1177  continue;
1178  if (device_ctx->type != config->device_type)
1179  continue;
1180  for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++) {
1181  if (config->pix_fmt == fmt[n])
1182  return fmt[n];
1183  }
1184  }
1185  }
1186  // No device or other setup, so we have to choose from things which
1187  // don't require any other external information.
1188 
1189  // If the last element of the list is a software format, choose it
1190  // (this should be best software format if any exist).
1191  for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++);
1192  desc = av_pix_fmt_desc_get(fmt[n - 1]);
1193  if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
1194  return fmt[n - 1];
1195 
1196  // Finally, traverse the list in order and choose the first entry
1197  // with no external dependencies (if there is no hardware configuration
1198  // information available then this just picks the first entry).
1199  for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++) {
1200  for (i = 0;; i++) {
1201  config = avcodec_get_hw_config(avctx->codec, i);
1202  if (!config)
1203  break;
1204  if (config->pix_fmt == fmt[n])
1205  break;
1206  }
1207  if (!config) {
1208  // No specific config available, so the decoder must be able
1209  // to handle this format without any additional setup.
1210  return fmt[n];
1211  }
1212  if (config->methods & AV_CODEC_HW_CONFIG_METHOD_INTERNAL) {
1213  // Usable with only internal setup.
1214  return fmt[n];
1215  }
1216  }
1217 
1218  // Nothing is usable, give up.
1219  return AV_PIX_FMT_NONE;
1220 }
1221 
1222 int ff_decode_get_hw_frames_ctx(AVCodecContext *avctx,
1223  enum AVHWDeviceType dev_type)
1224 {
1225  AVHWDeviceContext *device_ctx;
1226  AVHWFramesContext *frames_ctx;
1227  int ret;
1228 
1229  if (!avctx->hwaccel)
1230  return AVERROR(ENOSYS);
1231 
1232  if (avctx->hw_frames_ctx)
1233  return 0;
1234  if (!avctx->hw_device_ctx) {
1235  av_log(avctx, AV_LOG_ERROR, "A hardware frames or device context is "
1236  "required for hardware accelerated decoding.\n");
1237  return AVERROR(EINVAL);
1238  }
1239 
1240  device_ctx = (AVHWDeviceContext *)avctx->hw_device_ctx->data;
1241  if (device_ctx->type != dev_type) {
1242  av_log(avctx, AV_LOG_ERROR, "Device type %s expected for hardware "
1243  "decoding, but got %s.\n", av_hwdevice_get_type_name(dev_type),
1244  av_hwdevice_get_type_name(device_ctx->type));
1245  return AVERROR(EINVAL);
1246  }
1247 
1248  ret = avcodec_get_hw_frames_parameters(avctx,
1249  avctx->hw_device_ctx,
1250  avctx->hwaccel->pix_fmt,
1251  &avctx->hw_frames_ctx);
1252  if (ret < 0)
1253  return ret;
1254 
1255  frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
1256 
1257 
1258  if (frames_ctx->initial_pool_size) {
1259  // We guarantee 4 base work surfaces. The function above guarantees 1
1260  // (the absolute minimum), so add the missing count.
1261  frames_ctx->initial_pool_size += 3;
1262  }
1263 
1264  ret = av_hwframe_ctx_init(avctx->hw_frames_ctx);
1265  if (ret < 0) {
1266  av_buffer_unref(&avctx->hw_frames_ctx);
1267  return ret;
1268  }
1269 
1270  return 0;
1271 }
1272 
1273 int avcodec_get_hw_frames_parameters(AVCodecContext *avctx,
1274  AVBufferRef *device_ref,
1275  enum AVPixelFormat hw_pix_fmt,
1276  AVBufferRef **out_frames_ref)
1277 {
1278  AVBufferRef *frames_ref = NULL;
1279  const AVCodecHWConfigInternal *hw_config;
1280  const AVHWAccel *hwa;
1281  int i, ret;
1282 
1283  for (i = 0;; i++) {
1284  hw_config = avctx->codec->hw_configs[i];
1285  if (!hw_config)
1286  return AVERROR(ENOENT);
1287  if (hw_config->public.pix_fmt == hw_pix_fmt)
1288  break;
1289  }
1290 
1291  hwa = hw_config->hwaccel;
1292  if (!hwa || !hwa->frame_params)
1293  return AVERROR(ENOENT);
1294 
1295  frames_ref = av_hwframe_ctx_alloc(device_ref);
1296  if (!frames_ref)
1297  return AVERROR(ENOMEM);
1298 
1299  ret = hwa->frame_params(avctx, frames_ref);
1300  if (ret >= 0) {
1301  AVHWFramesContext *frames_ctx = (AVHWFramesContext*)frames_ref->data;
1302 
1303  if (frames_ctx->initial_pool_size) {
1304  // If the user has requested that extra output surfaces be
1305  // available then add them here.
1306  if (avctx->extra_hw_frames > 0)
1307  frames_ctx->initial_pool_size += avctx->extra_hw_frames;
1308 
1309  // If frame threading is enabled then an extra surface per thread
1310  // is also required.
1311  if (avctx->active_thread_type & FF_THREAD_FRAME)
1312  frames_ctx->initial_pool_size += avctx->thread_count;
1313  }
1314 
1315  *out_frames_ref = frames_ref;
1316  } else {
1317  av_buffer_unref(&frames_ref);
1318  }
1319  return ret;
1320 }
1321 
1322 static int hwaccel_init(AVCodecContext *avctx,
1323  const AVCodecHWConfigInternal *hw_config)
1324 {
1325  const AVHWAccel *hwaccel;
1326  int err;
1327 
1328  hwaccel = hw_config->hwaccel;
1329  if (hwaccel->capabilities & AV_HWACCEL_CODEC_CAP_EXPERIMENTAL &&
1330  avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
1331  av_log(avctx, AV_LOG_WARNING, "Ignoring experimental hwaccel: %s\n",
1332  hwaccel->name);
1333  return AVERROR_PATCHWELCOME;
1334  }
1335 
1336  if (hwaccel->priv_data_size) {
1337  avctx->internal->hwaccel_priv_data =
1338  av_mallocz(hwaccel->priv_data_size);
1339  if (!avctx->internal->hwaccel_priv_data)
1340  return AVERROR(ENOMEM);
1341  }
1342 
1343  avctx->hwaccel = hwaccel;
1344  if (hwaccel->init) {
1345  err = hwaccel->init(avctx);
1346  if (err < 0) {
1347  av_log(avctx, AV_LOG_ERROR, "Failed setup for format %s: "
1348  "hwaccel initialisation returned error.\n",
1349  av_get_pix_fmt_name(hw_config->public.pix_fmt));
1350  av_freep(&avctx->internal->hwaccel_priv_data);
1351  avctx->hwaccel = NULL;
1352  return err;
1353  }
1354  }
1355 
1356  return 0;
1357 }
1358 
1359 static void hwaccel_uninit(AVCodecContext *avctx)
1360 {
1361  if (avctx->hwaccel && avctx->hwaccel->uninit)
1362  avctx->hwaccel->uninit(avctx);
1363 
1364  av_freep(&avctx->internal->hwaccel_priv_data);
1365 
1366  avctx->hwaccel = NULL;
1367 
1368  av_buffer_unref(&avctx->hw_frames_ctx);
1369 }
1370 
1371 int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
1372 {
1373  const AVPixFmtDescriptor *desc;
1374  enum AVPixelFormat *choices;
1375  enum AVPixelFormat ret, user_choice;
1376  const AVCodecHWConfigInternal *hw_config;
1377  const AVCodecHWConfig *config;
1378  int i, n, err;
1379 
1380  // Find end of list.
1381  for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++);
1382  // Must contain at least one entry.
1383  av_assert0(n >= 1);
1384  // If a software format is available, it must be the last entry.
1385  desc = av_pix_fmt_desc_get(fmt[n - 1]);
1386  if (desc->flags & AV_PIX_FMT_FLAG_HWACCEL) {
1387  // No software format is available.
1388  } else {
1389  avctx->sw_pix_fmt = fmt[n - 1];
1390  }
1391 
1392  choices = av_malloc_array(n + 1, sizeof(*choices));
1393  if (!choices)
1394  return AV_PIX_FMT_NONE;
1395 
1396  memcpy(choices, fmt, (n + 1) * sizeof(*choices));
1397 
1398  for (;;) {
1399  // Remove the previous hwaccel, if there was one.
1400  hwaccel_uninit(avctx);
1401 
1402  user_choice = avctx->get_format(avctx, choices);
1403  if (user_choice == AV_PIX_FMT_NONE) {
1404  // Explicitly chose nothing, give up.
1405  ret = AV_PIX_FMT_NONE;
1406  break;
1407  }
1408 
1409  desc = av_pix_fmt_desc_get(user_choice);
1410  if (!desc) {
1411  av_log(avctx, AV_LOG_ERROR, "Invalid format returned by "
1412  "get_format() callback.\n");
1413  ret = AV_PIX_FMT_NONE;
1414  break;
1415  }
1416  av_log(avctx, AV_LOG_DEBUG, "Format %s chosen by get_format().\n",
1417  desc->name);
1418 
1419  for (i = 0; i < n; i++) {
1420  if (choices[i] == user_choice)
1421  break;
1422  }
1423  if (i == n) {
1424  av_log(avctx, AV_LOG_ERROR, "Invalid return from get_format(): "
1425  "%s not in possible list.\n", desc->name);
1426  ret = AV_PIX_FMT_NONE;
1427  break;
1428  }
1429 
1430  if (avctx->codec->hw_configs) {
1431  for (i = 0;; i++) {
1432  hw_config = avctx->codec->hw_configs[i];
1433  if (!hw_config)
1434  break;
1435  if (hw_config->public.pix_fmt == user_choice)
1436  break;
1437  }
1438  } else {
1439  hw_config = NULL;
1440  }
1441 
1442  if (!hw_config) {
1443  // No config available, so no extra setup required.
1444  ret = user_choice;
1445  break;
1446  }
1447  config = &hw_config->public;
1448 
1449  if (config->methods &
1450  AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX &&
1451  avctx->hw_frames_ctx) {
1452  const AVHWFramesContext *frames_ctx =
1453  (AVHWFramesContext*)avctx->hw_frames_ctx->data;
1454  if (frames_ctx->format != user_choice) {
1455  av_log(avctx, AV_LOG_ERROR, "Invalid setup for format %s: "
1456  "does not match the format of the provided frames "
1457  "context.\n", desc->name);
1458  goto try_again;
1459  }
1460  } else if (config->methods &
1461  AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX &&
1462  avctx->hw_device_ctx) {
1463  const AVHWDeviceContext *device_ctx =
1464  (AVHWDeviceContext*)avctx->hw_device_ctx->data;
1465  if (device_ctx->type != config->device_type) {
1466  av_log(avctx, AV_LOG_ERROR, "Invalid setup for format %s: "
1467  "does not match the type of the provided device "
1468  "context.\n", desc->name);
1469  goto try_again;
1470  }
1471  } else if (config->methods &
1472  AV_CODEC_HW_CONFIG_METHOD_INTERNAL) {
1473  // Internal-only setup, no additional configuration.
1474  } else if (config->methods &
1475  AV_CODEC_HW_CONFIG_METHOD_AD_HOC) {
1476  // Some ad-hoc configuration we can't see and can't check.
1477  } else {
1478  av_log(avctx, AV_LOG_ERROR, "Invalid setup for format %s: "
1479  "missing configuration.\n", desc->name);
1480  goto try_again;
1481  }
1482  if (hw_config->hwaccel) {
1483  av_log(avctx, AV_LOG_DEBUG, "Format %s requires hwaccel "
1484  "initialisation.\n", desc->name);
1485  err = hwaccel_init(avctx, hw_config);
1486  if (err < 0)
1487  goto try_again;
1488  }
1489  ret = user_choice;
1490  break;
1491 
1492  try_again:
1493  av_log(avctx, AV_LOG_DEBUG, "Format %s not usable, retrying "
1494  "get_format() without it.\n", desc->name);
1495  for (i = 0; i < n; i++) {
1496  if (choices[i] == user_choice)
1497  break;
1498  }
1499  for (; i + 1 < n; i++)
1500  choices[i] = choices[i + 1];
1501  --n;
1502  }
1503 
1504  av_freep(&choices);
1505  return ret;
1506 }
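
As a usage note, here is a hypothetical get_format callback that a caller might install on AVCodecContext.get_format. It cooperates with the negotiation loop above by returning one entry from the offered list; preferring VAAPI is purely an example, and "example_get_format" is an invented name.

static enum AVPixelFormat example_get_format(AVCodecContext *ctx,
                                             const enum AVPixelFormat *fmt)
{
    int n;
    for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++)
        if (fmt[n] == AV_PIX_FMT_VAAPI)
            return fmt[n]; /* hardware format this caller is prepared to handle */
    /* the list is never empty; the last entry is the preferred software format */
    return n ? fmt[n - 1] : AV_PIX_FMT_NONE;
}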
1507 
1508 static int update_frame_pool(AVCodecContext *avctx, AVFrame *frame)
1509 {
1510  FramePool *pool = avctx->internal->pool;
1511  int i, ret;
1512 
1513  switch (avctx->codec_type) {
1514  case AVMEDIA_TYPE_VIDEO: {
1515  uint8_t *data[4];
1516  int linesize[4];
1517  int size[4] = { 0 };
1518  int w = frame->width;
1519  int h = frame->height;
1520  int tmpsize, unaligned;
1521 
1522  if (pool->format == frame->format &&
1523  pool->width == frame->width && pool->height == frame->height)
1524  return 0;
1525 
1526  avcodec_align_dimensions2(avctx, &w, &h, pool->stride_align);
1527 
1528  do {
1529  // NOTE: do not align linesizes individually, this breaks e.g. assumptions
1530  // that linesize[0] == 2*linesize[1] in the MPEG-encoder for 4:2:2
1531  ret = av_image_fill_linesizes(linesize, avctx->pix_fmt, w);
1532  if (ret < 0)
1533  return ret;
1534  // increase alignment of w for next try (rhs gives the lowest bit set in w)
1535  w += w & ~(w - 1);
1536 
1537  unaligned = 0;
1538  for (i = 0; i < 4; i++)
1539  unaligned |= linesize[i] % pool->stride_align[i];
1540  } while (unaligned);
1541 
1542  tmpsize = av_image_fill_pointers(data, avctx->pix_fmt, h,
1543  NULL, linesize);
1544  if (tmpsize < 0)
1545  return tmpsize;
1546 
1547  for (i = 0; i < 3 && data[i + 1]; i++)
1548  size[i] = data[i + 1] - data[i];
1549  size[i] = tmpsize - (data[i] - data[0]);
1550 
1551  for (i = 0; i < 4; i++) {
1552  av_buffer_pool_uninit(&pool->pools[i]);
1553  pool->linesize[i] = linesize[i];
1554  if (size[i]) {
1555  pool->pools[i] = av_buffer_pool_init(size[i] + 16 + STRIDE_ALIGN - 1,
1556  CONFIG_MEMORY_POISONING ?
1557  NULL :
1558  av_buffer_allocz);
1559  if (!pool->pools[i]) {
1560  ret = AVERROR(ENOMEM);
1561  goto fail;
1562  }
1563  }
1564  }
1565  pool->format = frame->format;
1566  pool->width = frame->width;
1567  pool->height = frame->height;
1568 
1569  break;
1570  }
1571  case AVMEDIA_TYPE_AUDIO: {
1572  int ch = frame->channels; //av_get_channel_layout_nb_channels(frame->channel_layout);
1573  int planar = av_sample_fmt_is_planar(frame->format);
1574  int planes = planar ? ch : 1;
1575 
1576  if (pool->format == frame->format && pool->planes == planes &&
1577  pool->channels == ch && frame->nb_samples == pool->samples)
1578  return 0;
1579 
1580  av_buffer_pool_uninit(&pool->pools[0]);
1581  ret = av_samples_get_buffer_size(&pool->linesize[0], ch,
1582  frame->nb_samples, frame->format, 0);
1583  if (ret < 0)
1584  goto fail;
1585 
1586  pool->pools[0] = av_buffer_pool_init(pool->linesize[0], NULL);
1587  if (!pool->pools[0]) {
1588  ret = AVERROR(ENOMEM);
1589  goto fail;
1590  }
1591 
1592  pool->format = frame->format;
1593  pool->planes = planes;
1594  pool->channels = ch;
1595  pool->samples = frame->nb_samples;
1596  break;
1597  }
1598  default: av_assert0(0);
1599  }
1600  return 0;
1601 fail:
1602  for (i = 0; i < 4; i++)
1603  av_buffer_pool_uninit(&pool->pools[i]);
1604  pool->format = -1;
1605  pool->planes = pool->channels = pool->samples = 0;
1606  pool->width = pool->height = 0;
1607  return ret;
1608 }
1609 
1610 static int audio_get_buffer(AVCodecContext *avctx, AVFrame *frame)
1611 {
1612  FramePool *pool = avctx->internal->pool;
1613  int planes = pool->planes;
1614  int i;
1615 
1616  frame->linesize[0] = pool->linesize[0];
1617 
1618  if (planes > AV_NUM_DATA_POINTERS) {
1619  frame->extended_data = av_mallocz_array(planes, sizeof(*frame->extended_data));
1620  frame->nb_extended_buf = planes - AV_NUM_DATA_POINTERS;
1621  frame->extended_buf = av_mallocz_array(frame->nb_extended_buf,
1622  sizeof(*frame->extended_buf));
1623  if (!frame->extended_data || !frame->extended_buf) {
1624  av_freep(&frame->extended_data);
1625  av_freep(&frame->extended_buf);
1626  return AVERROR(ENOMEM);
1627  }
1628  } else {
1629  frame->extended_data = frame->data;
1630  av_assert0(frame->nb_extended_buf == 0);
1631  }
1632 
1633  for (i = 0; i < FFMIN(planes, AV_NUM_DATA_POINTERS); i++) {
1634  frame->buf[i] = av_buffer_pool_get(pool->pools[0]);
1635  if (!frame->buf[i])
1636  goto fail;
1637  frame->extended_data[i] = frame->data[i] = frame->buf[i]->data;
1638  }
1639  for (i = 0; i < frame->nb_extended_buf; i++) {
1640  frame->extended_buf[i] = av_buffer_pool_get(pool->pools[0]);
1641  if (!frame->extended_buf[i])
1642  goto fail;
1643  frame->extended_data[i + AV_NUM_DATA_POINTERS] = frame->extended_buf[i]->data;
1644  }
1645 
1646  if (avctx->debug & FF_DEBUG_BUFFERS)
1647  av_log(avctx, AV_LOG_DEBUG, "default_get_buffer called on frame %p", frame);
1648 
1649  return 0;
1650 fail:
1651  av_frame_unref(frame);
1652  return AVERROR(ENOMEM);
1653 }
1654 
1655 static int video_get_buffer(AVCodecContext *s, AVFrame *pic)
1656 {
1657  FramePool *pool = s->internal->pool;
1658  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pic->format);
1659  int i;
1660 
1661  if (pic->data[0] || pic->data[1] || pic->data[2] || pic->data[3]) {
1662  av_log(s, AV_LOG_ERROR, "pic->data[*]!=NULL in avcodec_default_get_buffer\n");
1663  return -1;
1664  }
1665 
1666  if (!desc) {
1667  av_log(s, AV_LOG_ERROR,
1668  "Unable to get pixel format descriptor for format %s\n",
1669  av_get_pix_fmt_name(pic->format));
1670  return AVERROR(EINVAL);
1671  }
1672 
1673  memset(pic->data, 0, sizeof(pic->data));
1674  pic->extended_data = pic->data;
1675 
1676  for (i = 0; i < 4 && pool->pools[i]; i++) {
1677  pic->linesize[i] = pool->linesize[i];
1678 
1679  pic->buf[i] = av_buffer_pool_get(pool->pools[i]);
1680  if (!pic->buf[i])
1681  goto fail;
1682 
1683  pic->data[i] = pic->buf[i]->data;
1684  }
1685  for (; i < AV_NUM_DATA_POINTERS; i++) {
1686  pic->data[i] = NULL;
1687  pic->linesize[i] = 0;
1688  }
1689  if (desc->flags & AV_PIX_FMT_FLAG_PAL ||
1690  ((desc->flags & FF_PSEUDOPAL) && pic->data[1]))
1691  avpriv_set_systematic_pal2((uint32_t *)pic->data[1], pic->format);
1692 
1693  if (s->debug & FF_DEBUG_BUFFERS)
1694  av_log(s, AV_LOG_DEBUG, "default_get_buffer called on pic %p\n", pic);
1695 
1696  return 0;
1697 fail:
1698  av_frame_unref(pic);
1699  return AVERROR(ENOMEM);
1700 }
1701 
1702 int avcodec_default_get_buffer2(AVCodecContext *avctx, AVFrame *frame, int flags)
1703 {
1704  int ret;
1705 
1706  if (avctx->hw_frames_ctx) {
1707  ret = av_hwframe_get_buffer(avctx->hw_frames_ctx, frame, 0);
1708  frame->width = avctx->coded_width;
1709  frame->height = avctx->coded_height;
1710  return ret;
1711  }
1712 
1713  if ((ret = update_frame_pool(avctx, frame)) < 0)
1714  return ret;
1715 
1716  switch (avctx->codec_type) {
1717  case AVMEDIA_TYPE_VIDEO:
1718  return video_get_buffer(avctx, frame);
1719  case AVMEDIA_TYPE_AUDIO:
1720  return audio_get_buffer(avctx, frame);
1721  default:
1722  return -1;
1723  }
1724 }
1725 
1726 static int add_metadata_from_side_data(const AVPacket *avpkt, AVFrame *frame)
1727 {
1728  int size;
1729  const uint8_t *side_metadata;
1730 
1731  AVDictionary **frame_md = &frame->metadata;
1732 
1733  side_metadata = av_packet_get_side_data(avpkt,
1734  AV_PKT_DATA_STRINGS_METADATA, &size);
1735  return av_packet_unpack_dictionary(side_metadata, size, frame_md);
1736 }
1737 
1738 int ff_decode_frame_props(AVCodecContext *avctx, AVFrame *frame)
1739 {
1740  const AVPacket *pkt = avctx->internal->last_pkt_props;
1741  int i;
1742  static const struct {
1743  enum AVPacketSideDataType packet;
1744  enum AVFrameSideDataType frame;
1745  } sd[] = {
1746  { AV_PKT_DATA_REPLAYGAIN,                 AV_FRAME_DATA_REPLAYGAIN },
1747  { AV_PKT_DATA_DISPLAYMATRIX,              AV_FRAME_DATA_DISPLAYMATRIX },
1748  { AV_PKT_DATA_SPHERICAL,                  AV_FRAME_DATA_SPHERICAL },
1749  { AV_PKT_DATA_STEREO3D,                   AV_FRAME_DATA_STEREO3D },
1750  { AV_PKT_DATA_AUDIO_SERVICE_TYPE,         AV_FRAME_DATA_AUDIO_SERVICE_TYPE },
1751  { AV_PKT_DATA_MASTERING_DISPLAY_METADATA, AV_FRAME_DATA_MASTERING_DISPLAY_METADATA },
1752  { AV_PKT_DATA_CONTENT_LIGHT_LEVEL,        AV_FRAME_DATA_CONTENT_LIGHT_LEVEL },
1753  { AV_PKT_DATA_A53_CC,                     AV_FRAME_DATA_A53_CC },
1754  };
1755 
1756  if (pkt) {
1757  frame->pts = pkt->pts;
1758 #if FF_API_PKT_PTS
1759 FF_DISABLE_DEPRECATION_WARNINGS
1760  frame->pkt_pts = pkt->pts;
1761 FF_ENABLE_DEPRECATION_WARNINGS
1762 #endif
1763  frame->pkt_pos = pkt->pos;
1764  frame->pkt_duration = pkt->duration;
1765  frame->pkt_size = pkt->size;
1766 
1767  for (i = 0; i < FF_ARRAY_ELEMS(sd); i++) {
1768  int size;
1769  uint8_t *packet_sd = av_packet_get_side_data(pkt, sd[i].packet, &size);
1770  if (packet_sd) {
1771  AVFrameSideData *frame_sd = av_frame_new_side_data(frame,
1772  sd[i].frame,
1773  size);
1774  if (!frame_sd)
1775  return AVERROR(ENOMEM);
1776 
1777  memcpy(frame_sd->data, packet_sd, size);
1778  }
1779  }
1780  add_metadata_from_side_data(pkt, frame);
1781 
1782  if (pkt->flags & AV_PKT_FLAG_DISCARD) {
1783  frame->flags |= AV_FRAME_FLAG_DISCARD;
1784  } else {
1785  frame->flags = (frame->flags & ~AV_FRAME_FLAG_DISCARD);
1786  }
1787  }
1788  frame->reordered_opaque = avctx->reordered_opaque;
1789 
1790  if (frame->color_primaries == AVCOL_PRI_UNSPECIFIED)
1791  frame->color_primaries = avctx->color_primaries;
1792  if (frame->color_trc == AVCOL_TRC_UNSPECIFIED)
1793  frame->color_trc = avctx->color_trc;
1794  if (frame->colorspace == AVCOL_SPC_UNSPECIFIED)
1795  frame->colorspace = avctx->colorspace;
1796  if (frame->color_range == AVCOL_RANGE_UNSPECIFIED)
1797  frame->color_range = avctx->color_range;
1798  if (frame->chroma_location == AVCHROMA_LOC_UNSPECIFIED)
1799  frame->chroma_location = avctx->chroma_sample_location;
1800 
1801  switch (avctx->codec->type) {
1802  case AVMEDIA_TYPE_VIDEO:
1803  frame->format = avctx->pix_fmt;
1804  if (!frame->sample_aspect_ratio.num)
1805  frame->sample_aspect_ratio = avctx->sample_aspect_ratio;
1806 
1807  if (frame->width && frame->height &&
1808  av_image_check_sar(frame->width, frame->height,
1809  frame->sample_aspect_ratio) < 0) {
1810  av_log(avctx, AV_LOG_WARNING, "ignoring invalid SAR: %u/%u\n",
1811  frame->sample_aspect_ratio.num,
1812  frame->sample_aspect_ratio.den);
1813  frame->sample_aspect_ratio = (AVRational){ 0, 1 };
1814  }
1815 
1816  break;
1817  case AVMEDIA_TYPE_AUDIO:
1818  if (!frame->sample_rate)
1819  frame->sample_rate = avctx->sample_rate;
1820  if (frame->format < 0)
1821  frame->format = avctx->sample_fmt;
1822  if (!frame->channel_layout) {
1823  if (avctx->channel_layout) {
1824  if (av_get_channel_layout_nb_channels(avctx->channel_layout) !=
1825  avctx->channels) {
1826  av_log(avctx, AV_LOG_ERROR, "Inconsistent channel "
1827  "configuration.\n");
1828  return AVERROR(EINVAL);
1829  }
1830 
1831  frame->channel_layout = avctx->channel_layout;
1832  } else {
1833  if (avctx->channels > FF_SANE_NB_CHANNELS) {
1834  av_log(avctx, AV_LOG_ERROR, "Too many channels: %d.\n",
1835  avctx->channels);
1836  return AVERROR(ENOSYS);
1837  }
1838  }
1839  }
1840  frame->channels = avctx->channels;
1841  break;
1842  }
1843  return 0;
1844 }
1845 
1846 static void validate_avframe_allocation(AVCodecContext *avctx, AVFrame *frame)
1847 {
1848  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
1849  int i;
1850  int num_planes = av_pix_fmt_count_planes(frame->format);
1851  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
1852  int flags = desc ? desc->flags : 0;
1853  if (num_planes == 1 && (flags & AV_PIX_FMT_FLAG_PAL))
1854  num_planes = 2;
1855  if ((flags & FF_PSEUDOPAL) && frame->data[1])
1856  num_planes = 2;
1857  for (i = 0; i < num_planes; i++) {
1858  av_assert0(frame->data[i]);
1859  }
1860  // For formats without data like hwaccel allow unused pointers to be non-NULL.
1861  for (i = num_planes; num_planes > 0 && i < FF_ARRAY_ELEMS(frame->data); i++) {
1862  if (frame->data[i])
1863  av_log(avctx, AV_LOG_ERROR, "Buffer returned by get_buffer2() did not zero unused plane pointers\n");
1864  frame->data[i] = NULL;
1865  }
1866  }
1867 }
1868 
1869 static void decode_data_free(void *opaque, uint8_t *data)
1870 {
1871  FrameDecodeData *fdd = (FrameDecodeData*)data;
1872 
1873  if (fdd->post_process_opaque_free)
1874  fdd->post_process_opaque_free(fdd->post_process_opaque);
1875 
1876  if (fdd->hwaccel_priv_free)
1877  fdd->hwaccel_priv_free(fdd->hwaccel_priv);
1878 
1879  av_freep(&fdd);
1880 }
1881 
1882 int ff_attach_decode_data(AVFrame *frame)
1883 {
1884  AVBufferRef *fdd_buf;
1885  FrameDecodeData *fdd;
1886 
1887  av_assert1(!frame->private_ref);
1888  av_buffer_unref(&frame->private_ref);
1889 
1890  fdd = av_mallocz(sizeof(*fdd));
1891  if (!fdd)
1892  return AVERROR(ENOMEM);
1893 
1894  fdd_buf = av_buffer_create((uint8_t*)fdd, sizeof(*fdd), decode_data_free,
1895  NULL, AV_BUFFER_FLAG_READONLY);
1896  if (!fdd_buf) {
1897  av_freep(&fdd);
1898  return AVERROR(ENOMEM);
1899  }
1900 
1901  frame->private_ref = fdd_buf;
1902 
1903  return 0;
1904 }
1905 
1906 static int get_buffer_internal(AVCodecContext *avctx, AVFrame *frame, int flags)
1907 {
1908  const AVHWAccel *hwaccel = avctx->hwaccel;
1909  int override_dimensions = 1;
1910  int ret;
1911 
1912  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
1913  if ((ret = av_image_check_size2(FFALIGN(avctx->width, STRIDE_ALIGN), avctx->height, avctx->max_pixels, AV_PIX_FMT_NONE, 0, avctx)) < 0 || avctx->pix_fmt<0) {
1914  av_log(avctx, AV_LOG_ERROR, "video_get_buffer: image parameters invalid\n");
1915  return AVERROR(EINVAL);
1916  }
1917 
1918  if (frame->width <= 0 || frame->height <= 0) {
1919  frame->width = FFMAX(avctx->width, AV_CEIL_RSHIFT(avctx->coded_width, avctx->lowres));
1920  frame->height = FFMAX(avctx->height, AV_CEIL_RSHIFT(avctx->coded_height, avctx->lowres));
1921  override_dimensions = 0;
1922  }
1923 
1924  if (frame->data[0] || frame->data[1] || frame->data[2] || frame->data[3]) {
1925  av_log(avctx, AV_LOG_ERROR, "pic->data[*]!=NULL in get_buffer_internal\n");
1926  return AVERROR(EINVAL);
1927  }
1928  }
1929  ret = ff_decode_frame_props(avctx, frame);
1930  if (ret < 0)
1931  return ret;
1932 
1933  if (hwaccel) {
1934  if (hwaccel->alloc_frame) {
1935  ret = hwaccel->alloc_frame(avctx, frame);
1936  goto end;
1937  }
1938  } else
1939  avctx->sw_pix_fmt = avctx->pix_fmt;
1940 
1941  ret = avctx->get_buffer2(avctx, frame, flags);
1942  if (ret < 0)
1943  goto end;
1944 
1945  validate_avframe_allocation(avctx, frame);
1946 
1947  ret = ff_attach_decode_data(frame);
1948  if (ret < 0)
1949  goto end;
1950 
1951 end:
1952  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO && !override_dimensions &&
1953  !(avctx->codec->caps_internal & FF_CODEC_CAP_EXPORTS_CROPPING)) {
1954  frame->width = avctx->width;
1955  frame->height = avctx->height;
1956  }
1957 
1958  if (ret < 0)
1959  av_frame_unref(frame);
1960 
1961  return ret;
1962 }
1963 
1964 int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
1965 {
1966  int ret = get_buffer_internal(avctx, frame, flags);
1967  if (ret < 0) {
1968  av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
1969  frame->width = frame->height = 0;
1970  }
1971  return ret;
1972 }
1973 
1974 static int reget_buffer_internal(AVCodecContext *avctx, AVFrame *frame)
1975 {
1976  AVFrame *tmp;
1977  int ret;
1978 
1979  av_assert0(avctx->codec_type == AVMEDIA_TYPE_VIDEO);
1980 
1981  if (frame->data[0] && (frame->width != avctx->width || frame->height != avctx->height || frame->format != avctx->pix_fmt)) {
1982  av_log(avctx, AV_LOG_WARNING, "Picture changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s in reget buffer()\n",
1983  frame->width, frame->height, av_get_pix_fmt_name(frame->format), avctx->width, avctx->height, av_get_pix_fmt_name(avctx->pix_fmt));
1984  av_frame_unref(frame);
1985  }
1986 
1987  if (!frame->data[0])
1988  return ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF);
1989 
1990  if (av_frame_is_writable(frame))
1991  return ff_decode_frame_props(avctx, frame);
1992 
1993  tmp = av_frame_alloc();
1994  if (!tmp)
1995  return AVERROR(ENOMEM);
1996 
1997  av_frame_move_ref(tmp, frame);
1998 
1999  ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF);
2000  if (ret < 0) {
2001  av_frame_free(&tmp);
2002  return ret;
2003  }
2004 
2005  av_frame_copy(frame, tmp);
2006  av_frame_free(&tmp);
2007 
2008  return 0;
2009 }
2010 
2011 int ff_reget_buffer(AVCodecContext *avctx, AVFrame *frame)
2012 {
2013  int ret = reget_buffer_internal(avctx, frame);
2014  if (ret < 0)
2015  av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
2016  return ret;
2017 }
2018 
2019 static void bsfs_flush(AVCodecContext *avctx)
2020 {
2021  DecodeFilterContext *s = &avctx->internal->filter;
2022 
2023  for (int i = 0; i < s->nb_bsfs; i++)
2024  av_bsf_flush(s->bsfs[i]);
2025 }
2026 
2027 void avcodec_flush_buffers(AVCodecContext *avctx)
2028 {
2029  avctx->internal->draining = 0;
2030  avctx->internal->draining_done = 0;
2031  avctx->internal->nb_draining_errors = 0;
2032  av_frame_unref(avctx->internal->buffer_frame);
2033  av_frame_unref(avctx->internal->compat_decode_frame);
2034  av_packet_unref(avctx->internal->buffer_pkt);
2035  avctx->internal->buffer_pkt_valid = 0;
2036 
2037  av_packet_unref(avctx->internal->ds.in_pkt);
2038 
2039  if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME)
2040  ff_thread_flush(avctx);
2041  else if (avctx->codec->flush)
2042  avctx->codec->flush(avctx);
2043 
2044  avctx->pts_correction_last_pts =
2045  avctx->pts_correction_last_dts = INT64_MIN;
2046 
2047  bsfs_flush(avctx);
2048 
2049  if (!avctx->refcounted_frames)
2050  av_frame_unref(avctx->internal->to_free);
2051 }
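
From the API user's side, avcodec_flush_buffers() is what gets called after a seek so that stale frames, buffered packets and the PTS-correction history do not leak into the new position. A minimal sketch (error handling omitted, function name invented):

    /* Illustrative only. */
    #include "libavformat/avformat.h"

    static void seek_and_flush(AVFormatContext *fmt, AVCodecContext *dec,
                               int stream_index, int64_t ts)
    {
        av_seek_frame(fmt, stream_index, ts, AVSEEK_FLAG_BACKWARD);
        avcodec_flush_buffers(dec);   /* decoder restarts from a clean state */
    }
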
2052 
2053 void ff_decode_bsfs_uninit(AVCodecContext *avctx)
2054 {
2055  DecodeFilterContext *s = &avctx->internal->filter;
2056  int i;
2057 
2058  for (i = 0; i < s->nb_bsfs; i++)
2059  av_bsf_free(&s->bsfs[i]);
2060  av_freep(&s->bsfs);
2061  s->nb_bsfs = 0;
2062 }
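
The DecodeFilterContext freed above wraps the public bitstream-filter API. Roughly the same flow, written against that public av_bsf_* API (the packet consumer is left abstract), might look like the following sketch:

    /* Illustrative only; setup mirrors ff_decode_bsfs_init():
     *   av_bsf_get_by_name() -> av_bsf_alloc() -> fill bsf->par_in / time_base_in
     *   -> av_bsf_init(); av_bsf_flush() on seek; av_bsf_free() on close. */
    static int filter_one_packet(AVBSFContext *bsf, AVPacket *pkt)
    {
        int ret = av_bsf_send_packet(bsf, pkt);   /* takes ownership of pkt contents */
        if (ret < 0)
            return ret;
        while ((ret = av_bsf_receive_packet(bsf, pkt)) == 0) {
            /* ... feed pkt to the decoder ... */
            av_packet_unref(pkt);
        }
        return ret == AVERROR(EAGAIN) ? 0 : ret;  /* EAGAIN just means "send more input" */
    }
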
#define AV_PIX_FMT_FLAG_PAL
Pixel format has a palette in data[1], values are indexes in this palette.
Definition: pixdesc.h:132
#define FF_SANE_NB_CHANNELS
Definition: internal.h:86
This struct aggregates all the (hardware/vendor-specific) "high-level" state, i.e.
Definition: hwcontext.h:60
#define FF_COMPLIANCE_EXPERIMENTAL
Allow nonstandardized experimental things.
Definition: avcodec.h:2629
enum AVPixelFormat(* get_format)(struct AVCodecContext *s, const enum AVPixelFormat *fmt)
callback to negotiate the pixelFormat
Definition: avcodec.h:1817
AVCodecHWConfig public
This is the structure which will be returned to the user by avcodec_get_hw_config().
Definition: hwaccel.h:34
int nb_draining_errors
Definition: internal.h:220
#define FF_SUB_CHARENC_MODE_PRE_DECODER
the AVPacket data needs to be recoded to UTF-8 before being fed to the decoder, requires iconv ...
Definition: avcodec.h:3159
void av_bsf_free(AVBSFContext **ctx)
Free a bitstream filter context and everything associated with it; write NULL into the supplied point...
Definition: bsf.c:35
#define NULL
Definition: coverity.c:32
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
Definition: decode.c:1371
const struct AVCodec * codec
Definition: avcodec.h:1574
AVRational framerate
Definition: avcodec.h:3101
const char const char void * val
Definition: avisynth_c.h:863
const AVCodecDescriptor * codec_descriptor
AVCodecDescriptor.
Definition: avcodec.h:3122
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
const AVClass * priv_class
A class for the private data, used to declare bitstream filter private AVOptions. ...
Definition: avcodec.h:5827
#define AV_NUM_DATA_POINTERS
Definition: frame.h:269
AVCodecParameters * par_out
Parameters of the output stream.
Definition: avcodec.h:5793
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:94
AVPacketSideDataType
Definition: avcodec.h:1184
int64_t pts_correction_num_faulty_dts
Number of incorrect DTS values so far.
Definition: avcodec.h:3139
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it...
Definition: buffer.c:125
#define GET_UTF8(val, GET_BYTE, ERROR)
Convert a UTF-8 character (up to 4 bytes) to its 32-bit UCS-4 encoded form.
Definition: common.h:385
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2522
This structure describes decoded (raw) audio or video data.
Definition: frame.h:268
int stride_align[AV_NUM_DATA_POINTERS]
Definition: internal.h:112
AVOption.
Definition: opt.h:246
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:100
This side data must be associated with an audio frame and corresponds to enum AVAudioServiceType defi...
Definition: frame.h:113
int apply_cropping
Video decoding only.
Definition: avcodec.h:3346
static int decode_receive_frame_internal(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:637
const struct AVCodecHWConfigInternal ** hw_configs
Array of pointers to hardware configurations supported by the codec, or NULL if no hardware supported...
Definition: avcodec.h:3618
#define AV_CODEC_FLAG2_SKIP_MANUAL
Do not skip samples and export skip information as frame side data.
Definition: avcodec.h:959
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:1753
int64_t pkt_pos
reordered pos from the last AVPacket that has been input into the decoder
Definition: frame.h:539
int capabilities
Hardware accelerated codec capabilities.
Definition: avcodec.h:3678
const char * fmt
Definition: avisynth_c.h:861
AVPacket * last_pkt_props
Properties (timestamps+side data) extracted from the last packet passed for decoding.
Definition: internal.h:172
misc image utilities
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
static int convert_sub_to_old_ass_form(AVSubtitle *sub, const AVPacket *pkt, AVRational tb)
Definition: decode.c:1012
int changed_frames_dropped
Definition: internal.h:223
int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2562
AVFrame * to_free
Definition: internal.h:159
int64_t pos
byte position in stream, -1 if unknown
Definition: avcodec.h:1497
static void get_subtitle_defaults(AVSubtitle *sub)
Definition: decode.c:913
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:104
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:459
static int decode_simple_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:624
const char * desc
Definition: nvenc.c:68
int width
Definition: internal.h:111
This side data should be associated with a video stream and contains Stereoscopic 3D information in f...
Definition: avcodec.h:1258
ATSC A53 Part 4 Closed Captions.
Definition: avcodec.h:1386
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:2196
int nb_extended_buf
Number of elements in extended_buf.
Definition: frame.h:477
int ff_decode_bsfs_init(AVCodecContext *avctx)
Definition: decode.c:185
Content light level (based on CTA-861.3).
Definition: frame.h:136
int num
Numerator.
Definition: rational.h:59
The bitstream filter state.
Definition: avcodec.h:5759
int size
Definition: avcodec.h:1478
int initial_channels
Definition: internal.h:227
const AVBitStreamFilter * av_bsf_get_by_name(const char *name)
enum AVPixelFormat pix_fmt
Supported pixel format.
Definition: avcodec.h:3672
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel...
Definition: avcodec.h:1944
void(* hwaccel_priv_free)(void *priv)
Definition: decode.h:53
#define AV_CODEC_PROP_TEXT_SUB
Subtitle codec is text based.
Definition: avcodec.h:780
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1775
int samples
Definition: internal.h:116
static int decode_simple_internal(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:402
int attribute_align_arg avcodec_decode_audio4(AVCodecContext *avctx, AVFrame *frame, int *got_frame_ptr, const AVPacket *avpkt)
Decode the audio frame of size avpkt->size from avpkt->data into frame.
Definition: decode.c:905
enum AVPixelFormat format
The pixel format identifying the underlying HW surface type.
Definition: hwcontext.h:208
Mastering display metadata associated with a video frame.
Definition: frame.h:119
unsigned num_rects
Definition: avcodec.h:3933
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:582
static void bsfs_flush(AVCodecContext *avctx)
Definition: decode.c:2019
enum AVMediaType type
Definition: avcodec.h:3490
static int apply_param_change(AVCodecContext *avctx, const AVPacket *avpkt)
Definition: decode.c:48
static int get_buffer_internal(AVCodecContext *avctx, AVFrame *frame, int flags)
Definition: decode.c:1906
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:236
static int recode_subtitle(AVCodecContext *avctx, AVPacket *outpkt, const AVPacket *inpkt)
Definition: decode.c:920
AVBufferPool * pools[4]
Pools for each data plane.
Definition: internal.h:105
int ff_decode_frame_props(AVCodecContext *avctx, AVFrame *frame)
Set various frame properties from the codec context / packet data.
Definition: decode.c:1738
size_t crop_bottom
Definition: frame.h:628
static AVPacket pkt
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, AVPacket *avpkt)
Decode a subtitle message.
Definition: decode.c:1070
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:2725
void * priv_data
Opaque filter-specific private data.
Definition: avcodec.h:5780
int(* alloc_frame)(AVCodecContext *avctx, AVFrame *frame)
Allocate a custom buffer.
Definition: avcodec.h:3691
static int utf8_check(const uint8_t *str)
Definition: decode.c:978
int av_bsf_init(AVBSFContext *ctx)
Prepare the filter for use, after all the parameters and options have been set.
Definition: bsf.c:134
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:235
int ff_reget_buffer(AVCodecContext *avctx, AVFrame *frame)
Identical in function to av_frame_make_writable(), except it uses ff_get_buffer() to allocate the buf...
Definition: decode.c:2011
static int apply_cropping(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:713
void ff_decode_bsfs_uninit(AVCodecContext *avctx)
Definition: decode.c:2053
Mastering display metadata (based on SMPTE-2086:2014).
Definition: avcodec.h:1366
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avcodec.h:1688
int av_get_channel_layout_nb_channels(uint64_t channel_layout)
Return the number of channels in the channel layout.
AVSubtitleRect ** rects
Definition: avcodec.h:3934
int av_codec_is_decoder(const AVCodec *codec)
Definition: utils.c:99
int(* uninit)(AVCodecContext *avctx)
Uninitialize the hwaccel private data.
Definition: avcodec.h:3783
int av_bsf_alloc(const AVBitStreamFilter *filter, AVBSFContext **ctx)
Allocate a context for a given bitstream filter.
Definition: bsf.c:81
int av_opt_set_from_string(void *ctx, const char *opts, const char *const *shorthand, const char *key_val_sep, const char *pairs_sep)
Parse the key-value pairs list in opts.
Definition: opt.c:1506
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: avcodec.h:1006
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
const char * name
Definition: opt.h:247
int av_bsf_receive_packet(AVBSFContext *ctx, AVPacket *pkt)
Retrieve a filtered packet.
Definition: bsf.c:211
DecodeFilterContext filter
Definition: internal.h:166
int height
Definition: internal.h:111
enum AVPixelFormat avcodec_default_get_format(struct AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Definition: decode.c:1159
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:2229
uint8_t
#define av_malloc(s)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:189
int ff_attach_decode_data(AVFrame *frame)
Definition: decode.c:1882
int av_packet_unpack_dictionary(const uint8_t *data, int size, AVDictionary **dict)
Unpack a dictionary from side_data.
Definition: avpacket.c:523
The codec supports this format by some ad-hoc method.
Definition: avcodec.h:3448
AVOptions.
static int64_t guess_correct_pts(AVCodecContext *ctx, int64_t reordered_pts, int64_t dts)
Attempt to guess proper monotonic timestamps for decoded video frames which might have incorrect time...
Definition: decode.c:370
size_t crop_left
Definition: frame.h:629
The codec supports this format via the hw_device_ctx interface.
Definition: avcodec.h:3423
int avpriv_set_systematic_pal2(uint32_t pal[256], enum AVPixelFormat pix_fmt)
Definition: imgutils.c:152
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:90
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: avcodec.h:1495
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: avcodec.h:1252
#define AV_CODEC_FLAG_UNALIGNED
Allow decoders to produce frames with data planes that are not aligned to CPU requirements (e...
Definition: avcodec.h:846
#define AV_WL8(p, d)
Definition: intreadwrite.h:399
Multithreading support functions.
AVBufferRef * private_ref
AVBufferRef for internal use by a single libav* library.
Definition: frame.h:646
int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt)
Called by decoders to get the next packet for decoding.
Definition: decode.c:329
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:361
enum AVPixelFormat pix_fmt
A hardware pixel format which the codec can use.
Definition: avcodec.h:3455
int planes
Definition: internal.h:114
Structure to hold side data for an AVFrame.
Definition: frame.h:201
int av_image_check_sar(unsigned int w, unsigned int h, AVRational sar)
Check if the given sample aspect ratio of an image is valid.
Definition: imgutils.c:287
size_t compat_decode_consumed
Definition: internal.h:209
static void finish(void)
Definition: movenc.c:345
uint8_t * data
Definition: avcodec.h:1477
#define AVERROR_EOF
End of file.
Definition: error.h:55
AVDictionary * metadata
metadata.
Definition: frame.h:554
int lowres
low resolution decoding, 1-> 1/2 size, 2->1/4 size
Definition: avcodec.h:2800
#define AV_BUFFER_FLAG_READONLY
Always treat the buffer as read-only, even when it has only one reference.
Definition: buffer.h:113
int(* init)(AVCodecContext *avctx)
Initialize the hwaccel private data.
Definition: avcodec.h:3775
ptrdiff_t size
Definition: opengl_enc.c:100
int initial_height
Definition: internal.h:225
int initial_format
Definition: internal.h:224
The data represents the AVSphericalMapping structure defined in libavutil/spherical.h.
Definition: frame.h:130
void * av_realloc_array(void *ptr, size_t nmemb, size_t size)
Definition: mem.c:198
static int bsfs_poll(AVCodecContext *avctx, AVPacket *pkt)
Definition: decode.c:291
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:2203
#define FFALIGN(x, a)
Definition: macros.h:48
#define av_log(a,...)
const char * name
Definition: pixdesc.h:82
int av_packet_ref(AVPacket *dst, const AVPacket *src)
Setup a new reference to the data described by a given packet.
Definition: avpacket.c:608
int av_sample_fmt_is_planar(enum AVSampleFormat sample_fmt)
Check if the sample format is planar.
Definition: samplefmt.c:112
FramePool * pool
Definition: internal.h:161
void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height, int linesize_align[AV_NUM_DATA_POINTERS])
Modify width and height values so that they will result in a memory buffer that is acceptable for the...
Definition: utils.c:154
int ff_thread_decode_frame(AVCodecContext *avctx, AVFrame *picture, int *got_picture_ptr, AVPacket *avpkt)
Submit a new frame to a decoding thread.
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
#define AV_RL8(x)
Definition: intreadwrite.h:398
int av_new_packet(AVPacket *pkt, int size)
Allocate the payload of a packet and initialize its fields with default values.
Definition: avpacket.c:86
AVRational pkt_timebase
Timebase in which pkt_dts/pts and AVPacket.dts/pts are.
Definition: avcodec.h:3115
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:260
int avcodec_parameters_copy(AVCodecParameters *dst, const AVCodecParameters *src)
Copy the contents of src to dst.
Definition: utils.c:2013
int width
Definition: frame.h:326
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:1855
void * post_process_opaque
Definition: decode.h:46
#define AV_BPRINT_SIZE_UNLIMITED
static int hwaccel_init(AVCodecContext *avctx, const AVCodecHWConfigInternal *hw_config)
Definition: decode.c:1322
static void validate_avframe_allocation(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:1846
An AV_PKT_DATA_PARAM_CHANGE side data packet is laid out as follows:
Definition: avcodec.h:1216
int av_frame_apply_cropping(AVFrame *frame, int flags)
Crop the given video AVFrame according to its crop_left/crop_top/crop_right/ crop_bottom fields...
Definition: frame.c:881
uint8_t * av_packet_get_side_data(const AVPacket *pkt, enum AVPacketSideDataType type, int *size)
Get side information from packet.
Definition: avpacket.c:350
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:202
int64_t pts_correction_last_pts
PTS of the last frame.
Definition: avcodec.h:3140
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:2839
int methods
Bit set of AV_CODEC_HW_CONFIG_METHOD_* flags, describing the possible setup methods which can be used...
Definition: avcodec.h:3460
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:69
int avcodec_is_open(AVCodecContext *s)
Definition: utils.c:1871
int attribute_align_arg avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Return decoded output data from a decoder.
Definition: decode.c:740
static int extract_packet_props(AVCodecInternal *avci, const AVPacket *pkt)
Definition: decode.c:126
AVFrame * buffer_frame
Definition: internal.h:202
int capabilities
Codec capabilities.
Definition: avcodec.h:3496
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: frame.h:512
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
ATSC A53 Part 4 Closed Captions.
Definition: frame.h:58
AVBufferRef * buf
A reference to the reference-counted buffer where the packet data is stored.
Definition: avcodec.h:1460
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:1645
enum AVColorSpace colorspace
YUV colorspace type.
Definition: frame.h:523
AVRational time_base_in
The timebase used for the timestamps of the input packets.
Definition: avcodec.h:5799
#define AV_PIX_FMT_FLAG_HWACCEL
Pixel format is an HW accelerated format.
Definition: pixdesc.h:140
simple assert() macros that are a bit more flexible than ISO C assert().
const AVOption * av_opt_next(const void *obj, const AVOption *last)
Iterate over all AVOptions belonging to obj.
Definition: opt.c:45
int side_data_elems
Definition: avcodec.h:1489
AVBufferRef * av_buffer_create(uint8_t *data, int size, void(*free)(void *opaque, uint8_t *data), void *opaque, int flags)
Create an AVBuffer from an existing array.
Definition: buffer.c:28
int64_t max_pixels
The number of pixels per image to maximally accept.
Definition: avcodec.h:3288
int av_hwframe_ctx_init(AVBufferRef *ref)
Finalize the context before use.
Definition: hwcontext.c:329
static int reget_buffer_internal(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:1974
enum AVHWDeviceType type
This field identifies the underlying API used for hardware access.
Definition: hwcontext.h:78
#define FFMAX(a, b)
Definition: common.h:94
int av_hwframe_get_buffer(AVBufferRef *hwframe_ref, AVFrame *frame, int flags)
Allocate a new frame attached to the given AVHWFramesContext.
Definition: hwcontext.c:465
#define fail()
Definition: checkasm.h:120
char * av_get_token(const char **buf, const char *term)
Unescape the given string until a non escaped terminating char, and return the token corresponding to...
Definition: avstring.c:149
Apply the maximum possible cropping, even if it requires setting the AVFrame.data[] entries to unalig...
Definition: frame.h:913
int av_frame_copy(AVFrame *dst, const AVFrame *src)
Copy the frame data from src to dst.
Definition: frame.c:792
const AVHWAccel * hwaccel
If this configuration uses a hwaccel, a pointer to it.
Definition: hwaccel.h:39
#define FF_CODEC_CAP_EXPORTS_CROPPING
The decoder sets the cropping fields in the output frames manually.
Definition: internal.h:66
int priv_data_size
Size of the private data to allocate in AVCodecInternal.hwaccel_priv_data.
Definition: avcodec.h:3789
int flags
A combination of AV_PKT_FLAG values.
Definition: avcodec.h:1483
reference-counted frame API
uint64_t channel_layout
Audio channel layout.
Definition: avcodec.h:2272
uint32_t end_display_time
Definition: avcodec.h:3932
int64_t pts
Same as packet pts, in AV_TIME_BASE.
Definition: avcodec.h:3935
uint64_t channel_layout
Channel layout of the audio data.
Definition: frame.h:445
int props
Codec properties, a combination of AV_CODEC_PROP_* flags.
Definition: avcodec.h:732
size_t crop_top
Definition: frame.h:627
common internal API header
uint64_t flags
Combination of AV_PIX_FMT_FLAG_...
Definition: pixdesc.h:106
int initial_pool_size
Initial size of the frame pool.
Definition: hwcontext.h:198
int av_packet_copy_props(AVPacket *dst, const AVPacket *src)
Copy only "properties" fields from src to dst.
Definition: avpacket.c:565
int channels
number of audio channels, only used for audio.
Definition: frame.h:573
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:505
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:2690
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:2831
const char * name
Name of the hardware accelerated codec.
Definition: avcodec.h:3651
#define FFMIN(a, b)
Definition: common.h:96
AVBufferRef ** extended_buf
For planar audio which requires more than AV_NUM_DATA_POINTERS AVBufferRef pointers, this array will hold all the references which cannot fit into AVFrame.buf.
Definition: frame.h:473
int channels
Definition: internal.h:115
AVFrame * compat_decode_frame
Definition: internal.h:213
int width
picture width / height.
Definition: avcodec.h:1738
uint8_t w
Definition: llviddspenc.c:38
AVBufferRef * hw_frames_ctx
A reference to the AVHWFramesContext describing the input (for encoding) or output (decoding) frames...
Definition: avcodec.h:3258
static int add_metadata_from_side_data(const AVPacket *avpkt, AVFrame *frame)
Definition: decode.c:1726
AVRational time_base_out
The timebase used for the timestamps of the output packets.
Definition: avcodec.h:5805
static int compat_decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, const AVPacket *pkt)
Definition: decode.c:816
AVPacket * in_pkt
Definition: internal.h:120
This side data should be associated with a video stream and corresponds to the AVSphericalMapping str...
Definition: avcodec.h:1372
int av_bsf_send_packet(AVBSFContext *ctx, AVPacket *pkt)
Submit a packet for filtering.
Definition: bsf.c:185
AVFormatContext * ctx
Definition: movenc.c:48
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
Definition: avcodec.h:2175
AVFrameSideDataType
Definition: frame.h:48
#define AVERROR_INPUT_CHANGED
Input changed between calls. Reconfiguration is required. (can be OR-ed with AVERROR_OUTPUT_CHANGED) ...
Definition: error.h:73
uint16_t format
Definition: avcodec.h:3930
#define s(width, name)
Definition: cbs_vp9.c:257
#define FF_DEBUG_BUFFERS
Definition: avcodec.h:2668
int64_t reordered_opaque
opaque 64-bit number (generally a PTS) that will be reordered and output in AVFrame.reordered_opaque
Definition: avcodec.h:2718
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: avcodec.h:2701
int n
Definition: avisynth_c.h:760
static int av_bprint_is_complete(const AVBPrint *buf)
Test if the print buffer is complete (not truncated).
Definition: bprint.h:185
const char * bsfs
Decoding only, a comma-separated list of bitstream filters to apply to packets before decoding...
Definition: avcodec.h:3609
DecodeSimpleContext ds
Definition: internal.h:165
char * sub_charenc
Character encoding of the input subtitles file.
Definition: avcodec.h:3148
static int audio_get_buffer(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:1610
int draining
checks API usage: after codec draining, flush is required to resume operation
Definition: internal.h:195
#define FF_ARRAY_ELEMS(a)
int thread_count
thread count is used to decide how many independent tasks should be passed to execute() ...
Definition: avcodec.h:2820
int linesize[4]
Definition: internal.h:113
int sub_charenc_mode
Subtitles character encoding mode.
Definition: avcodec.h:3156
void avcodec_flush_buffers(AVCodecContext *avctx)
Reset the internal decoder state / flush internal buffers.
Definition: decode.c:2027
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
int(* post_process)(void *logctx, AVFrame *frame)
The callback to perform some delayed processing on the frame right before it is returned to the calle...
Definition: decode.h:45
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:341
Content light level (based on CTA-861.3).
Definition: avcodec.h:1379
int attribute_align_arg avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
Supply raw packet data as input to a decoder.
Definition: decode.c:677
void(* post_process_opaque_free)(void *opaque)
Definition: decode.h:47
#define AV_LOG_INFO
Standard information.
Definition: log.h:187
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:251
int avcodec_default_get_buffer2(AVCodecContext *avctx, AVFrame *frame, int flags)
The default callback for AVCodecContext.get_buffer2().
Definition: decode.c:1702
int av_image_fill_pointers(uint8_t *data[4], enum AVPixelFormat pix_fmt, int height, uint8_t *ptr, const int linesizes[4])
Fill plane data pointers for an image with pixel format pix_fmt and height height.
Definition: imgutils.c:111
Libavcodec external API header.
enum AVMediaType codec_type
Definition: avcodec.h:1573
int compat_decode_warned
Definition: internal.h:206
int64_t pkt_duration
duration of the corresponding packet, expressed in AVStream->time_base units, 0 if unknown...
Definition: frame.h:547
A list of zero terminated key/value strings.
Definition: avcodec.h:1316
int attribute_align_arg avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture, int *got_picture_ptr, const AVPacket *avpkt)
Decode the video frame of size avpkt->size from avpkt->data into picture.
Definition: decode.c:898
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: frame.h:84
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:260
int av_frame_is_writable(AVFrame *frame)
Check if the frame data is writable.
Definition: frame.c:594
int sample_rate
samples per second
Definition: avcodec.h:2221
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:299
int initial_sample_rate
Definition: internal.h:226
int debug
debug
Definition: avcodec.h:2646
static AVRational av_make_q(int num, int den)
Create an AVRational.
Definition: rational.h:71
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
const AVCodecHWConfig * avcodec_get_hw_config(const AVCodec *codec, int index)
Retrieve supported hardware configurations for a codec.
Definition: utils.c:1757
main external API structure.
Definition: avcodec.h:1565
int(* receive_frame)(AVCodecContext *avctx, AVFrame *frame)
Decode API with decoupled packet/frame dataflow.
Definition: avcodec.h:3593
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:599
int skip_samples_multiplier
Definition: internal.h:217
uint8_t * data
The data buffer.
Definition: buffer.h:89
void avsubtitle_free(AVSubtitle *sub)
Free all allocated data in the given subtitle struct.
Definition: utils.c:1054
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:356
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1964
uint8_t * data
Definition: frame.h:203
#define AV_CODEC_PROP_BITMAP_SUB
Subtitle codec is bitmap based. Decoded AVSubtitle data can be read from the AVSubtitleRect->pict fiel...
Definition: avcodec.h:775
int av_samples_copy(uint8_t **dst, uint8_t *const *src, int dst_offset, int src_offset, int nb_samples, int nb_channels, enum AVSampleFormat sample_fmt)
Copy samples from src to dst.
Definition: samplefmt.c:213
void * buf
Definition: avisynth_c.h:766
size_t crop_right
Definition: frame.h:630
AVBufferRef * av_buffer_allocz(int size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
Definition: buffer.c:83
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:50
int coded_height
Definition: avcodec.h:1753
int64_t reordered_opaque
reordered opaque 64 bits (generally an integer or a double precision float PTS but can be anything)...
Definition: frame.h:435
int sample_rate
Sample rate of the audio data.
Definition: frame.h:440
int(* get_buffer2)(struct AVCodecContext *s, AVFrame *frame, int flags)
This callback is called at the beginning of each frame to get data buffer(s) for it.
Definition: avcodec.h:2376
int showed_multi_packet_warning
Definition: internal.h:215
int av_image_fill_linesizes(int linesizes[4], enum AVPixelFormat pix_fmt, int width)
Fill plane linesizes for an image with pixel format pix_fmt and width width.
Definition: imgutils.c:89
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, int size)
Add a new side data to a frame.
Definition: frame.c:722
const char * av_hwdevice_get_type_name(enum AVHWDeviceType type)
Get the string name of an AVHWDeviceType.
Definition: hwcontext.c:88
#define AV_CODEC_CAP_SUBFRAMES
Codec can output multiple frames per AVPacket. Normally demuxers return one frame at a time...
Definition: avcodec.h:1024
void av_buffer_pool_uninit(AVBufferPool **ppool)
Mark the pool as being available for freeing.
Definition: buffer.c:275
int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples, enum AVSampleFormat sample_fmt, int align)
Get the required buffer size for the given audio parameters.
Definition: samplefmt.c:119
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:2189
Rational number (pair of numerator and denominator).
Definition: rational.h:58
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
Definition: avcodec.h:2182
int avcodec_parameters_from_context(AVCodecParameters *par, const AVCodecContext *codec)
Fill the parameters struct based on the values from the supplied codec context.
Definition: utils.c:2031
Recommends skipping the specified number of samples.
Definition: avcodec.h:1300
This struct describes a set or pool of "hardware" frames (i.e.
Definition: hwcontext.h:123
int sub_text_format
Control the form of AVSubtitle.rects[N]->ass.
Definition: avcodec.h:3265
int buffer_pkt_valid
Definition: internal.h:201
int skip_samples
Number of audio samples to skip at the start of the next decoded frame.
Definition: internal.h:185
#define STRIDE_ALIGN
Definition: internal.h:97
enum AVChromaLocation chroma_location
Definition: frame.h:525
int(* frame_params)(AVCodecContext *avctx, AVBufferRef *hw_frames_ctx)
Fill the given hw_frames context with current codec parameters.
Definition: avcodec.h:3804
int64_t best_effort_timestamp
frame timestamp estimated using various heuristics, in stream time base
Definition: frame.h:532
The codec supports this format by some internal method.
Definition: avcodec.h:3439
attribute_deprecated int refcounted_frames
If non-zero, the decoded audio and video frames returned from avcodec_decode_video2() and avcodec_dec...
Definition: avcodec.h:2392
#define AV_CODEC_FLAG_DROPCHANGED
Don't output frames whose parameters differ from first decoded frame in stream.
Definition: avcodec.h:867
int size
Size of data in bytes.
Definition: buffer.h:93
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:553
static int64_t pts
#define SIZE_SPECIFIER
Definition: internal.h:262
#define flags(name, subs,...)
Definition: cbs_av1.c:561
This side data should be associated with an audio stream and contains ReplayGain information in form ...
Definition: avcodec.h:1243
The codec supports this format via the hw_frames_ctx interface.
Definition: avcodec.h:3432
#define FF_CODEC_CAP_SETS_PKT_DTS
Decoders marked with FF_CODEC_CAP_SETS_PKT_DTS want to set AVFrame.pkt_dts manually.
Definition: internal.h:55
int ff_decode_get_hw_frames_ctx(AVCodecContext *avctx, enum AVHWDeviceType dev_type)
Make sure avctx.hw_frames_ctx is set.
Definition: decode.c:1222
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:282
static void decode_data_free(void *opaque, uint8_t *data)
Definition: decode.c:1869
#define UTF8_MAX_BYTES
Definition: decode.c:919
static const struct @307 planes[]
attribute_deprecated int64_t pkt_pts
PTS copied from the AVPacket that was decoded to produce this frame.
Definition: frame.h:369
void av_bprint_clear(AVBPrint *buf)
Reset the string to "" but keep internal allocated data.
Definition: bprint.c:227
char * av_strtok(char *s, const char *delim, char **saveptr)
Split the string into several tokens which can be accessed by successive calls to av_strtok()...
Definition: avstring.c:184
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
AVPacket * buffer_pkt
buffers for using new encode/decode API through legacy API
Definition: internal.h:200
int64_t pkt_dts
DTS copied from the AVPacket that triggered returning this frame.
Definition: frame.h:377
A reference to a data buffer.
Definition: buffer.h:81
int extra_hw_frames
Definition: avcodec.h:3360
static int unrefcount_frame(AVCodecInternal *avci, AVFrame *frame)
Definition: decode.c:139
AVPacketSideData * side_data
Additional packet data that can be provided by the container.
Definition: avcodec.h:1488
This struct stores per-frame lavc-internal data and is attached to it via private_ref.
Definition: decode.h:34
int avcodec_get_hw_frames_parameters(AVCodecContext *avctx, AVBufferRef *device_ref, enum AVPixelFormat hw_pix_fmt, AVBufferRef **out_frames_ref)
Create and return an AVHWFramesContext with values adequate for hardware decoding. ...
Definition: decode.c:1273
static enum AVPixelFormat hw_pix_fmt
Definition: hw_decode.c:46
#define AV_PKT_FLAG_DISCARD
Flag is used to discard packets which are required to maintain valid decoder state but are not requir...
Definition: avcodec.h:1516
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:84
int(* decode)(AVCodecContext *, void *outdata, int *outdata_size, AVPacket *avpkt)
Definition: avcodec.h:3575
common internal api header.
common internal and external API header
AVBufferPool * av_buffer_pool_init(int size, AVBufferRef *(*alloc)(int size))
Allocate and initialize a buffer pool.
Definition: buffer.c:238
#define AV_HWACCEL_CODEC_CAP_EXPERIMENTAL
HWAccel is experimental and is thus avoided in favor of non experimental codecs.
Definition: avcodec.h:3811
#define AV_CODEC_CAP_PARAM_CHANGE
Codec supports changed parameters at any point.
Definition: avcodec.h:1045
void(* flush)(AVCodecContext *)
Flush buffers.
Definition: avcodec.h:3598
AVBufferRef * av_hwframe_ctx_alloc(AVBufferRef *device_ref_in)
Allocate an AVHWFramesContext tied to a given device context.
Definition: hwcontext.c:243
static void insert_ts(AVBPrint *buf, int ts)
Definition: decode.c:998
void * hwaccel_priv_data
hwaccel-specific private data
Definition: internal.h:190
int caps_internal
Internal codec capabilities.
Definition: avcodec.h:3603
int den
Denominator.
Definition: rational.h:60
int av_image_check_size2(unsigned int w, unsigned int h, int64_t max_pixels, enum AVPixelFormat pix_fmt, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of a plane of an image with...
Definition: imgutils.c:253
uint64_t initial_channel_layout
Definition: internal.h:228
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
Definition: avcodec.h:790
AVBSFContext ** bsfs
Definition: internal.h:125
static int video_get_buffer(AVCodecContext *s, AVFrame *pic)
Definition: decode.c:1655
Formatted text, the ass field must be set by the decoder and is authoritative.
Definition: avcodec.h:3889
#define FF_PSEUDOPAL
Definition: internal.h:367
AVHWDeviceType
Definition: hwcontext.h:27
void ff_thread_flush(AVCodecContext *avctx)
Wait for decoding threads to finish and reset internal state.
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:85
int channels
number of audio channels
Definition: avcodec.h:2222
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:1600
char * ass
0 terminated ASS/SSA compatible event line.
Definition: avcodec.h:3924
#define AV_FRAME_FLAG_DISCARD
A flag to mark the frames which need to be decoded, but shouldn't be output.
Definition: frame.h:497
int flags2
AV_CODEC_FLAG2_*.
Definition: avcodec.h:1652
enum AVColorPrimaries color_primaries
Definition: frame.h:514
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed...
Definition: avcodec.h:1476
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
int64_t pts_correction_last_dts
DTS of the last frame.
Definition: avcodec.h:3141
size_t compat_decode_partial_size
Definition: internal.h:212
#define AV_CODEC_FLAG_TRUNCATED
Input bitstream might be truncated at a random location instead of only at frame boundaries.
Definition: avcodec.h:892
int frame_number
Frame counter, set by libavcodec.
Definition: avcodec.h:2252
static int update_frame_pool(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:1508
int height
Definition: frame.h:326
void av_bsf_flush(AVBSFContext *ctx)
Reset the internal bitstream filter state / flush internal buffers.
Definition: bsf.c:175
#define av_freep(p)
int64_t pts_correction_num_faulty_pts
Current statistics for PTS correction.
Definition: avcodec.h:3138
enum AVColorTransferCharacteristic color_trc
Definition: frame.h:516
AVBufferRef * av_buffer_pool_get(AVBufferPool *pool)
Allocate a new AVBuffer, reusing an old buffer from the pool when available.
Definition: buffer.c:334
Recommends skipping the specified number of samples.
Definition: frame.h:108
void * hwaccel_priv
Per-frame private data for hwaccels.
Definition: decode.h:52
#define av_malloc_array(a, b)
enum AVHWDeviceType device_type
The device type associated with the configuration.
Definition: avcodec.h:3467
#define FF_SUB_TEXT_FMT_ASS_WITH_TIMINGS
Definition: avcodec.h:3268
#define FF_SUB_CHARENC_MODE_IGNORE
neither convert the subtitles, nor check them for valid UTF-8
Definition: avcodec.h:3160
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2438
ReplayGain information in the form of the AVReplayGain struct.
Definition: frame.h:76
enum AVSubtitleType type
Definition: avcodec.h:3915
uint8_t ** extended_data
pointers to the data planes/channels.
Definition: frame.h:315
int format
Definition: internal.h:110
AVBufferRef * hw_device_ctx
A reference to the AVHWDeviceContext describing the device which will be used by a hardware encoder/d...
Definition: avcodec.h:3310
int pkt_size
size of the corresponding packet containing the compressed frame.
Definition: frame.h:582
float min
Stereoscopic 3d metadata.
Definition: frame.h:63
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
This structure stores compressed data.
Definition: avcodec.h:1454
AVCodecParameters * par_in
Parameters of the input stream.
Definition: avcodec.h:5787
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:1176
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:334
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
Definition: avcodec.h:2624
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: avcodec.h:981
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: avcodec.h:1470
enum AVPixelFormat sw_pix_fmt
Nominal unaccelerated pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:3108
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:654
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
static void hwaccel_uninit(AVCodecContext *avctx)
Definition: decode.c:1359
#define tb
Definition: regdef.h:68
#define AV_WL32(p, v)
Definition: intreadwrite.h:426
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:58
void * av_mallocz_array(size_t nmemb, size_t size)
Definition: mem.c:191
This side data should be associated with an audio stream and corresponds to enum AVAudioServiceType.
Definition: avcodec.h:1264
static uint8_t tmp[11]
Definition: aes_ctr.c:26