FFmpeg
decode.c
1 /*
2  * generic decoding-related code
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <stdint.h>
22 #include <string.h>
23 
24 #include "config.h"
25 
26 #if CONFIG_ICONV
27 # include <iconv.h>
28 #endif
29 
30 #include "libavutil/avassert.h"
31 #include "libavutil/avstring.h"
32 #include "libavutil/bprint.h"
33 #include "libavutil/common.h"
34 #include "libavutil/frame.h"
35 #include "libavutil/hwcontext.h"
36 #include "libavutil/imgutils.h"
37 #include "libavutil/internal.h"
38 #include "libavutil/intmath.h"
39 #include "libavutil/opt.h"
40 
41 #include "avcodec.h"
42 #include "bytestream.h"
43 #include "decode.h"
44 #include "hwconfig.h"
45 #include "internal.h"
46 #include "thread.h"
47 
48 typedef struct FramePool {
49  /**
50  * Pools for each data plane. For audio all the planes have the same size,
51  * so only pools[0] is used.
52  */
53  AVBufferPool *pools[4];
54 
55  /*
56  * Pool parameters
57  */
58  int format;
59  int width, height;
60  int stride_align[AV_NUM_DATA_POINTERS];
61  int linesize[4];
62  int planes;
63  int channels;
64  int samples;
65 } FramePool;
66 
67 static int apply_param_change(AVCodecContext *avctx, const AVPacket *avpkt)
68 {
69  int size, ret;
70  const uint8_t *data;
71  uint32_t flags;
72  int64_t val;
73 
74  data = av_packet_get_side_data(avpkt, AV_PKT_DATA_PARAM_CHANGE, &size);
75  if (!data)
76  return 0;
77 
78  if (!(avctx->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE)) {
79  av_log(avctx, AV_LOG_ERROR, "This decoder does not support parameter "
80  "changes, but PARAM_CHANGE side data was sent to it.\n");
81  ret = AVERROR(EINVAL);
82  goto fail2;
83  }
84 
85  if (size < 4)
86  goto fail;
87 
88  flags = bytestream_get_le32(&data);
89  size -= 4;
90 
91  if (flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT) {
92  if (size < 4)
93  goto fail;
94  val = bytestream_get_le32(&data);
95  if (val <= 0 || val > INT_MAX) {
96  av_log(avctx, AV_LOG_ERROR, "Invalid channel count");
97  ret = AVERROR_INVALIDDATA;
98  goto fail2;
99  }
100  avctx->channels = val;
101  size -= 4;
102  }
103  if (flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT) {
104  if (size < 8)
105  goto fail;
106  avctx->channel_layout = bytestream_get_le64(&data);
107  size -= 8;
108  }
109  if (flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE) {
110  if (size < 4)
111  goto fail;
112  val = bytestream_get_le32(&data);
113  if (val <= 0 || val > INT_MAX) {
114  av_log(avctx, AV_LOG_ERROR, "Invalid sample rate");
115  ret = AVERROR_INVALIDDATA;
116  goto fail2;
117  }
118  avctx->sample_rate = val;
119  size -= 4;
120  }
121  if (flags & AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS) {
122  if (size < 8)
123  goto fail;
124  avctx->width = bytestream_get_le32(&data);
125  avctx->height = bytestream_get_le32(&data);
126  size -= 8;
127  ret = ff_set_dimensions(avctx, avctx->width, avctx->height);
128  if (ret < 0)
129  goto fail2;
130  }
131 
132  return 0;
133 fail:
134  av_log(avctx, AV_LOG_ERROR, "PARAM_CHANGE side data too small.\n");
135  ret = AVERROR_INVALIDDATA;
136 fail2:
137  if (ret < 0) {
138  av_log(avctx, AV_LOG_ERROR, "Error applying parameter changes.\n");
139  if (avctx->err_recognition & AV_EF_EXPLODE)
140  return ret;
141  }
142  return 0;
143 }
144 
145 static int extract_packet_props(AVCodecInternal *avci, const AVPacket *pkt)
146 {
147  int ret = 0;
148 
149  av_packet_unref(avci->last_pkt_props);
150  if (pkt) {
151  ret = av_packet_copy_props(avci->last_pkt_props, pkt);
152  if (!ret)
153  avci->last_pkt_props->size = pkt->size; // HACK: Needed for ff_decode_frame_props().
154  }
155  return ret;
156 }
157 
158 static int unrefcount_frame(AVCodecInternal *avci, AVFrame *frame)
159 {
160  int ret;
161 
162  /* move the original frame to our backup */
163  av_frame_unref(avci->to_free);
164  av_frame_move_ref(avci->to_free, frame);
165 
166  /* now copy everything except the AVBufferRefs back
167  * note that we make a COPY of the side data, so calling av_frame_free() on
168  * the caller's frame will work properly */
169  ret = av_frame_copy_props(frame, avci->to_free);
170  if (ret < 0)
171  return ret;
172 
173  memcpy(frame->data, avci->to_free->data, sizeof(frame->data));
174  memcpy(frame->linesize, avci->to_free->linesize, sizeof(frame->linesize));
175  if (avci->to_free->extended_data != avci->to_free->data) {
176  int planes = avci->to_free->channels;
177  int size = planes * sizeof(*frame->extended_data);
178 
179  if (!size) {
180  av_frame_unref(frame);
181  return AVERROR_BUG;
182  }
183 
184  frame->extended_data = av_malloc(size);
185  if (!frame->extended_data) {
186  av_frame_unref(frame);
187  return AVERROR(ENOMEM);
188  }
189  memcpy(frame->extended_data, avci->to_free->extended_data,
190  size);
191  } else
192  frame->extended_data = frame->data;
193 
194  frame->format = avci->to_free->format;
195  frame->width = avci->to_free->width;
196  frame->height = avci->to_free->height;
197  frame->channel_layout = avci->to_free->channel_layout;
198  frame->nb_samples = avci->to_free->nb_samples;
199  frame->channels = avci->to_free->channels;
200 
201  return 0;
202 }
203 
204 int ff_decode_bsfs_init(AVCodecContext *avctx)
205 {
206  AVCodecInternal *avci = avctx->internal;
207  int ret;
208 
209  if (avci->bsf)
210  return 0;
211 
212  ret = av_bsf_list_parse_str(avctx->codec->bsfs, &avci->bsf);
213  if (ret < 0) {
214  av_log(avctx, AV_LOG_ERROR, "Error parsing decoder bitstream filters '%s': %s\n", avctx->codec->bsfs, av_err2str(ret));
215  if (ret != AVERROR(ENOMEM))
216  ret = AVERROR_BUG;
217  goto fail;
218  }
219 
220  /* We do not currently have an API for passing the input timebase into decoders,
221  * but no filters used here should actually need it.
222  * So we make up some plausible-looking number (the MPEG 90kHz timebase) */
223  avci->bsf->time_base_in = (AVRational){ 1, 90000 };
224  ret = avcodec_parameters_from_context(avci->bsf->par_in, avctx);
225  if (ret < 0)
226  goto fail;
227 
228  ret = av_bsf_init(avci->bsf);
229  if (ret < 0)
230  goto fail;
231 
232  return 0;
233 fail:
234  av_bsf_free(&avci->bsf);
235  return ret;
236 }
237 
238 int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt)
239 {
240  AVCodecInternal *avci = avctx->internal;
241  int ret;
242 
243  if (avci->draining)
244  return AVERROR_EOF;
245 
246  ret = av_bsf_receive_packet(avci->bsf, pkt);
247  if (ret == AVERROR_EOF)
248  avci->draining = 1;
249  if (ret < 0)
250  return ret;
251 
252  ret = extract_packet_props(avctx->internal, pkt);
253  if (ret < 0)
254  goto finish;
255 
256  ret = apply_param_change(avctx, pkt);
257  if (ret < 0)
258  goto finish;
259 
260  if (avctx->codec->receive_frame)
261  avci->compat_decode_consumed += pkt->size;
262 
263  return 0;
264 finish:
265  av_packet_unref(pkt);
266  return ret;
267 }
268 
269 /**
270  * Attempt to guess proper monotonic timestamps for decoded video frames
271  * which might have incorrect times. Input timestamps may wrap around, in
272  * which case the output will as well.
273  *
274  * @param pts the pts field of the decoded AVPacket, as passed through
275  * AVFrame.pts
276  * @param dts the dts field of the decoded AVPacket
277  * @return one of the input values, may be AV_NOPTS_VALUE
278  */
279 static int64_t guess_correct_pts(AVCodecContext *ctx,
280  int64_t reordered_pts, int64_t dts)
281 {
282  int64_t pts = AV_NOPTS_VALUE;
283 
284  if (dts != AV_NOPTS_VALUE) {
285  ctx->pts_correction_num_faulty_dts += dts <= ctx->pts_correction_last_dts;
286  ctx->pts_correction_last_dts = dts;
287  } else if (reordered_pts != AV_NOPTS_VALUE)
288  ctx->pts_correction_last_dts = reordered_pts;
289 
290  if (reordered_pts != AV_NOPTS_VALUE) {
291  ctx->pts_correction_num_faulty_pts += reordered_pts <= ctx->pts_correction_last_pts;
292  ctx->pts_correction_last_pts = reordered_pts;
293  } else if(dts != AV_NOPTS_VALUE)
294  ctx->pts_correction_last_pts = dts;
295 
296  if ((ctx->pts_correction_num_faulty_pts<=ctx->pts_correction_num_faulty_dts || dts == AV_NOPTS_VALUE)
297  && reordered_pts != AV_NOPTS_VALUE)
298  pts = reordered_pts;
299  else
300  pts = dts;
301 
302  return pts;
303 }
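/* For illustration (not from the source): with packet pts 1,3,2,4 and dts
 * 1,2,3,4, the out-of-order pts on the third frame pushes
 * pts_correction_num_faulty_pts above pts_correction_num_faulty_dts, so from
 * that frame on the function returns the monotonic dts values, yielding the
 * sequence 1, 3, 3, 4. */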
304 
305 /*
306  * The core of the receive_frame_wrapper for the decoders implementing
307  * the simple API. Certain decoders might consume partial packets without
308  * returning any output, so this function needs to be called in a loop until it
309  * returns EAGAIN.
310  **/
311 static inline int decode_simple_internal(AVCodecContext *avctx, AVFrame *frame)
312 {
313  AVCodecInternal *avci = avctx->internal;
314  DecodeSimpleContext *ds = &avci->ds;
315  AVPacket *pkt = ds->in_pkt;
316  // copy to ensure we do not change pkt
317  int got_frame, actual_got_frame;
318  int ret;
319 
320  if (!pkt->data && !avci->draining) {
321  av_packet_unref(pkt);
322  ret = ff_decode_get_packet(avctx, pkt);
323  if (ret < 0 && ret != AVERROR_EOF)
324  return ret;
325  }
326 
327  // Some codecs (at least wma lossless) will crash when feeding drain packets
328  // after EOF was signaled.
329  if (avci->draining_done)
330  return AVERROR_EOF;
331 
332  if (!pkt->data &&
333  !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY ||
334  avctx->active_thread_type & FF_THREAD_FRAME))
335  return AVERROR_EOF;
336 
337  got_frame = 0;
338 
339  if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME) {
340  ret = ff_thread_decode_frame(avctx, frame, &got_frame, pkt);
341  } else {
342  ret = avctx->codec->decode(avctx, frame, &got_frame, pkt);
343 
344  if (!(avctx->codec->caps_internal & FF_CODEC_CAP_SETS_PKT_DTS))
345  frame->pkt_dts = pkt->dts;
346  if (avctx->codec->type == AVMEDIA_TYPE_VIDEO) {
347  if(!avctx->has_b_frames)
348  frame->pkt_pos = pkt->pos;
349  //FIXME these should be under if(!avctx->has_b_frames)
350  /* get_buffer is supposed to set frame parameters */
351  if (!(avctx->codec->capabilities & AV_CODEC_CAP_DR1)) {
352  if (!frame->sample_aspect_ratio.num) frame->sample_aspect_ratio = avctx->sample_aspect_ratio;
353  if (!frame->width) frame->width = avctx->width;
354  if (!frame->height) frame->height = avctx->height;
355  if (frame->format == AV_PIX_FMT_NONE) frame->format = avctx->pix_fmt;
356  }
357  }
358  }
359  emms_c();
360  actual_got_frame = got_frame;
361 
362  if (avctx->codec->type == AVMEDIA_TYPE_VIDEO) {
363  if (frame->flags & AV_FRAME_FLAG_DISCARD)
364  got_frame = 0;
365  if (got_frame)
366  frame->best_effort_timestamp = guess_correct_pts(avctx,
367  frame->pts,
368  frame->pkt_dts);
369  } else if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) {
370  uint8_t *side;
371  int side_size;
372  uint32_t discard_padding = 0;
373  uint8_t skip_reason = 0;
374  uint8_t discard_reason = 0;
375 
376  if (ret >= 0 && got_frame) {
377  frame->best_effort_timestamp = guess_correct_pts(avctx,
378  frame->pts,
379  frame->pkt_dts);
380  if (frame->format == AV_SAMPLE_FMT_NONE)
381  frame->format = avctx->sample_fmt;
382  if (!frame->channel_layout)
383  frame->channel_layout = avctx->channel_layout;
384  if (!frame->channels)
385  frame->channels = avctx->channels;
386  if (!frame->sample_rate)
387  frame->sample_rate = avctx->sample_rate;
388  }
389 
390  side= av_packet_get_side_data(avci->last_pkt_props, AV_PKT_DATA_SKIP_SAMPLES, &side_size);
391  if(side && side_size>=10) {
392  avci->skip_samples = AV_RL32(side) * avci->skip_samples_multiplier;
393  discard_padding = AV_RL32(side + 4);
394  av_log(avctx, AV_LOG_DEBUG, "skip %d / discard %d samples due to side data\n",
395  avci->skip_samples, (int)discard_padding);
396  skip_reason = AV_RL8(side + 8);
397  discard_reason = AV_RL8(side + 9);
398  }
399 
400  if ((frame->flags & AV_FRAME_FLAG_DISCARD) && got_frame &&
401  !(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
402  avci->skip_samples = FFMAX(0, avci->skip_samples - frame->nb_samples);
403  got_frame = 0;
404  }
405 
406  if (avci->skip_samples > 0 && got_frame &&
407  !(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
408  if(frame->nb_samples <= avci->skip_samples){
409  got_frame = 0;
410  avci->skip_samples -= frame->nb_samples;
411  av_log(avctx, AV_LOG_DEBUG, "skip whole frame, skip left: %d\n",
412  avci->skip_samples);
413  } else {
414  av_samples_copy(frame->extended_data, frame->extended_data, 0, avci->skip_samples,
415  frame->nb_samples - avci->skip_samples, avctx->channels, frame->format);
416  if(avctx->pkt_timebase.num && avctx->sample_rate) {
417  int64_t diff_ts = av_rescale_q(avci->skip_samples,
418  (AVRational){1, avctx->sample_rate},
419  avctx->pkt_timebase);
420  if(frame->pts!=AV_NOPTS_VALUE)
421  frame->pts += diff_ts;
422 #if FF_API_PKT_PTS
423 FF_DISABLE_DEPRECATION_WARNINGS
424  if(frame->pkt_pts!=AV_NOPTS_VALUE)
425  frame->pkt_pts += diff_ts;
426 FF_ENABLE_DEPRECATION_WARNINGS
427 #endif
428  if(frame->pkt_dts!=AV_NOPTS_VALUE)
429  frame->pkt_dts += diff_ts;
430  if (frame->pkt_duration >= diff_ts)
431  frame->pkt_duration -= diff_ts;
432  } else {
433  av_log(avctx, AV_LOG_WARNING, "Could not update timestamps for skipped samples.\n");
434  }
435  av_log(avctx, AV_LOG_DEBUG, "skip %d/%d samples\n",
436  avci->skip_samples, frame->nb_samples);
437  frame->nb_samples -= avci->skip_samples;
438  avci->skip_samples = 0;
439  }
440  }
441 
442  if (discard_padding > 0 && discard_padding <= frame->nb_samples && got_frame &&
443  !(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
444  if (discard_padding == frame->nb_samples) {
445  got_frame = 0;
446  } else {
447  if(avctx->pkt_timebase.num && avctx->sample_rate) {
448  int64_t diff_ts = av_rescale_q(frame->nb_samples - discard_padding,
449  (AVRational){1, avctx->sample_rate},
450  avctx->pkt_timebase);
451  frame->pkt_duration = diff_ts;
452  } else {
453  av_log(avctx, AV_LOG_WARNING, "Could not update timestamps for discarded samples.\n");
454  }
455  av_log(avctx, AV_LOG_DEBUG, "discard %d/%d samples\n",
456  (int)discard_padding, frame->nb_samples);
457  frame->nb_samples -= discard_padding;
458  }
459  }
460 
461  if ((avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL) && got_frame) {
462  AVFrameSideData *fside = av_frame_new_side_data(frame, AV_FRAME_DATA_SKIP_SAMPLES, 10);
463  if (fside) {
464  AV_WL32(fside->data, avci->skip_samples);
465  AV_WL32(fside->data + 4, discard_padding);
466  AV_WL8(fside->data + 8, skip_reason);
467  AV_WL8(fside->data + 9, discard_reason);
468  avci->skip_samples = 0;
469  }
470  }
471  }
472 
473  if (avctx->codec->type == AVMEDIA_TYPE_AUDIO &&
474  !avci->showed_multi_packet_warning &&
475  ret >= 0 && ret != pkt->size && !(avctx->codec->capabilities & AV_CODEC_CAP_SUBFRAMES)) {
476  av_log(avctx, AV_LOG_WARNING, "Multiple frames in a packet.\n");
477  avci->showed_multi_packet_warning = 1;
478  }
479 
480  if (!got_frame)
481  av_frame_unref(frame);
482 
483  if (ret >= 0 && avctx->codec->type == AVMEDIA_TYPE_VIDEO && !(avctx->flags & AV_CODEC_FLAG_TRUNCATED))
484  ret = pkt->size;
485 
486 #if FF_API_AVCTX_TIMEBASE
487  if (avctx->framerate.num > 0 && avctx->framerate.den > 0)
488  avctx->time_base = av_inv_q(av_mul_q(avctx->framerate, (AVRational){avctx->ticks_per_frame, 1}));
489 #endif
490 
491  /* do not stop draining when actual_got_frame != 0 or ret < 0 */
492  /* got_frame == 0 but actual_got_frame != 0 when frame is discarded */
493  if (avci->draining && !actual_got_frame) {
494  if (ret < 0) {
495  /* prevent infinite loop if a decoder wrongly always returns an error on draining */
496  /* reasonable nb_errors_max = maximum b frames + thread count */
497  int nb_errors_max = 20 + (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME ?
498  avctx->thread_count : 1);
499 
500  if (avci->nb_draining_errors++ >= nb_errors_max) {
501  av_log(avctx, AV_LOG_ERROR, "Too many errors when draining, this is a bug. "
502  "Stop draining and force EOF.\n");
503  avci->draining_done = 1;
504  ret = AVERROR_BUG;
505  }
506  } else {
507  avci->draining_done = 1;
508  }
509  }
510 
511  avci->compat_decode_consumed += ret;
512 
513  if (ret >= pkt->size || ret < 0) {
514  av_packet_unref(pkt);
515  } else {
516  int consumed = ret;
517 
518  pkt->data += consumed;
519  pkt->size -= consumed;
520  avci->last_pkt_props->size -= consumed; // See extract_packet_props() comment.
521  pkt->pts = AV_NOPTS_VALUE;
522  pkt->dts = AV_NOPTS_VALUE;
523  avci->last_pkt_props->pts = AV_NOPTS_VALUE;
524  avci->last_pkt_props->dts = AV_NOPTS_VALUE;
525  }
526 
527  if (got_frame)
528  av_assert0(frame->buf[0]);
529 
530  return ret < 0 ? ret : 0;
531 }
532 
533 static int decode_simple_receive_frame(AVCodecContext *avctx, AVFrame *frame)
534 {
535  int ret;
536 
537  while (!frame->buf[0]) {
538  ret = decode_simple_internal(avctx, frame);
539  if (ret < 0)
540  return ret;
541  }
542 
543  return 0;
544 }
545 
546 static int decode_receive_frame_internal(AVCodecContext *avctx, AVFrame *frame)
547 {
548  AVCodecInternal *avci = avctx->internal;
549  int ret;
550 
551  av_assert0(!frame->buf[0]);
552 
553  if (avctx->codec->receive_frame)
554  ret = avctx->codec->receive_frame(avctx, frame);
555  else
556  ret = decode_simple_receive_frame(avctx, frame);
557 
558  if (ret == AVERROR_EOF)
559  avci->draining_done = 1;
560 
561  if (!ret) {
562  /* the only case where decode data is not set should be decoders
563  * that do not call ff_get_buffer() */
564  av_assert0((frame->private_ref && frame->private_ref->size == sizeof(FrameDecodeData)) ||
565  !(avctx->codec->capabilities & AV_CODEC_CAP_DR1));
566 
567  if (frame->private_ref) {
568  FrameDecodeData *fdd = (FrameDecodeData*)frame->private_ref->data;
569 
570  if (fdd->post_process) {
571  ret = fdd->post_process(avctx, frame);
572  if (ret < 0) {
573  av_frame_unref(frame);
574  return ret;
575  }
576  }
577  }
578  }
579 
580  /* free the per-frame decode data */
581  av_buffer_unref(&frame->private_ref);
582 
583  return ret;
584 }
585 
586 int attribute_align_arg avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
587 {
588  AVCodecInternal *avci = avctx->internal;
589  int ret;
590 
591  if (!avcodec_is_open(avctx) || !av_codec_is_decoder(avctx->codec))
592  return AVERROR(EINVAL);
593 
594  if (avctx->internal->draining)
595  return AVERROR_EOF;
596 
597  if (avpkt && !avpkt->size && avpkt->data)
598  return AVERROR(EINVAL);
599 
600  av_packet_unref(avci->buffer_pkt);
601  if (avpkt && (avpkt->data || avpkt->side_data_elems)) {
602  ret = av_packet_ref(avci->buffer_pkt, avpkt);
603  if (ret < 0)
604  return ret;
605  }
606 
607  ret = av_bsf_send_packet(avci->bsf, avci->buffer_pkt);
608  if (ret < 0) {
609  av_packet_unref(avci->buffer_pkt);
610  return ret;
611  }
612 
613  if (!avci->buffer_frame->buf[0]) {
614  ret = decode_receive_frame_internal(avctx, avci->buffer_frame);
615  if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
616  return ret;
617  }
618 
619  return 0;
620 }
621 
622 static int apply_cropping(AVCodecContext *avctx, AVFrame *frame)
623 {
624  /* make sure we are noisy about decoders returning invalid cropping data */
625  if (frame->crop_left >= INT_MAX - frame->crop_right ||
626  frame->crop_top >= INT_MAX - frame->crop_bottom ||
627  (frame->crop_left + frame->crop_right) >= frame->width ||
628  (frame->crop_top + frame->crop_bottom) >= frame->height) {
629  av_log(avctx, AV_LOG_WARNING,
630  "Invalid cropping information set by a decoder: "
631  "%"SIZE_SPECIFIER"/%"SIZE_SPECIFIER"/%"SIZE_SPECIFIER"/%"SIZE_SPECIFIER" "
632  "(frame size %dx%d). This is a bug, please report it\n",
633  frame->crop_left, frame->crop_right, frame->crop_top, frame->crop_bottom,
634  frame->width, frame->height);
635  frame->crop_left = 0;
636  frame->crop_right = 0;
637  frame->crop_top = 0;
638  frame->crop_bottom = 0;
639  return 0;
640  }
641 
642  if (!avctx->apply_cropping)
643  return 0;
644 
645  return av_frame_apply_cropping(frame, avctx->flags & AV_CODEC_FLAG_UNALIGNED ?
646  AV_FRAME_CROP_UNALIGNED : 0);
647 }
648 
649 int attribute_align_arg avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
650 {
651  AVCodecInternal *avci = avctx->internal;
652  int ret, changed;
653 
654  av_frame_unref(frame);
655 
656  if (!avcodec_is_open(avctx) || !av_codec_is_decoder(avctx->codec))
657  return AVERROR(EINVAL);
658 
659  if (avci->buffer_frame->buf[0]) {
660  av_frame_move_ref(frame, avci->buffer_frame);
661  } else {
662  ret = decode_receive_frame_internal(avctx, frame);
663  if (ret < 0)
664  return ret;
665  }
666 
667  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
668  ret = apply_cropping(avctx, frame);
669  if (ret < 0) {
670  av_frame_unref(frame);
671  return ret;
672  }
673  }
674 
675  avctx->frame_number++;
676 
677  if (avctx->flags & AV_CODEC_FLAG_DROPCHANGED) {
678 
679  if (avctx->frame_number == 1) {
680  avci->initial_format = frame->format;
681  switch(avctx->codec_type) {
682  case AVMEDIA_TYPE_VIDEO:
683  avci->initial_width = frame->width;
684  avci->initial_height = frame->height;
685  break;
686  case AVMEDIA_TYPE_AUDIO:
687  avci->initial_sample_rate = frame->sample_rate ? frame->sample_rate :
688  avctx->sample_rate;
689  avci->initial_channels = frame->channels;
690  avci->initial_channel_layout = frame->channel_layout;
691  break;
692  }
693  }
694 
695  if (avctx->frame_number > 1) {
696  changed = avci->initial_format != frame->format;
697 
698  switch(avctx->codec_type) {
699  case AVMEDIA_TYPE_VIDEO:
700  changed |= avci->initial_width != frame->width ||
701  avci->initial_height != frame->height;
702  break;
703  case AVMEDIA_TYPE_AUDIO:
704  changed |= avci->initial_sample_rate != frame->sample_rate ||
705  avci->initial_sample_rate != avctx->sample_rate ||
706  avci->initial_channels != frame->channels ||
707  avci->initial_channel_layout != frame->channel_layout;
708  break;
709  }
710 
711  if (changed) {
712  avci->changed_frames_dropped++;
713  av_log(avctx, AV_LOG_INFO, "dropped changed frame #%d pts %"PRId64
714  " drop count: %d \n",
715  avctx->frame_number, frame->pts,
716  avci->changed_frames_dropped);
717  av_frame_unref(frame);
718  return AVERROR_INPUT_CHANGED;
719  }
720  }
721  }
722  return 0;
723 }
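/* Illustrative usage sketch for the avcodec_send_packet()/avcodec_receive_frame()
 * pair implemented above. dec_ctx, pkt and frame are assumed to have been
 * allocated and opened by the caller; error handling is abbreviated. */
#if 0
static int decode_packet(AVCodecContext *dec_ctx, const AVPacket *pkt, AVFrame *frame)
{
    // Send one packet (pass NULL to enter draining mode), then pull every
    // frame that becomes available before feeding the next packet.
    int ret = avcodec_send_packet(dec_ctx, pkt);
    if (ret < 0)
        return ret;

    while (ret >= 0) {
        ret = avcodec_receive_frame(dec_ctx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;   // decoder needs more input, or is fully drained
        if (ret < 0)
            return ret; // genuine decoding error

        /* ... consume frame ... */
        av_frame_unref(frame);
    }
    return 0;
}
#endif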
724 
725 static int compat_decode(AVCodecContext *avctx, AVFrame *frame,
726  int *got_frame, const AVPacket *pkt)
727 {
728  AVCodecInternal *avci = avctx->internal;
729  int ret = 0;
730 
731  av_assert0(avci->compat_decode_consumed == 0);
732 
733  if (avci->draining_done && pkt && pkt->size != 0) {
734  av_log(avctx, AV_LOG_WARNING, "Got unexpected packet after EOF\n");
735  avcodec_flush_buffers(avctx);
736  }
737 
738  *got_frame = 0;
739 
740  if (avci->compat_decode_partial_size > 0 &&
741  avci->compat_decode_partial_size != pkt->size) {
742  av_log(avctx, AV_LOG_ERROR,
743  "Got unexpected packet size after a partial decode\n");
744  ret = AVERROR(EINVAL);
745  goto finish;
746  }
747 
748  if (!avci->compat_decode_partial_size) {
749  ret = avcodec_send_packet(avctx, pkt);
750  if (ret == AVERROR_EOF)
751  ret = 0;
752  else if (ret == AVERROR(EAGAIN)) {
753  /* we fully drain all the output in each decode call, so this should not
754  * ever happen */
755  ret = AVERROR_BUG;
756  goto finish;
757  } else if (ret < 0)
758  goto finish;
759  }
760 
761  while (ret >= 0) {
762  ret = avcodec_receive_frame(avctx, frame);
763  if (ret < 0) {
764  if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
765  ret = 0;
766  goto finish;
767  }
768 
769  if (frame != avci->compat_decode_frame) {
770  if (!avctx->refcounted_frames) {
771  ret = unrefcount_frame(avci, frame);
772  if (ret < 0)
773  goto finish;
774  }
775 
776  *got_frame = 1;
777  frame = avci->compat_decode_frame;
778  } else {
779  if (!avci->compat_decode_warned) {
780  av_log(avctx, AV_LOG_WARNING, "The deprecated avcodec_decode_* "
781  "API cannot return all the frames for this decoder. "
782  "Some frames will be dropped. Update your code to the "
783  "new decoding API to fix this.\n");
784  avci->compat_decode_warned = 1;
785  }
786  }
787 
788  if (avci->draining || (!avctx->codec->bsfs && avci->compat_decode_consumed < pkt->size))
789  break;
790  }
791 
792 finish:
793  if (ret == 0) {
794  /* if there are any bsfs then assume full packet is always consumed */
795  if (avctx->codec->bsfs)
796  ret = pkt->size;
797  else
798  ret = FFMIN(avci->compat_decode_consumed, pkt->size);
799  }
800  avci->compat_decode_consumed = 0;
801  avci->compat_decode_partial_size = (ret >= 0) ? pkt->size - ret : 0;
802 
803  return ret;
804 }
805 
806 int attribute_align_arg avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture,
807  int *got_picture_ptr,
808  const AVPacket *avpkt)
809 {
810  return compat_decode(avctx, picture, got_picture_ptr, avpkt);
811 }
812 
813 int attribute_align_arg avcodec_decode_audio4(AVCodecContext *avctx,
814  AVFrame *frame,
815  int *got_frame_ptr,
816  const AVPacket *avpkt)
817 {
818  return compat_decode(avctx, frame, got_frame_ptr, avpkt);
819 }
820 
821 static void get_subtitle_defaults(AVSubtitle *sub)
822 {
823  memset(sub, 0, sizeof(*sub));
824  sub->pts = AV_NOPTS_VALUE;
825 }
826 
827 #define UTF8_MAX_BYTES 4 /* 5 and 6 bytes sequences should not be used */
828 static int recode_subtitle(AVCodecContext *avctx,
829  AVPacket *outpkt, const AVPacket *inpkt)
830 {
831 #if CONFIG_ICONV
832  iconv_t cd = (iconv_t)-1;
833  int ret = 0;
834  char *inb, *outb;
835  size_t inl, outl;
836  AVPacket tmp;
837 #endif
838 
839  if (avctx->sub_charenc_mode != FF_SUB_CHARENC_MODE_PRE_DECODER || inpkt->size == 0)
840  return 0;
841 
842 #if CONFIG_ICONV
843  cd = iconv_open("UTF-8", avctx->sub_charenc);
844  av_assert0(cd != (iconv_t)-1);
845 
846  inb = inpkt->data;
847  inl = inpkt->size;
848 
849  if (inl >= INT_MAX / UTF8_MAX_BYTES - AV_INPUT_BUFFER_PADDING_SIZE) {
850  av_log(avctx, AV_LOG_ERROR, "Subtitles packet is too big for recoding\n");
851  ret = AVERROR(ENOMEM);
852  goto end;
853  }
854 
855  ret = av_new_packet(&tmp, inl * UTF8_MAX_BYTES);
856  if (ret < 0)
857  goto end;
858  outpkt->buf = tmp.buf;
859  outpkt->data = tmp.data;
860  outpkt->size = tmp.size;
861  outb = outpkt->data;
862  outl = outpkt->size;
863 
864  if (iconv(cd, &inb, &inl, &outb, &outl) == (size_t)-1 ||
865  iconv(cd, NULL, NULL, &outb, &outl) == (size_t)-1 ||
866  outl >= outpkt->size || inl != 0) {
867  ret = FFMIN(AVERROR(errno), -1);
868  av_log(avctx, AV_LOG_ERROR, "Unable to recode subtitle event \"%s\" "
869  "from %s to UTF-8\n", inpkt->data, avctx->sub_charenc);
870  av_packet_unref(&tmp);
871  goto end;
872  }
873  outpkt->size -= outl;
874  memset(outpkt->data + outpkt->size, 0, outl);
875 
876 end:
877  if (cd != (iconv_t)-1)
878  iconv_close(cd);
879  return ret;
880 #else
881  av_log(avctx, AV_LOG_ERROR, "requesting subtitles recoding without iconv");
882  return AVERROR(EINVAL);
883 #endif
884 }
885 
886 static int utf8_check(const uint8_t *str)
887 {
888  const uint8_t *byte;
889  uint32_t codepoint, min;
890 
891  while (*str) {
892  byte = str;
893  GET_UTF8(codepoint, *(byte++), return 0;);
894  min = byte - str == 1 ? 0 : byte - str == 2 ? 0x80 :
895  1 << (5 * (byte - str) - 4);
896  if (codepoint < min || codepoint >= 0x110000 ||
897  codepoint == 0xFFFE /* BOM */ ||
898  codepoint >= 0xD800 && codepoint <= 0xDFFF /* surrogates */)
899  return 0;
900  str = byte;
901  }
902  return 1;
903 }
904 
905 #if FF_API_ASS_TIMING
906 static void insert_ts(AVBPrint *buf, int ts)
907 {
908  if (ts == -1) {
909  av_bprintf(buf, "9:59:59.99,");
910  } else {
911  int h, m, s;
912 
913  h = ts/360000; ts -= 360000*h;
914  m = ts/ 6000; ts -= 6000*m;
915  s = ts/ 100; ts -= 100*s;
916  av_bprintf(buf, "%d:%02d:%02d.%02d,", h, m, s, ts);
917  }
918 }
919 
920 static int convert_sub_to_old_ass_form(AVSubtitle *sub, const AVPacket *pkt, AVRational tb)
921 {
922  int i;
923  AVBPrint buf;
924 
925  av_bprint_init(&buf, 0, AV_BPRINT_SIZE_UNLIMITED);
926 
927  for (i = 0; i < sub->num_rects; i++) {
928  char *final_dialog;
929  const char *dialog;
930  AVSubtitleRect *rect = sub->rects[i];
931  int ts_start, ts_duration = -1;
932  long int layer;
933 
934  if (rect->type != SUBTITLE_ASS || !strncmp(rect->ass, "Dialogue: ", 10))
935  continue;
936 
937  av_bprint_clear(&buf);
938 
939  /* skip ReadOrder */
940  dialog = strchr(rect->ass, ',');
941  if (!dialog)
942  continue;
943  dialog++;
944 
945  /* extract Layer or Marked */
946  layer = strtol(dialog, (char**)&dialog, 10);
947  if (*dialog != ',')
948  continue;
949  dialog++;
950 
951  /* rescale timing to ASS time base (ms) */
952  ts_start = av_rescale_q(pkt->pts, tb, av_make_q(1, 100));
953  if (pkt->duration != -1)
954  ts_duration = av_rescale_q(pkt->duration, tb, av_make_q(1, 100));
955  sub->end_display_time = FFMAX(sub->end_display_time, 10 * ts_duration);
956 
957  /* construct ASS (standalone file form with timestamps) string */
958  av_bprintf(&buf, "Dialogue: %ld,", layer);
959  insert_ts(&buf, ts_start);
960  insert_ts(&buf, ts_duration == -1 ? -1 : ts_start + ts_duration);
961  av_bprintf(&buf, "%s\r\n", dialog);
962 
963  final_dialog = av_strdup(buf.str);
964  if (!av_bprint_is_complete(&buf) || !final_dialog) {
965  av_freep(&final_dialog);
966  av_bprint_finalize(&buf, NULL);
967  return AVERROR(ENOMEM);
968  }
969  av_freep(&rect->ass);
970  rect->ass = final_dialog;
971  }
972 
973  av_bprint_finalize(&buf, NULL);
974  return 0;
975 }
976 #endif
977 
978 int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub,
979  int *got_sub_ptr,
980  AVPacket *avpkt)
981 {
982  int i, ret = 0;
983 
984  if (!avpkt->data && avpkt->size) {
985  av_log(avctx, AV_LOG_ERROR, "invalid packet: NULL data, size != 0\n");
986  return AVERROR(EINVAL);
987  }
988  if (!avctx->codec)
989  return AVERROR(EINVAL);
990  if (avctx->codec->type != AVMEDIA_TYPE_SUBTITLE) {
991  av_log(avctx, AV_LOG_ERROR, "Invalid media type for subtitles\n");
992  return AVERROR(EINVAL);
993  }
994 
995  *got_sub_ptr = 0;
996  get_subtitle_defaults(sub);
997 
998  if ((avctx->codec->capabilities & AV_CODEC_CAP_DELAY) || avpkt->size) {
999  AVPacket pkt_recoded = *avpkt;
1000 
1001  ret = recode_subtitle(avctx, &pkt_recoded, avpkt);
1002  if (ret < 0) {
1003  *got_sub_ptr = 0;
1004  } else {
1005  ret = extract_packet_props(avctx->internal, &pkt_recoded);
1006  if (ret < 0)
1007  return ret;
1008 
1009  if (avctx->pkt_timebase.num && avpkt->pts != AV_NOPTS_VALUE)
1010  sub->pts = av_rescale_q(avpkt->pts,
1011  avctx->pkt_timebase, AV_TIME_BASE_Q);
1012  ret = avctx->codec->decode(avctx, sub, got_sub_ptr, &pkt_recoded);
1013  av_assert1((ret >= 0) >= !!*got_sub_ptr &&
1014  !!*got_sub_ptr >= !!sub->num_rects);
1015 
1016 #if FF_API_ASS_TIMING
1017  if (avctx->sub_text_format == FF_SUB_TEXT_FMT_ASS_WITH_TIMINGS
1018  && *got_sub_ptr && sub->num_rects) {
1019  const AVRational tb = avctx->pkt_timebase.num ? avctx->pkt_timebase
1020  : avctx->time_base;
1021  int err = convert_sub_to_old_ass_form(sub, avpkt, tb);
1022  if (err < 0)
1023  ret = err;
1024  }
1025 #endif
1026 
1027  if (sub->num_rects && !sub->end_display_time && avpkt->duration &&
1028  avctx->pkt_timebase.num) {
1029  AVRational ms = { 1, 1000 };
1030  sub->end_display_time = av_rescale_q(avpkt->duration,
1031  avctx->pkt_timebase, ms);
1032  }
1033 
1034  if (avctx->codec_descriptor->props & AV_CODEC_PROP_BITMAP_SUB)
1035  sub->format = 0;
1036  else if (avctx->codec_descriptor->props & AV_CODEC_PROP_TEXT_SUB)
1037  sub->format = 1;
1038 
1039  for (i = 0; i < sub->num_rects; i++) {
1040  if (avctx->sub_charenc_mode != FF_SUB_CHARENC_MODE_IGNORE &&
1041  sub->rects[i]->ass && !utf8_check(sub->rects[i]->ass)) {
1042  av_log(avctx, AV_LOG_ERROR,
1043  "Invalid UTF-8 in decoded subtitles text; "
1044  "maybe missing -sub_charenc option\n");
1045  avsubtitle_free(sub);
1046  ret = AVERROR_INVALIDDATA;
1047  break;
1048  }
1049  }
1050 
1051  if (avpkt->data != pkt_recoded.data) { // did we recode?
1052  /* prevent from destroying side data from original packet */
1053  pkt_recoded.side_data = NULL;
1054  pkt_recoded.side_data_elems = 0;
1055 
1056  av_packet_unref(&pkt_recoded);
1057  }
1058  }
1059 
1060  if (*got_sub_ptr)
1061  avctx->frame_number++;
1062  }
1063 
1064  return ret;
1065 }
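/* Illustrative usage sketch for avcodec_decode_subtitle2() above. dec_ctx and
 * pkt are assumed to be set up by the caller; error handling is abbreviated. */
#if 0
static int decode_one_subtitle(AVCodecContext *dec_ctx, AVPacket *pkt)
{
    AVSubtitle sub;
    int got_sub = 0;
    int ret = avcodec_decode_subtitle2(dec_ctx, &sub, &got_sub, pkt);
    if (ret < 0)
        return ret;
    if (got_sub) {
        /* ... render sub.rects[0] .. sub.rects[sub.num_rects - 1] ... */
        avsubtitle_free(&sub);
    }
    return 0;
}
#endif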
1066 
1067 enum AVPixelFormat avcodec_default_get_format(struct AVCodecContext *avctx,
1068  const enum AVPixelFormat *fmt)
1069 {
1070  const AVPixFmtDescriptor *desc;
1071  const AVCodecHWConfig *config;
1072  int i, n;
1073 
1074  // If a device was supplied when the codec was opened, assume that the
1075  // user wants to use it.
1076  if (avctx->hw_device_ctx && avctx->codec->hw_configs) {
1077  AVHWDeviceContext *device_ctx =
1078  (AVHWDeviceContext*)avctx->hw_device_ctx->data;
1079  for (i = 0;; i++) {
1080  config = &avctx->codec->hw_configs[i]->public;
1081  if (!config)
1082  break;
1083  if (!(config->methods &
1084  AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX))
1085  continue;
1086  if (device_ctx->type != config->device_type)
1087  continue;
1088  for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++) {
1089  if (config->pix_fmt == fmt[n])
1090  return fmt[n];
1091  }
1092  }
1093  }
1094  // No device or other setup, so we have to choose from things which
1095  // don't require any other external information.
1096 
1097  // If the last element of the list is a software format, choose it
1098  // (this should be best software format if any exist).
1099  for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++);
1100  desc = av_pix_fmt_desc_get(fmt[n - 1]);
1101  if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
1102  return fmt[n - 1];
1103 
1104  // Finally, traverse the list in order and choose the first entry
1105  // with no external dependencies (if there is no hardware configuration
1106  // information available then this just picks the first entry).
1107  for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++) {
1108  for (i = 0;; i++) {
1109  config = avcodec_get_hw_config(avctx->codec, i);
1110  if (!config)
1111  break;
1112  if (config->pix_fmt == fmt[n])
1113  break;
1114  }
1115  if (!config) {
1116  // No specific config available, so the decoder must be able
1117  // to handle this format without any additional setup.
1118  return fmt[n];
1119  }
1120  if (config->methods & AV_CODEC_HW_CONFIG_METHOD_INTERNAL) {
1121  // Usable with only internal setup.
1122  return fmt[n];
1123  }
1124  }
1125 
1126  // Nothing is usable, give up.
1127  return AV_PIX_FMT_NONE;
1128 }
1129 
1130 int ff_decode_get_hw_frames_ctx(AVCodecContext *avctx,
1131  enum AVHWDeviceType dev_type)
1132 {
1133  AVHWDeviceContext *device_ctx;
1134  AVHWFramesContext *frames_ctx;
1135  int ret;
1136 
1137  if (!avctx->hwaccel)
1138  return AVERROR(ENOSYS);
1139 
1140  if (avctx->hw_frames_ctx)
1141  return 0;
1142  if (!avctx->hw_device_ctx) {
1143  av_log(avctx, AV_LOG_ERROR, "A hardware frames or device context is "
1144  "required for hardware accelerated decoding.\n");
1145  return AVERROR(EINVAL);
1146  }
1147 
1148  device_ctx = (AVHWDeviceContext *)avctx->hw_device_ctx->data;
1149  if (device_ctx->type != dev_type) {
1150  av_log(avctx, AV_LOG_ERROR, "Device type %s expected for hardware "
1151  "decoding, but got %s.\n", av_hwdevice_get_type_name(dev_type),
1152  av_hwdevice_get_type_name(device_ctx->type));
1153  return AVERROR(EINVAL);
1154  }
1155 
1156  ret = avcodec_get_hw_frames_parameters(avctx,
1157  avctx->hw_device_ctx,
1158  avctx->hwaccel->pix_fmt,
1159  &avctx->hw_frames_ctx);
1160  if (ret < 0)
1161  return ret;
1162 
1163  frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
1164 
1165 
1166  if (frames_ctx->initial_pool_size) {
1167  // We guarantee 4 base work surfaces. The function above guarantees 1
1168  // (the absolute minimum), so add the missing count.
1169  frames_ctx->initial_pool_size += 3;
1170  }
1171 
1172  ret = av_hwframe_ctx_init(avctx->hw_frames_ctx);
1173  if (ret < 0) {
1174  av_buffer_unref(&avctx->hw_frames_ctx);
1175  return ret;
1176  }
1177 
1178  return 0;
1179 }
1180 
1181 int avcodec_get_hw_frames_parameters(AVCodecContext *avctx,
1182  AVBufferRef *device_ref,
1183  enum AVPixelFormat hw_pix_fmt,
1184  AVBufferRef **out_frames_ref)
1185 {
1186  AVBufferRef *frames_ref = NULL;
1187  const AVCodecHWConfigInternal *hw_config;
1188  const AVHWAccel *hwa;
1189  int i, ret;
1190 
1191  for (i = 0;; i++) {
1192  hw_config = avctx->codec->hw_configs[i];
1193  if (!hw_config)
1194  return AVERROR(ENOENT);
1195  if (hw_config->public.pix_fmt == hw_pix_fmt)
1196  break;
1197  }
1198 
1199  hwa = hw_config->hwaccel;
1200  if (!hwa || !hwa->frame_params)
1201  return AVERROR(ENOENT);
1202 
1203  frames_ref = av_hwframe_ctx_alloc(device_ref);
1204  if (!frames_ref)
1205  return AVERROR(ENOMEM);
1206 
1207  ret = hwa->frame_params(avctx, frames_ref);
1208  if (ret >= 0) {
1209  AVHWFramesContext *frames_ctx = (AVHWFramesContext*)frames_ref->data;
1210 
1211  if (frames_ctx->initial_pool_size) {
1212  // If the user has requested that extra output surfaces be
1213  // available then add them here.
1214  if (avctx->extra_hw_frames > 0)
1215  frames_ctx->initial_pool_size += avctx->extra_hw_frames;
1216 
1217  // If frame threading is enabled then an extra surface per thread
1218  // is also required.
1219  if (avctx->active_thread_type & FF_THREAD_FRAME)
1220  frames_ctx->initial_pool_size += avctx->thread_count;
1221  }
1222 
1223  *out_frames_ref = frames_ref;
1224  } else {
1225  av_buffer_unref(&frames_ref);
1226  }
1227  return ret;
1228 }
1229 
1230 static int hwaccel_init(AVCodecContext *avctx,
1231  const AVCodecHWConfigInternal *hw_config)
1232 {
1233  const AVHWAccel *hwaccel;
1234  int err;
1235 
1236  hwaccel = hw_config->hwaccel;
1237  if (hwaccel->capabilities & AV_HWACCEL_CODEC_CAP_EXPERIMENTAL &&
1238  avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
1239  av_log(avctx, AV_LOG_WARNING, "Ignoring experimental hwaccel: %s\n",
1240  hwaccel->name);
1241  return AVERROR_PATCHWELCOME;
1242  }
1243 
1244  if (hwaccel->priv_data_size) {
1245  avctx->internal->hwaccel_priv_data =
1246  av_mallocz(hwaccel->priv_data_size);
1247  if (!avctx->internal->hwaccel_priv_data)
1248  return AVERROR(ENOMEM);
1249  }
1250 
1251  avctx->hwaccel = hwaccel;
1252  if (hwaccel->init) {
1253  err = hwaccel->init(avctx);
1254  if (err < 0) {
1255  av_log(avctx, AV_LOG_ERROR, "Failed setup for format %s: "
1256  "hwaccel initialisation returned error.\n",
1257  av_get_pix_fmt_name(hw_config->public.pix_fmt));
1258  av_freep(&avctx->internal->hwaccel_priv_data);
1259  avctx->hwaccel = NULL;
1260  return err;
1261  }
1262  }
1263 
1264  return 0;
1265 }
1266 
1267 static void hwaccel_uninit(AVCodecContext *avctx)
1268 {
1269  if (avctx->hwaccel && avctx->hwaccel->uninit)
1270  avctx->hwaccel->uninit(avctx);
1271 
1272  av_freep(&avctx->internal->hwaccel_priv_data);
1273 
1274  avctx->hwaccel = NULL;
1275 
1276  av_buffer_unref(&avctx->hw_frames_ctx);
1277 }
1278 
1279 int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
1280 {
1281  const AVPixFmtDescriptor *desc;
1282  enum AVPixelFormat *choices;
1283  enum AVPixelFormat ret, user_choice;
1284  const AVCodecHWConfigInternal *hw_config;
1285  const AVCodecHWConfig *config;
1286  int i, n, err;
1287 
1288  // Find end of list.
1289  for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++);
1290  // Must contain at least one entry.
1291  av_assert0(n >= 1);
1292  // If a software format is available, it must be the last entry.
1293  desc = av_pix_fmt_desc_get(fmt[n - 1]);
1294  if (desc->flags & AV_PIX_FMT_FLAG_HWACCEL) {
1295  // No software format is available.
1296  } else {
1297  avctx->sw_pix_fmt = fmt[n - 1];
1298  }
1299 
1300  choices = av_malloc_array(n + 1, sizeof(*choices));
1301  if (!choices)
1302  return AV_PIX_FMT_NONE;
1303 
1304  memcpy(choices, fmt, (n + 1) * sizeof(*choices));
1305 
1306  for (;;) {
1307  // Remove the previous hwaccel, if there was one.
1308  hwaccel_uninit(avctx);
1309 
1310  user_choice = avctx->get_format(avctx, choices);
1311  if (user_choice == AV_PIX_FMT_NONE) {
1312  // Explicitly chose nothing, give up.
1313  ret = AV_PIX_FMT_NONE;
1314  break;
1315  }
1316 
1317  desc = av_pix_fmt_desc_get(user_choice);
1318  if (!desc) {
1319  av_log(avctx, AV_LOG_ERROR, "Invalid format returned by "
1320  "get_format() callback.\n");
1321  ret = AV_PIX_FMT_NONE;
1322  break;
1323  }
1324  av_log(avctx, AV_LOG_DEBUG, "Format %s chosen by get_format().\n",
1325  desc->name);
1326 
1327  for (i = 0; i < n; i++) {
1328  if (choices[i] == user_choice)
1329  break;
1330  }
1331  if (i == n) {
1332  av_log(avctx, AV_LOG_ERROR, "Invalid return from get_format(): "
1333  "%s not in possible list.\n", desc->name);
1334  ret = AV_PIX_FMT_NONE;
1335  break;
1336  }
1337 
1338  if (avctx->codec->hw_configs) {
1339  for (i = 0;; i++) {
1340  hw_config = avctx->codec->hw_configs[i];
1341  if (!hw_config)
1342  break;
1343  if (hw_config->public.pix_fmt == user_choice)
1344  break;
1345  }
1346  } else {
1347  hw_config = NULL;
1348  }
1349 
1350  if (!hw_config) {
1351  // No config available, so no extra setup required.
1352  ret = user_choice;
1353  break;
1354  }
1355  config = &hw_config->public;
1356 
1357  if (config->methods &
1358  AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX &&
1359  avctx->hw_frames_ctx) {
1360  const AVHWFramesContext *frames_ctx =
1361  (AVHWFramesContext*)avctx->hw_frames_ctx->data;
1362  if (frames_ctx->format != user_choice) {
1363  av_log(avctx, AV_LOG_ERROR, "Invalid setup for format %s: "
1364  "does not match the format of the provided frames "
1365  "context.\n", desc->name);
1366  goto try_again;
1367  }
1368  } else if (config->methods &
1369  AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX &&
1370  avctx->hw_device_ctx) {
1371  const AVHWDeviceContext *device_ctx =
1372  (AVHWDeviceContext*)avctx->hw_device_ctx->data;
1373  if (device_ctx->type != config->device_type) {
1374  av_log(avctx, AV_LOG_ERROR, "Invalid setup for format %s: "
1375  "does not match the type of the provided device "
1376  "context.\n", desc->name);
1377  goto try_again;
1378  }
1379  } else if (config->methods &
1380  AV_CODEC_HW_CONFIG_METHOD_INTERNAL) {
1381  // Internal-only setup, no additional configuration.
1382  } else if (config->methods &
1383  AV_CODEC_HW_CONFIG_METHOD_AD_HOC) {
1384  // Some ad-hoc configuration we can't see and can't check.
1385  } else {
1386  av_log(avctx, AV_LOG_ERROR, "Invalid setup for format %s: "
1387  "missing configuration.\n", desc->name);
1388  goto try_again;
1389  }
1390  if (hw_config->hwaccel) {
1391  av_log(avctx, AV_LOG_DEBUG, "Format %s requires hwaccel "
1392  "initialisation.\n", desc->name);
1393  err = hwaccel_init(avctx, hw_config);
1394  if (err < 0)
1395  goto try_again;
1396  }
1397  ret = user_choice;
1398  break;
1399 
1400  try_again:
1401  av_log(avctx, AV_LOG_DEBUG, "Format %s not usable, retrying "
1402  "get_format() without it.\n", desc->name);
1403  for (i = 0; i < n; i++) {
1404  if (choices[i] == user_choice)
1405  break;
1406  }
1407  for (; i + 1 < n; i++)
1408  choices[i] = choices[i + 1];
1409  --n;
1410  }
1411 
1412  av_freep(&choices);
1413  return ret;
1414 }
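/* Illustrative sketch of a user-supplied get_format() callback as negotiated
 * by ff_get_format() above: prefer a hardware pixel format when the decoder
 * offers it, otherwise fall back to the last entry, which is the preferred
 * software format when one exists. AV_PIX_FMT_VAAPI is only an example choice;
 * using it requires a matching avctx->hw_device_ctx. */
#if 0
static enum AVPixelFormat pick_hw_format(AVCodecContext *avctx,
                                         const enum AVPixelFormat *fmt)
{
    int n;
    for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++) {
        if (fmt[n] == AV_PIX_FMT_VAAPI)
            return fmt[n];
    }
    return fmt[n - 1];
}
#endif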
1415 
1416 static void frame_pool_free(void *opaque, uint8_t *data)
1417 {
1418  FramePool *pool = (FramePool*)data;
1419  int i;
1420 
1421  for (i = 0; i < FF_ARRAY_ELEMS(pool->pools); i++)
1422  av_buffer_pool_uninit(&pool->pools[i]);
1423 
1424  av_freep(&data);
1425 }
1426 
1427 static AVBufferRef *frame_pool_alloc(void)
1428 {
1429  FramePool *pool = av_mallocz(sizeof(*pool));
1430  AVBufferRef *buf;
1431 
1432  if (!pool)
1433  return NULL;
1434 
1435  buf = av_buffer_create((uint8_t*)pool, sizeof(*pool),
1436  frame_pool_free, NULL, 0);
1437  if (!buf) {
1438  av_freep(&pool);
1439  return NULL;
1440  }
1441 
1442  return buf;
1443 }
1444 
1445 static int update_frame_pool(AVCodecContext *avctx, AVFrame *frame)
1446 {
1447  FramePool *pool = avctx->internal->pool ?
1448  (FramePool*)avctx->internal->pool->data : NULL;
1449  AVBufferRef *pool_buf;
1450  int i, ret, ch, planes;
1451 
1452  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
1453  int planar = av_sample_fmt_is_planar(frame->format);
1454  ch = frame->channels;
1455  planes = planar ? ch : 1;
1456  }
1457 
1458  if (pool && pool->format == frame->format) {
1459  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO &&
1460  pool->width == frame->width && pool->height == frame->height)
1461  return 0;
1462  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && pool->planes == planes &&
1463  pool->channels == ch && frame->nb_samples == pool->samples)
1464  return 0;
1465  }
1466 
1467  pool_buf = frame_pool_alloc();
1468  if (!pool_buf)
1469  return AVERROR(ENOMEM);
1470  pool = (FramePool*)pool_buf->data;
1471 
1472  switch (avctx->codec_type) {
1473  case AVMEDIA_TYPE_VIDEO: {
1474  int linesize[4];
1475  int w = frame->width;
1476  int h = frame->height;
1477  int unaligned;
1478  ptrdiff_t linesize1[4];
1479  size_t size[4];
1480 
1481  avcodec_align_dimensions2(avctx, &w, &h, pool->stride_align);
1482 
1483  do {
1484  // NOTE: do not align linesizes individually, this breaks e.g. assumptions
1485  // that linesize[0] == 2*linesize[1] in the MPEG-encoder for 4:2:2
1486  ret = av_image_fill_linesizes(linesize, avctx->pix_fmt, w);
1487  if (ret < 0)
1488  goto fail;
1489  // increase alignment of w for next try (rhs gives the lowest bit set in w)
1490  w += w & ~(w - 1);
1491 
1492  unaligned = 0;
1493  for (i = 0; i < 4; i++)
1494  unaligned |= linesize[i] % pool->stride_align[i];
1495  } while (unaligned);
1496 
1497  for (i = 0; i < 4; i++)
1498  linesize1[i] = linesize[i];
1499  ret = av_image_fill_plane_sizes(size, avctx->pix_fmt, h, linesize1);
1500  if (ret < 0)
1501  goto fail;
1502 
1503  for (i = 0; i < 4; i++) {
1504  pool->linesize[i] = linesize[i];
1505  if (size[i]) {
1506  if (size[i] > INT_MAX - (16 + STRIDE_ALIGN - 1)) {
1507  ret = AVERROR(EINVAL);
1508  goto fail;
1509  }
1510  pool->pools[i] = av_buffer_pool_init(size[i] + 16 + STRIDE_ALIGN - 1,
1511  CONFIG_MEMORY_POISONING ?
1512  NULL :
1513  av_buffer_allocz);
1514  if (!pool->pools[i]) {
1515  ret = AVERROR(ENOMEM);
1516  goto fail;
1517  }
1518  }
1519  }
1520  pool->format = frame->format;
1521  pool->width = frame->width;
1522  pool->height = frame->height;
1523 
1524  break;
1525  }
1526  case AVMEDIA_TYPE_AUDIO: {
1527  ret = av_samples_get_buffer_size(&pool->linesize[0], ch,
1528  frame->nb_samples, frame->format, 0);
1529  if (ret < 0)
1530  goto fail;
1531 
1532  pool->pools[0] = av_buffer_pool_init(pool->linesize[0], NULL);
1533  if (!pool->pools[0]) {
1534  ret = AVERROR(ENOMEM);
1535  goto fail;
1536  }
1537 
1538  pool->format = frame->format;
1539  pool->planes = planes;
1540  pool->channels = ch;
1541  pool->samples = frame->nb_samples;
1542  break;
1543  }
1544  default: av_assert0(0);
1545  }
1546 
1547  av_buffer_unref(&avctx->internal->pool);
1548  avctx->internal->pool = pool_buf;
1549 
1550  return 0;
1551 fail:
1552  av_buffer_unref(&pool_buf);
1553  return ret;
1554 }
1555 
1556 static int audio_get_buffer(AVCodecContext *avctx, AVFrame *frame)
1557 {
1558  FramePool *pool = (FramePool*)avctx->internal->pool->data;
1559  int planes = pool->planes;
1560  int i;
1561 
1562  frame->linesize[0] = pool->linesize[0];
1563 
1564  if (planes > AV_NUM_DATA_POINTERS) {
1565  frame->extended_data = av_mallocz_array(planes, sizeof(*frame->extended_data));
1566  frame->nb_extended_buf = planes - AV_NUM_DATA_POINTERS;
1567  frame->extended_buf = av_mallocz_array(frame->nb_extended_buf,
1568  sizeof(*frame->extended_buf));
1569  if (!frame->extended_data || !frame->extended_buf) {
1570  av_freep(&frame->extended_data);
1571  av_freep(&frame->extended_buf);
1572  return AVERROR(ENOMEM);
1573  }
1574  } else {
1575  frame->extended_data = frame->data;
1576  av_assert0(frame->nb_extended_buf == 0);
1577  }
1578 
1579  for (i = 0; i < FFMIN(planes, AV_NUM_DATA_POINTERS); i++) {
1580  frame->buf[i] = av_buffer_pool_get(pool->pools[0]);
1581  if (!frame->buf[i])
1582  goto fail;
1583  frame->extended_data[i] = frame->data[i] = frame->buf[i]->data;
1584  }
1585  for (i = 0; i < frame->nb_extended_buf; i++) {
1586  frame->extended_buf[i] = av_buffer_pool_get(pool->pools[0]);
1587  if (!frame->extended_buf[i])
1588  goto fail;
1589  frame->extended_data[i + AV_NUM_DATA_POINTERS] = frame->extended_buf[i]->data;
1590  }
1591 
1592  if (avctx->debug & FF_DEBUG_BUFFERS)
1593  av_log(avctx, AV_LOG_DEBUG, "default_get_buffer called on frame %p", frame);
1594 
1595  return 0;
1596 fail:
1597  av_frame_unref(frame);
1598  return AVERROR(ENOMEM);
1599 }
1600 
1601 static int video_get_buffer(AVCodecContext *s, AVFrame *pic)
1602 {
1603  FramePool *pool = (FramePool*)s->internal->pool->data;
1604  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pic->format);
1605  int i;
1606 
1607  if (pic->data[0] || pic->data[1] || pic->data[2] || pic->data[3]) {
1608  av_log(s, AV_LOG_ERROR, "pic->data[*]!=NULL in avcodec_default_get_buffer\n");
1609  return -1;
1610  }
1611 
1612  if (!desc) {
1613  av_log(s, AV_LOG_ERROR,
1614  "Unable to get pixel format descriptor for format %s\n",
1615  av_get_pix_fmt_name(pic->format));
1616  return AVERROR(EINVAL);
1617  }
1618 
1619  memset(pic->data, 0, sizeof(pic->data));
1620  pic->extended_data = pic->data;
1621 
1622  for (i = 0; i < 4 && pool->pools[i]; i++) {
1623  pic->linesize[i] = pool->linesize[i];
1624 
1625  pic->buf[i] = av_buffer_pool_get(pool->pools[i]);
1626  if (!pic->buf[i])
1627  goto fail;
1628 
1629  pic->data[i] = pic->buf[i]->data;
1630  }
1631  for (; i < AV_NUM_DATA_POINTERS; i++) {
1632  pic->data[i] = NULL;
1633  pic->linesize[i] = 0;
1634  }
1635  if (desc->flags & AV_PIX_FMT_FLAG_PAL ||
1636  ((desc->flags & FF_PSEUDOPAL) && pic->data[1]))
1637  avpriv_set_systematic_pal2((uint32_t *)pic->data[1], pic->format);
1638 
1639  if (s->debug & FF_DEBUG_BUFFERS)
1640  av_log(s, AV_LOG_DEBUG, "default_get_buffer called on pic %p\n", pic);
1641 
1642  return 0;
1643 fail:
1644  av_frame_unref(pic);
1645  return AVERROR(ENOMEM);
1646 }
1647 
1648 int avcodec_default_get_buffer2(AVCodecContext *avctx, AVFrame *frame, int flags)
1649 {
1650  int ret;
1651 
1652  if (avctx->hw_frames_ctx) {
1653  ret = av_hwframe_get_buffer(avctx->hw_frames_ctx, frame, 0);
1654  frame->width = avctx->coded_width;
1655  frame->height = avctx->coded_height;
1656  return ret;
1657  }
1658 
1659  if ((ret = update_frame_pool(avctx, frame)) < 0)
1660  return ret;
1661 
1662  switch (avctx->codec_type) {
1663  case AVMEDIA_TYPE_VIDEO:
1664  return video_get_buffer(avctx, frame);
1665  case AVMEDIA_TYPE_AUDIO:
1666  return audio_get_buffer(avctx, frame);
1667  default:
1668  return -1;
1669  }
1670 }
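/* Illustrative sketch of a custom get_buffer2 callback: decoder allocations
 * funnel through this hook, so the simplest valid override just delegates to
 * avcodec_default_get_buffer2() above, here with an extra (hypothetical)
 * debug log. */
#if 0
static int my_get_buffer2(AVCodecContext *avctx, AVFrame *frame, int flags)
{
    av_log(avctx, AV_LOG_DEBUG, "get_buffer2: %dx%d, %d samples\n",
           frame->width, frame->height, frame->nb_samples);
    return avcodec_default_get_buffer2(avctx, frame, flags);
}
#endif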
1671 
1672 static int add_metadata_from_side_data(const AVPacket *avpkt, AVFrame *frame)
1673 {
1674  int size;
1675  const uint8_t *side_metadata;
1676 
1677  AVDictionary **frame_md = &frame->metadata;
1678 
1679  side_metadata = av_packet_get_side_data(avpkt,
1680  AV_PKT_DATA_STRINGS_METADATA, &size);
1681  return av_packet_unpack_dictionary(side_metadata, size, frame_md);
1682 }
1683 
1684 int ff_decode_frame_props(AVCodecContext *avctx, AVFrame *frame)
1685 {
1686  const AVPacket *pkt = avctx->internal->last_pkt_props;
1687  int i;
1688  static const struct {
1689  enum AVPacketSideDataType packet;
1690  enum AVFrameSideDataType frame;
1691  } sd[] = {
1692  { AV_PKT_DATA_REPLAYGAIN,                 AV_FRAME_DATA_REPLAYGAIN },
1693  { AV_PKT_DATA_DISPLAYMATRIX,              AV_FRAME_DATA_DISPLAYMATRIX },
1694  { AV_PKT_DATA_SPHERICAL,                  AV_FRAME_DATA_SPHERICAL },
1695  { AV_PKT_DATA_STEREO3D,                   AV_FRAME_DATA_STEREO3D },
1696  { AV_PKT_DATA_AUDIO_SERVICE_TYPE,         AV_FRAME_DATA_AUDIO_SERVICE_TYPE },
1697  { AV_PKT_DATA_MASTERING_DISPLAY_METADATA, AV_FRAME_DATA_MASTERING_DISPLAY_METADATA },
1698  { AV_PKT_DATA_CONTENT_LIGHT_LEVEL,        AV_FRAME_DATA_CONTENT_LIGHT_LEVEL },
1699  { AV_PKT_DATA_A53_CC,                     AV_FRAME_DATA_A53_CC },
1700  { AV_PKT_DATA_ICC_PROFILE,                AV_FRAME_DATA_ICC_PROFILE },
1701  { AV_PKT_DATA_S12M_TIMECODE,              AV_FRAME_DATA_S12M_TIMECODE },
1702  };
1703 
1704  if (pkt) {
1705  frame->pts = pkt->pts;
1706 #if FF_API_PKT_PTS
1707 FF_DISABLE_DEPRECATION_WARNINGS
1708  frame->pkt_pts = pkt->pts;
1709 FF_ENABLE_DEPRECATION_WARNINGS
1710 #endif
1711  frame->pkt_pos = pkt->pos;
1712  frame->pkt_duration = pkt->duration;
1713  frame->pkt_size = pkt->size;
1714 
1715  for (i = 0; i < FF_ARRAY_ELEMS(sd); i++) {
1716  int size;
1717  uint8_t *packet_sd = av_packet_get_side_data(pkt, sd[i].packet, &size);
1718  if (packet_sd) {
1719  AVFrameSideData *frame_sd = av_frame_new_side_data(frame,
1720  sd[i].frame,
1721  size);
1722  if (!frame_sd)
1723  return AVERROR(ENOMEM);
1724 
1725  memcpy(frame_sd->data, packet_sd, size);
1726  }
1727  }
1728  add_metadata_from_side_data(pkt, frame);
1729 
1730  if (pkt->flags & AV_PKT_FLAG_DISCARD) {
1731  frame->flags |= AV_FRAME_FLAG_DISCARD;
1732  } else {
1733  frame->flags = (frame->flags & ~AV_FRAME_FLAG_DISCARD);
1734  }
1735  }
1736  frame->reordered_opaque = avctx->reordered_opaque;
1737 
1738  if (frame->color_primaries == AVCOL_PRI_UNSPECIFIED)
1739  frame->color_primaries = avctx->color_primaries;
1740  if (frame->color_trc == AVCOL_TRC_UNSPECIFIED)
1741  frame->color_trc = avctx->color_trc;
1742  if (frame->colorspace == AVCOL_SPC_UNSPECIFIED)
1743  frame->colorspace = avctx->colorspace;
1744  if (frame->color_range == AVCOL_RANGE_UNSPECIFIED)
1745  frame->color_range = avctx->color_range;
1746  if (frame->chroma_location == AVCHROMA_LOC_UNSPECIFIED)
1747  frame->chroma_location = avctx->chroma_sample_location;
1748 
1749  switch (avctx->codec->type) {
1750  case AVMEDIA_TYPE_VIDEO:
1751  frame->format = avctx->pix_fmt;
1752  if (!frame->sample_aspect_ratio.num)
1753  frame->sample_aspect_ratio = avctx->sample_aspect_ratio;
1754 
1755  if (frame->width && frame->height &&
1756  av_image_check_sar(frame->width, frame->height,
1757  frame->sample_aspect_ratio) < 0) {
1758  av_log(avctx, AV_LOG_WARNING, "ignoring invalid SAR: %u/%u\n",
1759  frame->sample_aspect_ratio.num,
1760  frame->sample_aspect_ratio.den);
1761  frame->sample_aspect_ratio = (AVRational){ 0, 1 };
1762  }
1763 
1764  break;
1765  case AVMEDIA_TYPE_AUDIO:
1766  if (!frame->sample_rate)
1767  frame->sample_rate = avctx->sample_rate;
1768  if (frame->format < 0)
1769  frame->format = avctx->sample_fmt;
1770  if (!frame->channel_layout) {
1771  if (avctx->channel_layout) {
1772  if (av_get_channel_layout_nb_channels(avctx->channel_layout) !=
1773  avctx->channels) {
1774  av_log(avctx, AV_LOG_ERROR, "Inconsistent channel "
1775  "configuration.\n");
1776  return AVERROR(EINVAL);
1777  }
1778 
1779  frame->channel_layout = avctx->channel_layout;
1780  } else {
1781  if (avctx->channels > FF_SANE_NB_CHANNELS) {
1782  av_log(avctx, AV_LOG_ERROR, "Too many channels: %d.\n",
1783  avctx->channels);
1784  return AVERROR(ENOSYS);
1785  }
1786  }
1787  }
1788  frame->channels = avctx->channels;
1789  break;
1790  }
1791  return 0;
1792 }
1793 
1794 static void validate_avframe_allocation(AVCodecContext *avctx, AVFrame *frame)
1795 {
1796  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
1797  int i;
1798  int num_planes = av_pix_fmt_count_planes(frame->format);
1799  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
1800  int flags = desc ? desc->flags : 0;
1801  if (num_planes == 1 && (flags & AV_PIX_FMT_FLAG_PAL))
1802  num_planes = 2;
1803  if ((flags & FF_PSEUDOPAL) && frame->data[1])
1804  num_planes = 2;
1805  for (i = 0; i < num_planes; i++) {
1806  av_assert0(frame->data[i]);
1807  }
1808  // For formats without data like hwaccel allow unused pointers to be non-NULL.
1809  for (i = num_planes; num_planes > 0 && i < FF_ARRAY_ELEMS(frame->data); i++) {
1810  if (frame->data[i])
1811  av_log(avctx, AV_LOG_ERROR, "Buffer returned by get_buffer2() did not zero unused plane pointers\n");
1812  frame->data[i] = NULL;
1813  }
1814  }
1815 }
1816 
1817 static void decode_data_free(void *opaque, uint8_t *data)
1818 {
1819  FrameDecodeData *fdd = (FrameDecodeData*)data;
1820 
1821  if (fdd->post_process_opaque_free)
1822  fdd->post_process_opaque_free(fdd->post_process_opaque);
1823 
1824  if (fdd->hwaccel_priv_free)
1825  fdd->hwaccel_priv_free(fdd->hwaccel_priv);
1826 
1827  av_freep(&fdd);
1828 }
1829 
1830 int ff_attach_decode_data(AVFrame *frame)
1831 {
1832  AVBufferRef *fdd_buf;
1833  FrameDecodeData *fdd;
1834 
1835  av_assert1(!frame->private_ref);
1836  av_buffer_unref(&frame->private_ref);
1837 
1838  fdd = av_mallocz(sizeof(*fdd));
1839  if (!fdd)
1840  return AVERROR(ENOMEM);
1841 
1842  fdd_buf = av_buffer_create((uint8_t*)fdd, sizeof(*fdd), decode_data_free,
1843  NULL, AV_BUFFER_FLAG_READONLY);
1844  if (!fdd_buf) {
1845  av_freep(&fdd);
1846  return AVERROR(ENOMEM);
1847  }
1848 
1849  frame->private_ref = fdd_buf;
1850 
1851  return 0;
1852 }
1853 
1854 int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
1855 {
1856  const AVHWAccel *hwaccel = avctx->hwaccel;
1857  int override_dimensions = 1;
1858  int ret;
1859 
1860  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
1861  if ((ret = av_image_check_size2(FFALIGN(avctx->width, STRIDE_ALIGN), avctx->height, avctx->max_pixels, AV_PIX_FMT_NONE, 0, avctx)) < 0 || avctx->pix_fmt<0) {
1862  av_log(avctx, AV_LOG_ERROR, "video_get_buffer: image parameters invalid\n");
1863  ret = AVERROR(EINVAL);
1864  goto fail;
1865  }
1866 
1867  if (frame->width <= 0 || frame->height <= 0) {
1868  frame->width = FFMAX(avctx->width, AV_CEIL_RSHIFT(avctx->coded_width, avctx->lowres));
1869  frame->height = FFMAX(avctx->height, AV_CEIL_RSHIFT(avctx->coded_height, avctx->lowres));
1870  override_dimensions = 0;
1871  }
1872 
1873  if (frame->data[0] || frame->data[1] || frame->data[2] || frame->data[3]) {
1874  av_log(avctx, AV_LOG_ERROR, "pic->data[*]!=NULL in get_buffer_internal\n");
1875  ret = AVERROR(EINVAL);
1876  goto fail;
1877  }
1878  } else if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
1879  if (frame->nb_samples * (int64_t)avctx->channels > avctx->max_samples) {
1880  av_log(avctx, AV_LOG_ERROR, "samples per frame %d, exceeds max_samples %"PRId64"\n", frame->nb_samples, avctx->max_samples);
1881  ret = AVERROR(EINVAL);
1882  goto fail;
1883  }
1884  }
1885  ret = ff_decode_frame_props(avctx, frame);
1886  if (ret < 0)
1887  goto fail;
1888 
1889  if (hwaccel) {
1890  if (hwaccel->alloc_frame) {
1891  ret = hwaccel->alloc_frame(avctx, frame);
1892  goto end;
1893  }
1894  } else
1895  avctx->sw_pix_fmt = avctx->pix_fmt;
1896 
1897  ret = avctx->get_buffer2(avctx, frame, flags);
1898  if (ret < 0)
1899  goto fail;
1900 
1901  validate_avframe_allocation(avctx, frame);
1902 
1903  ret = ff_attach_decode_data(frame);
1904  if (ret < 0)
1905  goto fail;
1906 
1907 end:
1908  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO && !override_dimensions &&
1909  !(avctx->codec->caps_internal & FF_CODEC_CAP_EXPORTS_CROPPING)) {
1910  frame->width = avctx->width;
1911  frame->height = avctx->height;
1912  }
1913 
1914 fail:
1915  if (ret < 0) {
1916  av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
1917  av_frame_unref(frame);
1918  }
1919 
1920  return ret;
1921 }
1922 
1923 static int reget_buffer_internal(AVCodecContext *avctx, AVFrame *frame, int flags)
1924 {
1925  AVFrame *tmp;
1926  int ret;
1927 
1928  av_assert0(avctx->codec_type == AVMEDIA_TYPE_VIDEO);
1929 
1930  if (frame->data[0] && (frame->width != avctx->width || frame->height != avctx->height || frame->format != avctx->pix_fmt)) {
1931  av_log(avctx, AV_LOG_WARNING, "Picture changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s in reget buffer()\n",
1932  frame->width, frame->height, av_get_pix_fmt_name(frame->format), avctx->width, avctx->height, av_get_pix_fmt_name(avctx->pix_fmt));
1933  av_frame_unref(frame);
1934  }
1935 
1936  if (!frame->data[0])
1937  return ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF);
1938 
1939  if ((flags & FF_REGET_BUFFER_FLAG_READONLY) || av_frame_is_writable(frame))
1940  return ff_decode_frame_props(avctx, frame);
1941 
1942  tmp = av_frame_alloc();
1943  if (!tmp)
1944  return AVERROR(ENOMEM);
1945 
1946  av_frame_move_ref(tmp, frame);
1947 
1948  ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF);
1949  if (ret < 0) {
1950  av_frame_free(&tmp);
1951  return ret;
1952  }
1953 
1954  av_frame_copy(frame, tmp);
1955  av_frame_free(&tmp);
1956 
1957  return 0;
1958 }
1959 
1960 int ff_reget_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
1961 {
1962  int ret = reget_buffer_internal(avctx, frame, flags);
1963  if (ret < 0)
1964  av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
1965  return ret;
1966 }
int initial_channels
Definition: internal.h:202
enum AVPixelFormat pix_fmt
Supported pixel format.
Definition: avcodec.h:2437
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel...
Definition: avcodec.h:905
void(* hwaccel_priv_free)(void *priv)
Definition: decode.h:53
#define AV_CODEC_PROP_TEXT_SUB
Subtitle codec is text based.
Definition: codec_desc.h:102
static void frame_pool_free(void *opaque, uint8_t *data)
Definition: decode.c:1416
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:736
int samples
Definition: decode.c:64
static int decode_simple_internal(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:311
int attribute_align_arg avcodec_decode_audio4(AVCodecContext *avctx, AVFrame *frame, int *got_frame_ptr, const AVPacket *avpkt)
Decode the audio frame of size avpkt->size from avpkt->data into frame.
Definition: decode.c:813
enum AVPixelFormat format
The pixel format identifying the underlying HW surface type.
Definition: hwcontext.h:209
Mastering display metadata associated with a video frame.
Definition: frame.h:119
unsigned num_rects
Definition: avcodec.h:2698
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:595
enum AVMediaType type
Definition: codec.h:203
static int apply_param_change(AVCodecContext *avctx, const AVPacket *avpkt)
Definition: decode.c:67
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:237
static int recode_subtitle(AVCodecContext *avctx, AVPacket *outpkt, const AVPacket *inpkt)
Definition: decode.c:828
AVBufferPool * pools[4]
Pools for each data plane.
Definition: decode.c:53
int ff_decode_frame_props(AVCodecContext *avctx, AVFrame *frame)
Set various frame properties from the codec context / packet data.
Definition: decode.c:1684
size_t crop_bottom
Definition: frame.h:669
static AVPacket pkt
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, AVPacket *avpkt)
Decode a subtitle message.
Definition: decode.c:978
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1690
int(* alloc_frame)(AVCodecContext *avctx, AVFrame *frame)
Allocate a custom buffer.
Definition: avcodec.h:2456
static int utf8_check(const uint8_t *str)
Definition: decode.c:886
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:235
static int apply_cropping(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:622
Mastering display metadata (based on SMPTE-2086:2014).
Definition: packet.h:222
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avcodec.h:649
int av_get_channel_layout_nb_channels(uint64_t channel_layout)
Return the number of channels in the channel layout.
AVSubtitleRect ** rects
Definition: avcodec.h:2699
int(* uninit)(AVCodecContext *avctx)
Uninitialize the hwaccel private data.
Definition: avcodec.h:2548
static AVBufferRef * frame_pool_alloc(void)
Definition: decode.c:1427
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: codec.h:75
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
int height
Definition: decode.c:59
enum AVPixelFormat avcodec_default_get_format(struct AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Definition: decode.c:1067
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:1194
uint8_t
#define av_malloc(s)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:190
int ff_attach_decode_data(AVFrame *frame)
Definition: decode.c:1830
int av_packet_unpack_dictionary(const uint8_t *data, int size, AVDictionary **dict)
Unpack a dictionary from side_data.
Definition: avpacket.c:530
AVOptions.
static int64_t guess_correct_pts(AVCodecContext *ctx, int64_t reordered_pts, int64_t dts)
Attempt to guess proper monotonic timestamps for decoded video frames which might have incorrect time...
Definition: decode.c:279
size_t crop_left
Definition: frame.h:670
int avpriv_set_systematic_pal2(uint32_t pal[256], enum AVPixelFormat pix_fmt)
Definition: imgutils.c:176
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:90
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: packet.h:381
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: packet.h:108
#define AV_CODEC_FLAG_UNALIGNED
Allow decoders to produce frames with data planes that are not aligned to CPU requirements (e...
Definition: avcodec.h:271
#define AV_WL8(p, d)
Definition: intreadwrite.h:399
Multithreading support functions.
AVBufferRef * private_ref
AVBufferRef for internal use by a single libav* library.
Definition: frame.h:687
int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt)
Called by decoders to get the next packet for decoding.
Definition: decode.c:238
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:401
enum AVPixelFormat pix_fmt
For decoders, a hardware pixel format which that decoder may be able to decode to if suitable hardwar...
Definition: codec.h:430
int planes
Definition: decode.c:62
Structure to hold side data for an AVFrame.
Definition: frame.h:214
int av_image_check_sar(unsigned int w, unsigned int h, AVRational sar)
Check if the given sample aspect ratio of an image is valid.
Definition: imgutils.c:322
size_t compat_decode_consumed
Definition: internal.h:183
static void finish(void)
Definition: movenc.c:345
uint8_t * data
Definition: packet.h:363
#define FF_REGET_BUFFER_FLAG_READONLY
the returned buffer does not need to be writable
Definition: internal.h:296
#define AVERROR_EOF
End of file.
Definition: error.h:55
AVDictionary * metadata
metadata.
Definition: frame.h:594
int lowres
low resolution decoding, 1-> 1/2 size, 2->1/4 size
Definition: avcodec.h:1765
#define AV_BUFFER_FLAG_READONLY
Always treat the buffer as read-only, even when it has only one reference.
Definition: buffer.h:113
int(* init)(AVCodecContext *avctx)
Initialize the hwaccel private data.
Definition: avcodec.h:2540
ptrdiff_t size
Definition: opengl_enc.c:100
int initial_height
Definition: internal.h:200
int initial_format
Definition: internal.h:199
The data represents the AVSphericalMapping structure defined in libavutil/spherical.h.
Definition: frame.h:130
Apply the maximum possible cropping, even if it requires setting the AVFrame.data[] entries to unalig...
Definition: frame.h:953
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:1168
#define FFALIGN(x, a)
Definition: macros.h:48
#define av_log(a,...)
The buffer pool.
const char * name
Definition: pixdesc.h:82
int av_packet_ref(AVPacket *dst, const AVPacket *src)
Setup a new reference to the data described by a given packet.
Definition: avpacket.c:615
int av_sample_fmt_is_planar(enum AVSampleFormat sample_fmt)
Check if the sample format is planar.
Definition: samplefmt.c:112
void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height, int linesize_align[AV_NUM_DATA_POINTERS])
Modify width and height values so that they will result in a memory buffer that is acceptable for the...
Definition: utils.c:155
int ff_thread_decode_frame(AVCodecContext *avctx, AVFrame *picture, int *got_picture_ptr, AVPacket *avpkt)
Submit a new frame to a decoding thread.
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
#define AV_RL8(x)
Definition: intreadwrite.h:398
int av_new_packet(AVPacket *pkt, int size)
Allocate the payload of a packet and initialize its fields with default values.
Definition: avpacket.c:88
AVRational pkt_timebase
Timebase in which pkt_dts/pts and AVPacket.dts/pts are.
Definition: avcodec.h:2083
AVBSFContext * bsf
Definition: internal.h:141
int width
Definition: frame.h:366
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:194
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:816
void * post_process_opaque
Definition: decode.h:46
#define AV_BPRINT_SIZE_UNLIMITED
static int hwaccel_init(AVCodecContext *avctx, const AVCodecHWConfigInternal *hw_config)
Definition: decode.c:1230
static void validate_avframe_allocation(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:1794
An AV_PKT_DATA_PARAM_CHANGE side data packet is laid out as follows:
Definition: packet.h:72
int av_frame_apply_cropping(AVFrame *frame, int flags)
Crop the given video AVFrame according to its crop_left/crop_top/crop_right/ crop_bottom fields...
Definition: frame.c:903
uint8_t * av_packet_get_side_data(const AVPacket *pkt, enum AVPacketSideDataType type, int *size)
Get side information from packet.
Definition: avpacket.c:353
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:203
ICC profile data consisting of an opaque octet buffer following the format described by ISO 15076-1...
Definition: packet.h:274
int64_t pts_correction_last_pts
PTS of the last frame.
Definition: avcodec.h:2108
int ff_reget_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Identical in function to ff_get_buffer(), except it reuses the existing buffer if available...
Definition: decode.c:1960
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:1804
int methods
Bit set of AV_CODEC_HW_CONFIG_METHOD_* flags, describing the possible setup methods which can be used...
Definition: codec.h:435
The codec supports this format via the hw_device_ctx interface.
Definition: codec.h:389
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:69
int avcodec_is_open(AVCodecContext *s)
Definition: utils.c:1927
int attribute_align_arg avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Return decoded output data from a decoder.
Definition: decode.c:649
static int extract_packet_props(AVCodecInternal *avci, const AVPacket *pkt)
Definition: decode.c:145
AVFrame * buffer_frame
Definition: internal.h:178
int capabilities
Codec capabilities.
Definition: codec.h:209
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: frame.h:552
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:215
ATSC A53 Part 4 Closed Captions.
Definition: frame.h:58
AVBufferRef * buf
A reference to the reference-counted buffer where the packet data is stored.
Definition: packet.h:346
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:606
enum AVColorSpace colorspace
YUV colorspace type.
Definition: frame.h:563
AVRational time_base_in
The timebase used for the timestamps of the input packets.
Definition: bsf.h:89
#define AV_PIX_FMT_FLAG_HWACCEL
Pixel format is an HW accelerated format.
Definition: pixdesc.h:140
simple assert() macros that are a bit more flexible than ISO C assert().
int side_data_elems
Definition: packet.h:375
AVBufferRef * av_buffer_create(uint8_t *data, int size, void(*free)(void *opaque, uint8_t *data), void *opaque, int flags)
Create an AVBuffer from an existing array.
Definition: buffer.c:29
int64_t max_pixels
The number of pixels per image to maximally accept.
Definition: avcodec.h:2256
int av_hwframe_ctx_init(AVBufferRef *ref)
Finalize the context before use.
Definition: hwcontext.c:333
enum AVHWDeviceType type
This field identifies the underlying API used for hardware access.
Definition: hwcontext.h:79
#define FFMAX(a, b)
Definition: common.h:94
int av_hwframe_get_buffer(AVBufferRef *hwframe_ref, AVFrame *frame, int flags)
Allocate a new frame attached to the given AVHWFramesContext.
Definition: hwcontext.c:502
#define fail()
Definition: checkasm.h:123
int av_frame_copy(AVFrame *dst, const AVFrame *src)
Copy the frame data from src to dst.
Definition: frame.c:812
const AVHWAccel * hwaccel
If this configuration uses a hwaccel, a pointer to it.
Definition: hwconfig.h:39
#define FF_CODEC_CAP_EXPORTS_CROPPING
The decoder sets the cropping fields in the output frames manually.
Definition: internal.h:66
int priv_data_size
Size of the private data to allocate in AVCodecInternal.hwaccel_priv_data.
Definition: avcodec.h:2554
int flags
A combination of AV_PKT_FLAG values.
Definition: packet.h:369
int av_bsf_init(AVBSFContext *ctx)
Prepare the filter for use, after all the parameters and options have been set.
Definition: bsf.c:147
reference-counted frame API
uint64_t channel_layout
Audio channel layout.
Definition: avcodec.h:1237
uint32_t end_display_time
Definition: avcodec.h:2697
int64_t pts
Same as packet pts, in AV_TIME_BASE.
Definition: avcodec.h:2700
uint64_t channel_layout
Channel layout of the audio data.
Definition: frame.h:485
int props
Codec properties, a combination of AV_CODEC_PROP_* flags.
Definition: codec_desc.h:54
size_t crop_top
Definition: frame.h:668
common internal API header
uint64_t flags
Combination of AV_PIX_FMT_FLAG_...
Definition: pixdesc.h:106
int initial_pool_size
Initial size of the frame pool.
Definition: hwcontext.h:199
int av_bsf_list_parse_str(const char *str, AVBSFContext **bsf_lst)
Parse string describing list of bitstream filters and create single AVBSFContext describing the whole...
Definition: bsf.c:522
int av_packet_copy_props(AVPacket *dst, const AVPacket *src)
Copy only "properties" fields from src to dst.
Definition: avpacket.c:572
The codec supports this format by some ad-hoc method.
Definition: codec.h:418
int channels
number of audio channels, only used for audio.
Definition: frame.h:614
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:545
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:1655
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:1796
const char * name
Name of the hardware accelerated codec.
Definition: avcodec.h:2416
#define FFMIN(a, b)
Definition: common.h:96
AVBufferRef ** extended_buf
For planar audio which requires more than AV_NUM_DATA_POINTERS AVBufferRef pointers, this array will hold all the references which cannot fit into AVFrame.buf.
Definition: frame.h:513
int channels
Definition: decode.c:63
AVFrame * compat_decode_frame
Definition: internal.h:187
int width
picture width / height.
Definition: avcodec.h:699
uint8_t w
Definition: llviddspenc.c:38
AVBufferRef * hw_frames_ctx
A reference to the AVHWFramesContext describing the input (for encoding) or output (decoding) frames...
Definition: avcodec.h:2226
static int add_metadata_from_side_data(const AVPacket *avpkt, AVFrame *frame)
Definition: decode.c:1672
static int compat_decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, const AVPacket *pkt)
Definition: decode.c:725
AVPacket * in_pkt
Definition: internal.h:112
This side data should be associated with a video stream and corresponds to the AVSphericalMapping str...
Definition: packet.h:228
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:119
AVFormatContext * ctx
Definition: movenc.c:48
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
Definition: avcodec.h:1140
AVFrameSideDataType
Definition: frame.h:48
#define AVERROR_INPUT_CHANGED
Input changed between calls. Reconfiguration is required. (can be OR-ed with AVERROR_OUTPUT_CHANGED) ...
Definition: error.h:73
uint16_t format
Definition: avcodec.h:2695
#define s(width, name)
Definition: cbs_vp9.c:257
The data contains an ICC profile as an opaque octet buffer following the format described by ISO 1507...
Definition: frame.h:143
#define FF_DEBUG_BUFFERS
Definition: avcodec.h:1633
int64_t reordered_opaque
opaque 64-bit number (generally a PTS) that will be reordered and output in AVFrame.reordered_opaque
Definition: avcodec.h:1683
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: avcodec.h:1666
static int av_bprint_is_complete(const AVBPrint *buf)
Test if the print buffer is complete (not truncated).
Definition: bprint.h:185
const char * bsfs
Decoding only, a comma-separated list of bitstream filters to apply to packets before decoding...
Definition: codec.h:312
DecodeSimpleContext ds
Definition: internal.h:140
char * sub_charenc
Character encoding of the input subtitles file.
Definition: avcodec.h:2116
static int audio_get_buffer(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:1556
int draining
checks API usage: after codec draining, flush is required to resume operation
Definition: internal.h:172
#define FF_ARRAY_ELEMS(a)
int thread_count
thread count is used to decide how many independent tasks should be passed to execute() ...
Definition: avcodec.h:1785
int linesize[4]
Definition: decode.c:61
int sub_charenc_mode
Subtitles character encoding mode.
Definition: avcodec.h:2124
if(ret)
void avcodec_flush_buffers(AVCodecContext *avctx)
Reset the internal codec state / flush internal buffers.
Definition: utils.c:1064
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
int(* post_process)(void *logctx, AVFrame *frame)
The callback to perform some delayed processing on the frame right before it is returned to the calle...
Definition: decode.h:45
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:381
Content light level (based on CTA-861.3).
Definition: packet.h:235
int attribute_align_arg avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
Supply raw packet data as input to a decoder.
Definition: decode.c:586
int(* decode)(struct AVCodecContext *, void *outdata, int *outdata_size, struct AVPacket *avpkt)
Definition: codec.h:282
void(* post_process_opaque_free)(void *opaque)
Definition: decode.h:47
#define AV_LOG_INFO
Standard information.
Definition: log.h:205
int av_bsf_send_packet(AVBSFContext *ctx, AVPacket *pkt)
Submit a packet for filtering.
Definition: bsf.c:200
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:253
int avcodec_default_get_buffer2(AVCodecContext *avctx, AVFrame *frame, int flags)
The default callback for AVCodecContext.get_buffer2().
Definition: decode.c:1648
Libavcodec external API header.
enum AVMediaType codec_type
Definition: avcodec.h:534
int compat_decode_warned
Definition: internal.h:180
int64_t pkt_duration
duration of the corresponding packet, expressed in AVStream->time_base units, 0 if unknown...
Definition: frame.h:587
A list of zero terminated key/value strings.
Definition: packet.h:172
int attribute_align_arg avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture, int *got_picture_ptr, const AVPacket *avpkt)
Decode the video frame of size avpkt->size from avpkt->data into picture.
Definition: decode.c:806
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: frame.h:84
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:260
int av_frame_is_writable(AVFrame *frame)
Check if the frame data is writable.
Definition: frame.c:607
int sample_rate
samples per second
Definition: avcodec.h:1186
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:339
int initial_sample_rate
Definition: internal.h:201
int debug
debug
Definition: avcodec.h:1611
static AVRational av_make_q(int num, int den)
Create an AVRational.
Definition: rational.h:71
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
const AVCodecHWConfig * avcodec_get_hw_config(const AVCodec *codec, int index)
Retrieve supported hardware configurations for a codec.
Definition: utils.c:1813
main external API structure.
Definition: avcodec.h:526
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:606
int skip_samples_multiplier
Definition: internal.h:192
uint8_t * data
The data buffer.
Definition: buffer.h:89
int av_bsf_receive_packet(AVBSFContext *ctx, AVPacket *pkt)
Retrieve a filtered packet.
Definition: bsf.c:226
static int reget_buffer_internal(AVCodecContext *avctx, AVFrame *frame, int flags)
Definition: decode.c:1923
void avsubtitle_free(AVSubtitle *sub)
Free all allocated data in the given subtitle struct.
Definition: utils.c:1109
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:396
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1854
uint8_t * data
Definition: frame.h:216
#define AV_CODEC_PROP_BITMAP_SUB
Subtitle codec is bitmap based Decoded AVSubtitle data can be read from the AVSubtitleRect->pict fiel...
Definition: codec_desc.h:97
int av_samples_copy(uint8_t **dst, uint8_t *const *src, int dst_offset, int src_offset, int nb_samples, int nb_channels, enum AVSampleFormat sample_fmt)
Copy samples from src to dst.
Definition: samplefmt.c:213
size_t crop_right
Definition: frame.h:671
int64_t max_samples
The number of samples per frame to maximally accept.
Definition: avcodec.h:2344
AVBufferRef * av_buffer_allocz(int size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
Definition: buffer.c:83
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:50
int coded_height
Definition: avcodec.h:714
int64_t reordered_opaque
reordered opaque 64 bits (generally an integer or a double precision float PTS but can be anything)...
Definition: frame.h:475
int sample_rate
Sample rate of the audio data.
Definition: frame.h:480
int(* get_buffer2)(struct AVCodecContext *s, AVFrame *frame, int flags)
This callback is called at the beginning of each frame to get data buffer(s) for it.
Definition: avcodec.h:1341
int showed_multi_packet_warning
Definition: internal.h:190
int av_image_fill_linesizes(int linesizes[4], enum AVPixelFormat pix_fmt, int width)
Fill plane linesizes for an image with pixel format pix_fmt and width width.
Definition: imgutils.c:89
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, int size)
Add a new side data to a frame.
Definition: frame.c:739
const char * av_hwdevice_get_type_name(enum AVHWDeviceType type)
Get the string name of an AVHWDeviceType.
Definition: hwcontext.c:92
#define AV_CODEC_CAP_SUBFRAMES
Codec can output multiple frames per AVPacket. Normally demuxers return one frame at a time...
Definition: codec.h:93
void av_buffer_pool_uninit(AVBufferPool **ppool)
Mark the pool as being available for freeing.
Definition: buffer.c:276
int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples, enum AVSampleFormat sample_fmt, int align)
Get the required buffer size for the given audio parameters.
Definition: samplefmt.c:119
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:1154
Rational number (pair of numerator and denominator).
Definition: rational.h:58
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
Definition: avcodec.h:1147
int avcodec_parameters_from_context(AVCodecParameters *par, const AVCodecContext *codec)
Fill the parameters struct based on the values from the supplied codec context.
Definition: utils.c:2092
Recommends skipping the specified number of samples.
Definition: packet.h:156
This struct describes a set or pool of "hardware" frames (i.e.
Definition: hwcontext.h:124
int sub_text_format
Control the form of AVSubtitle.rects[N]->ass.
Definition: avcodec.h:2233
int av_image_fill_plane_sizes(size_t sizes[4], enum AVPixelFormat pix_fmt, int height, const ptrdiff_t linesizes[4])
Fill plane sizes for an image with pixel format pix_fmt and height height.
Definition: imgutils.c:111
int skip_samples
Number of audio samples to skip at the start of the next decoded frame.
Definition: internal.h:162
#define STRIDE_ALIGN
Definition: internal.h:108
enum AVChromaLocation chroma_location
Definition: frame.h:565
int(* frame_params)(AVCodecContext *avctx, AVBufferRef *hw_frames_ctx)
Fill the given hw_frames context with current codec parameters.
Definition: avcodec.h:2569
int64_t best_effort_timestamp
frame timestamp estimated using various heuristics, in stream time base
Definition: frame.h:572
attribute_deprecated int refcounted_frames
If non-zero, the decoded audio and video frames returned from avcodec_decode_video2() and avcodec_dec...
Definition: avcodec.h:1357
AVBufferRef * pool
Definition: internal.h:136
#define AV_CODEC_FLAG_DROPCHANGED
Don't output frames whose parameters differ from first decoded frame in stream.
Definition: avcodec.h:292
int size
Size of data in bytes.
Definition: buffer.h:93
The codec supports this format by some internal method.
Definition: codec.h:409
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:566
static int64_t pts
#define SIZE_SPECIFIER
Definition: internal.h:229
#define flags(name, subs,...)
Definition: cbs_av1.c:560
This side data should be associated with an audio stream and contains ReplayGain information in form ...
Definition: packet.h:99
#define FF_CODEC_CAP_SETS_PKT_DTS
Decoders marked with FF_CODEC_CAP_SETS_PKT_DTS want to set AVFrame.pkt_dts manually.
Definition: internal.h:55
int ff_decode_get_hw_frames_ctx(AVCodecContext *avctx, enum AVHWDeviceType dev_type)
Make sure avctx.hw_frames_ctx is set.
Definition: decode.c:1130
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:322
static void decode_data_free(void *opaque, uint8_t *data)
Definition: decode.c:1817
#define UTF8_MAX_BYTES
Definition: decode.c:827
attribute_deprecated int64_t pkt_pts
PTS copied from the AVPacket that was decoded to produce this frame.
Definition: frame.h:409
Timecode which conforms to SMPTE ST 12-1:2014.
Definition: packet.h:291
void av_bprint_clear(AVBPrint *buf)
Reset the string to "" but keep internal allocated data.
Definition: bprint.c:227
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
AVPacket * buffer_pkt
buffers for using new encode/decode API through legacy API
Definition: internal.h:177
int64_t pkt_dts
DTS copied from the AVPacket that triggered returning this frame.
Definition: frame.h:417
A reference to a data buffer.
Definition: buffer.h:81
int extra_hw_frames
Definition: avcodec.h:2328
static int unrefcount_frame(AVCodecInternal *avci, AVFrame *frame)
Definition: decode.c:158
AVPacketSideData * side_data
Additional packet data that can be provided by the container.
Definition: packet.h:374
This struct stores per-frame lavc-internal data and is attached to it via private_ref.
Definition: decode.h:34
int avcodec_get_hw_frames_parameters(AVCodecContext *avctx, AVBufferRef *device_ref, enum AVPixelFormat hw_pix_fmt, AVBufferRef **out_frames_ref)
Create and return a AVHWFramesContext with values adequate for hardware decoding. ...
Definition: decode.c:1181
static enum AVPixelFormat hw_pix_fmt
Definition: hw_decode.c:46
#define AV_PKT_FLAG_DISCARD
Flag is used to discard packets which are required to maintain valid decoder state but are not requir...
Definition: packet.h:403
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:84
common internal api header.
common internal and external API header
AVBufferPool * av_buffer_pool_init(int size, AVBufferRef *(*alloc)(int size))
Allocate and initialize a buffer pool.
Definition: buffer.c:239
#define AV_HWACCEL_CODEC_CAP_EXPERIMENTAL
HWAccel is experimental and is thus avoided in favor of non experimental codecs.
Definition: avcodec.h:2576
#define AV_CODEC_CAP_PARAM_CHANGE
Codec supports changed parameters at any point.
Definition: codec.h:114
AVBufferRef * av_hwframe_ctx_alloc(AVBufferRef *device_ref_in)
Allocate an AVHWFramesContext tied to a given device context.
Definition: hwcontext.c:247
static void insert_ts(AVBPrint *buf, int ts)
Definition: decode.c:906
void * hwaccel_priv_data
hwaccel-specific private data
Definition: internal.h:167
int caps_internal
Internal codec capabilities.
Definition: codec.h:306
int den
Denominator.
Definition: rational.h:60
int av_image_check_size2(unsigned int w, unsigned int h, int64_t max_pixels, enum AVPixelFormat pix_fmt, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of a plane of an image with...
Definition: imgutils.c:288
uint64_t initial_channel_layout
Definition: internal.h:203
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
Definition: avcodec.h:215
static int video_get_buffer(AVCodecContext *s, AVFrame *pic)
Definition: decode.c:1601
Formatted text, the ass field must be set by the decoder and is authoritative.
Definition: avcodec.h:2654
#define FF_PSEUDOPAL
Definition: internal.h:335
AVHWDeviceType
Definition: hwcontext.h:27
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:85
int channels
number of audio channels
Definition: avcodec.h:1187
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:561
char * ass
0 terminated ASS/SSA compatible event line.
Definition: avcodec.h:2689
#define AV_FRAME_FLAG_DISCARD
A flag to mark the frames which need to be decoded, but shouldn't be output.
Definition: frame.h:537
int flags2
AV_CODEC_FLAG2_*.
Definition: avcodec.h:613
enum AVColorPrimaries color_primaries
Definition: frame.h:554
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed...
Definition: packet.h:362
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
int64_t pts_correction_last_dts
DTS of the last frame.
Definition: avcodec.h:2109
size_t compat_decode_partial_size
Definition: internal.h:186
#define AV_CODEC_FLAG_TRUNCATED
Input bitstream might be truncated at a random location instead of only at frame boundaries.
Definition: avcodec.h:317
int frame_number
Frame counter, set by libavcodec.
Definition: avcodec.h:1217
static int update_frame_pool(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:1445
int height
Definition: frame.h:366
#define av_freep(p)
int64_t pts_correction_num_faulty_pts
Number of incorrect PTS values so far.
Definition: avcodec.h:2106
enum AVColorTransferCharacteristic color_trc
Definition: frame.h:556
void av_bsf_free(AVBSFContext **pctx)
Free a bitstream filter context and everything associated with it; write NULL into the supplied point...
Definition: bsf.c:40
AVBufferRef * av_buffer_pool_get(AVBufferPool *pool)
Allocate a new AVBuffer, reusing an old buffer from the pool when available.
Definition: buffer.c:337
Recommends skipping the specified number of samples.
Definition: frame.h:108
void * hwaccel_priv
Per-frame private data for hwaccels.
Definition: decode.h:52
#define av_malloc_array(a, b)
enum AVHWDeviceType device_type
The device type associated with the configuration.
Definition: codec.h:442
#define FF_SUB_TEXT_FMT_ASS_WITH_TIMINGS
Definition: avcodec.h:2236
#define FF_SUB_CHARENC_MODE_IGNORE
neither convert the subtitles, nor check them for valid UTF-8
Definition: avcodec.h:2128
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2489
ReplayGain information in the form of the AVReplayGain struct.
Definition: frame.h:76
enum AVSubtitleType type
Definition: avcodec.h:2680
uint8_t ** extended_data
pointers to the data planes/channels.
Definition: frame.h:355
int format
Definition: decode.c:58
AVBufferRef * hw_device_ctx
A reference to the AVHWDeviceContext describing the device which will be used by a hardware encoder/d...
Definition: avcodec.h:2278
int pkt_size
size of the corresponding packet containing the compressed frame.
Definition: frame.h:623
float min
Stereoscopic 3d metadata.
Definition: frame.h:63
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
static double val(void *priv, double ch)
Definition: aeval.c:76
This structure stores compressed data.
Definition: packet.h:340
AVCodecParameters * par_in
Parameters of the input stream.
Definition: bsf.h:77
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:509
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:374
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
Definition: avcodec.h:1589
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:50
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:356
enum AVPixelFormat sw_pix_fmt
Nominal unaccelerated pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:2076
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:671
int i
Definition: input.c:407
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
static void hwaccel_uninit(AVCodecContext *avctx)
Definition: decode.c:1267
#define tb
Definition: regdef.h:68
#define AV_WL32(p, v)
Definition: intreadwrite.h:426
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:58
void * av_mallocz_array(size_t nmemb, size_t size)
Definition: mem.c:190
This side data should be associated with an audio stream and corresponds to enum AVAudioServiceType.
Definition: packet.h:120
static uint8_t tmp[11]
Definition: aes_ctr.c:26