FFmpeg
decode.c
1 /*
2  * generic decoding-related code
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <stdint.h>
22 #include <string.h>
23 
24 #include "config.h"
25 
26 #if CONFIG_ICONV
27 # include <iconv.h>
28 #endif
29 
30 #include "libavutil/avassert.h"
31 #include "libavutil/avstring.h"
32 #include "libavutil/bprint.h"
33 #include "libavutil/common.h"
34 #include "libavutil/frame.h"
35 #include "libavutil/hwcontext.h"
36 #include "libavutil/imgutils.h"
37 #include "libavutil/internal.h"
38 #include "libavutil/intmath.h"
39 
40 #include "avcodec.h"
41 #include "bytestream.h"
42 #include "decode.h"
43 #include "hwaccel.h"
44 #include "internal.h"
45 #include "thread.h"
46 
47 static int apply_param_change(AVCodecContext *avctx, const AVPacket *avpkt)
48 {
49  int size = 0, ret;
50  const uint8_t *data;
51  uint32_t flags;
52  int64_t val;
53 
54  data = av_packet_get_side_data(avpkt, AV_PKT_DATA_PARAM_CHANGE, &size);
55  if (!data)
56  return 0;
57 
58  if (!(avctx->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE)) {
59  av_log(avctx, AV_LOG_ERROR, "This decoder does not support parameter "
60  "changes, but PARAM_CHANGE side data was sent to it.\n");
61  ret = AVERROR(EINVAL);
62  goto fail2;
63  }
64 
65  if (size < 4)
66  goto fail;
67 
68  flags = bytestream_get_le32(&data);
69  size -= 4;
70 
71  if (flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT) {
72  if (size < 4)
73  goto fail;
74  val = bytestream_get_le32(&data);
75  if (val <= 0 || val > INT_MAX) {
76  av_log(avctx, AV_LOG_ERROR, "Invalid channel count");
77  ret = AVERROR_INVALIDDATA;
78  goto fail2;
79  }
80  avctx->channels = val;
81  size -= 4;
82  }
83  if (flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT) {
84  if (size < 8)
85  goto fail;
86  avctx->channel_layout = bytestream_get_le64(&data);
87  size -= 8;
88  }
89  if (flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE) {
90  if (size < 4)
91  goto fail;
92  val = bytestream_get_le32(&data);
93  if (val <= 0 || val > INT_MAX) {
94  av_log(avctx, AV_LOG_ERROR, "Invalid sample rate");
95  ret = AVERROR_INVALIDDATA;
96  goto fail2;
97  }
98  avctx->sample_rate = val;
99  size -= 4;
100  }
101  if (flags & AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS) {
102  if (size < 8)
103  goto fail;
104  avctx->width = bytestream_get_le32(&data);
105  avctx->height = bytestream_get_le32(&data);
106  size -= 8;
107  ret = ff_set_dimensions(avctx, avctx->width, avctx->height);
108  if (ret < 0)
109  goto fail2;
110  }
111 
112  return 0;
113 fail:
114  av_log(avctx, AV_LOG_ERROR, "PARAM_CHANGE side data too small.\n");
115  ret = AVERROR_INVALIDDATA;
116 fail2:
117  if (ret < 0) {
118  av_log(avctx, AV_LOG_ERROR, "Error applying parameter changes.\n");
119  if (avctx->err_recognition & AV_EF_EXPLODE)
120  return ret;
121  }
122  return 0;
123 }
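The side data consumed above is a flat little-endian blob: a 32-bit flags word followed, in this order, by the optional channel-count (le32), channel-layout (le64), sample-rate (le32) and width/height (2x le32) fields. As a rough illustration (not part of this file; the helper name is invented), a sender could attach a sample-rate change like this:

#include "libavcodec/avcodec.h"
#include "libavutil/intreadwrite.h"

/* Illustrative sketch: attach a sample-rate change to a packet.
 * The layout must match apply_param_change(): le32 flags, then le32 rate. */
static int attach_sample_rate_change(AVPacket *pkt, int new_sample_rate)
{
    uint8_t *sd = av_packet_new_side_data(pkt, AV_PKT_DATA_PARAM_CHANGE, 8);
    if (!sd)
        return AVERROR(ENOMEM);
    AV_WL32(sd,     AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE); /* flags word */
    AV_WL32(sd + 4, new_sample_rate);                       /* new rate   */
    return 0;
}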
124 
125 static int extract_packet_props(AVCodecInternal *avci, const AVPacket *pkt)
126 {
127  int ret = 0;
128 
129  av_packet_unref(avci->last_pkt_props);
130  if (pkt) {
131  ret = av_packet_copy_props(avci->last_pkt_props, pkt);
132  if (!ret)
133  avci->last_pkt_props->size = pkt->size; // HACK: Needed for ff_init_buffer_info().
134  }
135  return ret;
136 }
137 
138 static int unrefcount_frame(AVCodecInternal *avci, AVFrame *frame)
139 {
140  int ret;
141 
142  /* move the original frame to our backup */
143  av_frame_unref(avci->to_free);
144  av_frame_move_ref(avci->to_free, frame);
145 
146  /* now copy everything except the AVBufferRefs back
147  * note that we make a COPY of the side data, so calling av_frame_free() on
148  * the caller's frame will work properly */
149  ret = av_frame_copy_props(frame, avci->to_free);
150  if (ret < 0)
151  return ret;
152 
153  memcpy(frame->data, avci->to_free->data, sizeof(frame->data));
154  memcpy(frame->linesize, avci->to_free->linesize, sizeof(frame->linesize));
155  if (avci->to_free->extended_data != avci->to_free->data) {
156  int planes = avci->to_free->channels;
157  int size = planes * sizeof(*frame->extended_data);
158 
159  if (!size) {
160  av_frame_unref(frame);
161  return AVERROR_BUG;
162  }
163 
164  frame->extended_data = av_malloc(size);
165  if (!frame->extended_data) {
166  av_frame_unref(frame);
167  return AVERROR(ENOMEM);
168  }
169  memcpy(frame->extended_data, avci->to_free->extended_data,
170  size);
171  } else
172  frame->extended_data = frame->data;
173 
174  frame->format = avci->to_free->format;
175  frame->width = avci->to_free->width;
176  frame->height = avci->to_free->height;
177  frame->channel_layout = avci->to_free->channel_layout;
178  frame->nb_samples = avci->to_free->nb_samples;
179  frame->channels = avci->to_free->channels;
180 
181  return 0;
182 }
183 
184 static int bsfs_init(AVCodecContext *avctx)
185 {
186  AVCodecInternal *avci = avctx->internal;
187  DecodeFilterContext *s = &avci->filter;
188  const char *bsfs_str;
189  int ret;
190 
191  if (s->nb_bsfs)
192  return 0;
193 
194  bsfs_str = avctx->codec->bsfs ? avctx->codec->bsfs : "null";
195  while (bsfs_str && *bsfs_str) {
196  AVBSFContext **tmp;
197  const AVBitStreamFilter *filter;
198  char *bsf;
199 
200  bsf = av_get_token(&bsfs_str, ",");
201  if (!bsf) {
202  ret = AVERROR(ENOMEM);
203  goto fail;
204  }
205 
206  filter = av_bsf_get_by_name(bsf);
207  if (!filter) {
208  av_log(avctx, AV_LOG_ERROR, "A non-existing bitstream filter %s "
209  "requested by a decoder. This is a bug, please report it.\n",
210  bsf);
211  ret = AVERROR_BUG;
212  av_freep(&bsf);
213  goto fail;
214  }
215  av_freep(&bsf);
216 
217  tmp = av_realloc_array(s->bsfs, s->nb_bsfs + 1, sizeof(*s->bsfs));
218  if (!tmp) {
219  ret = AVERROR(ENOMEM);
220  goto fail;
221  }
222  s->bsfs = tmp;
223  s->nb_bsfs++;
224 
225  ret = av_bsf_alloc(filter, &s->bsfs[s->nb_bsfs - 1]);
226  if (ret < 0)
227  goto fail;
228 
229  if (s->nb_bsfs == 1) {
230  /* We do not currently have an API for passing the input timebase into decoders,
231  * but no filters used here should actually need it.
232  * So we make up some plausible-looking number (the MPEG 90kHz timebase) */
233  s->bsfs[s->nb_bsfs - 1]->time_base_in = (AVRational){ 1, 90000 };
234  ret = avcodec_parameters_from_context(s->bsfs[s->nb_bsfs - 1]->par_in,
235  avctx);
236  } else {
237  s->bsfs[s->nb_bsfs - 1]->time_base_in = s->bsfs[s->nb_bsfs - 2]->time_base_out;
238  ret = avcodec_parameters_copy(s->bsfs[s->nb_bsfs - 1]->par_in,
239  s->bsfs[s->nb_bsfs - 2]->par_out);
240  }
241  if (ret < 0)
242  goto fail;
243 
244  ret = av_bsf_init(s->bsfs[s->nb_bsfs - 1]);
245  if (ret < 0)
246  goto fail;
247  }
248 
249  return 0;
250 fail:
251  ff_decode_bsfs_uninit(avctx);
252  return ret;
253 }
254 
255 /* try to get one output packet from the filter chain */
256 static int bsfs_poll(AVCodecContext *avctx, AVPacket *pkt)
257 {
258  DecodeFilterContext *s = &avctx->internal->filter;
259  int idx, ret;
260 
261  /* start with the last filter in the chain */
262  idx = s->nb_bsfs - 1;
263  while (idx >= 0) {
264  /* request a packet from the currently selected filter */
265  ret = av_bsf_receive_packet(s->bsfs[idx], pkt);
266  if (ret == AVERROR(EAGAIN)) {
267  /* no packets available, try the next filter up the chain */
268  ret = 0;
269  idx--;
270  continue;
271  } else if (ret < 0 && ret != AVERROR_EOF) {
272  return ret;
273  }
274 
275  /* got a packet or EOF -- pass it to the caller or to the next filter
276  * down the chain */
277  if (idx == s->nb_bsfs - 1) {
278  return ret;
279  } else {
280  idx++;
281  ret = av_bsf_send_packet(s->bsfs[idx], ret < 0 ? NULL : pkt);
282  if (ret < 0) {
283  av_log(avctx, AV_LOG_ERROR,
284  "Error pre-processing a packet before decoding\n");
285  av_packet_unref(pkt);
286  return ret;
287  }
288  }
289  }
290 
291  return AVERROR(EAGAIN);
292 }
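The chain above is driven with the same push/pull pattern that the public bitstream-filter API exposes. A minimal caller of a single, already-initialized filter might look roughly like this (a sketch, not part of this file):

#include "libavcodec/avcodec.h"

/* Sketch: run one input packet through an AVBSFContext and drain its output. */
static int filter_one_packet(AVBSFContext *bsf, AVPacket *in, AVPacket *out)
{
    int ret = av_bsf_send_packet(bsf, in);  /* sending NULL instead signals EOF */
    if (ret < 0)
        return ret;
    while ((ret = av_bsf_receive_packet(bsf, out)) == 0) {
        /* consume *out here, then release it before pulling again */
        av_packet_unref(out);
    }
    /* EAGAIN: the filter wants more input; EOF: it is fully drained */
    return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
}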
293 
294 int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt)
295 {
296  AVCodecInternal *avci = avctx->internal;
297  int ret;
298 
299  if (avci->draining)
300  return AVERROR_EOF;
301 
302  ret = bsfs_poll(avctx, pkt);
303  if (ret == AVERROR_EOF)
304  avci->draining = 1;
305  if (ret < 0)
306  return ret;
307 
308  ret = extract_packet_props(avctx->internal, pkt);
309  if (ret < 0)
310  goto finish;
311 
312  ret = apply_param_change(avctx, pkt);
313  if (ret < 0)
314  goto finish;
315 
316  if (avctx->codec->receive_frame)
317  avci->compat_decode_consumed += pkt->size;
318 
319  return 0;
320 finish:
321  av_packet_unref(pkt);
322  return ret;
323 }
324 
325 /**
326  * Attempt to guess proper monotonic timestamps for decoded video frames
327  * which might have incorrect times. Input timestamps may wrap around, in
328  * which case the output will as well.
329  *
330  * @param pts the pts field of the decoded AVPacket, as passed through
331  * AVFrame.pts
332  * @param dts the dts field of the decoded AVPacket
333  * @return one of the input values, may be AV_NOPTS_VALUE
334  */
335 static int64_t guess_correct_pts(AVCodecContext *ctx,
336  int64_t reordered_pts, int64_t dts)
337 {
338  int64_t pts = AV_NOPTS_VALUE;
339 
340  if (dts != AV_NOPTS_VALUE) {
341  ctx->pts_correction_num_faulty_dts += dts <= ctx->pts_correction_last_dts;
342  ctx->pts_correction_last_dts = dts;
343  } else if (reordered_pts != AV_NOPTS_VALUE)
344  ctx->pts_correction_last_dts = reordered_pts;
345 
346  if (reordered_pts != AV_NOPTS_VALUE) {
347  ctx->pts_correction_num_faulty_pts += reordered_pts <= ctx->pts_correction_last_pts;
348  ctx->pts_correction_last_pts = reordered_pts;
349  } else if(dts != AV_NOPTS_VALUE)
350  ctx->pts_correction_last_pts = dts;
351 
352  if ((ctx->pts_correction_num_faulty_pts<=ctx->pts_correction_num_faulty_dts || dts == AV_NOPTS_VALUE)
353  && reordered_pts != AV_NOPTS_VALUE)
354  pts = reordered_pts;
355  else
356  pts = dts;
357 
358  return pts;
359 }
360 
361 /*
362  * The core of the receive_frame_wrapper for the decoders implementing
363  * the simple API. Certain decoders might consume partial packets without
364  * returning any output, so this function needs to be called in a loop until it
365  * returns EAGAIN.
366  **/
367 static int decode_simple_internal(AVCodecContext *avctx, AVFrame *frame)
368 {
369  AVCodecInternal *avci = avctx->internal;
370  DecodeSimpleContext *ds = &avci->ds;
371  AVPacket *pkt = ds->in_pkt;
372  // copy to ensure we do not change pkt
373  int got_frame, actual_got_frame;
374  int ret;
375 
376  if (!pkt->data && !avci->draining) {
377  av_packet_unref(pkt);
378  ret = ff_decode_get_packet(avctx, pkt);
379  if (ret < 0 && ret != AVERROR_EOF)
380  return ret;
381  }
382 
383  // Some codecs (at least wma lossless) will crash when feeding drain packets
384  // after EOF was signaled.
385  if (avci->draining_done)
386  return AVERROR_EOF;
387 
388  if (!pkt->data &&
389  !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY ||
390  avctx->active_thread_type & FF_THREAD_FRAME))
391  return AVERROR_EOF;
392 
393  got_frame = 0;
394 
395  if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME) {
396  ret = ff_thread_decode_frame(avctx, frame, &got_frame, pkt);
397  } else {
398  ret = avctx->codec->decode(avctx, frame, &got_frame, pkt);
399 
400  if (!(avctx->codec->caps_internal & FF_CODEC_CAP_SETS_PKT_DTS))
401  frame->pkt_dts = pkt->dts;
402  if (avctx->codec->type == AVMEDIA_TYPE_VIDEO) {
403  if(!avctx->has_b_frames)
404  frame->pkt_pos = pkt->pos;
405  //FIXME these should be under if(!avctx->has_b_frames)
406  /* get_buffer is supposed to set frame parameters */
407  if (!(avctx->codec->capabilities & AV_CODEC_CAP_DR1)) {
408  if (!frame->sample_aspect_ratio.num) frame->sample_aspect_ratio = avctx->sample_aspect_ratio;
409  if (!frame->width) frame->width = avctx->width;
410  if (!frame->height) frame->height = avctx->height;
411  if (frame->format == AV_PIX_FMT_NONE) frame->format = avctx->pix_fmt;
412  }
413  }
414  }
415  emms_c();
416  actual_got_frame = got_frame;
417 
418  if (avctx->codec->type == AVMEDIA_TYPE_VIDEO) {
419  if (frame->flags & AV_FRAME_FLAG_DISCARD)
420  got_frame = 0;
421  if (got_frame)
422  frame->best_effort_timestamp = guess_correct_pts(avctx,
423  frame->pts,
424  frame->pkt_dts);
425  } else if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) {
426  uint8_t *side;
427  int side_size;
428  uint32_t discard_padding = 0;
429  uint8_t skip_reason = 0;
430  uint8_t discard_reason = 0;
431 
432  if (ret >= 0 && got_frame) {
433  frame->best_effort_timestamp = guess_correct_pts(avctx,
434  frame->pts,
435  frame->pkt_dts);
436  if (frame->format == AV_SAMPLE_FMT_NONE)
437  frame->format = avctx->sample_fmt;
438  if (!frame->channel_layout)
439  frame->channel_layout = avctx->channel_layout;
440  if (!frame->channels)
441  frame->channels = avctx->channels;
442  if (!frame->sample_rate)
443  frame->sample_rate = avctx->sample_rate;
444  }
445 
446  side= av_packet_get_side_data(avctx->internal->last_pkt_props, AV_PKT_DATA_SKIP_SAMPLES, &side_size);
447  if(side && side_size>=10) {
448  avctx->internal->skip_samples = AV_RL32(side) * avctx->internal->skip_samples_multiplier;
449  discard_padding = AV_RL32(side + 4);
450  av_log(avctx, AV_LOG_DEBUG, "skip %d / discard %d samples due to side data\n",
451  avctx->internal->skip_samples, (int)discard_padding);
452  skip_reason = AV_RL8(side + 8);
453  discard_reason = AV_RL8(side + 9);
454  }
455 
456  if ((frame->flags & AV_FRAME_FLAG_DISCARD) && got_frame &&
457  !(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
458  avctx->internal->skip_samples = FFMAX(0, avctx->internal->skip_samples - frame->nb_samples);
459  got_frame = 0;
460  }
461 
462  if (avctx->internal->skip_samples > 0 && got_frame &&
463  !(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
464  if(frame->nb_samples <= avctx->internal->skip_samples){
465  got_frame = 0;
466  avctx->internal->skip_samples -= frame->nb_samples;
467  av_log(avctx, AV_LOG_DEBUG, "skip whole frame, skip left: %d\n",
468  avctx->internal->skip_samples);
469  } else {
470  av_samples_copy(frame->extended_data, frame->extended_data, 0, avctx->internal->skip_samples,
471  frame->nb_samples - avctx->internal->skip_samples, avctx->channels, frame->format);
472  if(avctx->pkt_timebase.num && avctx->sample_rate) {
473  int64_t diff_ts = av_rescale_q(avctx->internal->skip_samples,
474  (AVRational){1, avctx->sample_rate},
475  avctx->pkt_timebase);
476  if(frame->pts!=AV_NOPTS_VALUE)
477  frame->pts += diff_ts;
478 #if FF_API_PKT_PTS
479 FF_DISABLE_DEPRECATION_WARNINGS
480  if(frame->pkt_pts!=AV_NOPTS_VALUE)
481  frame->pkt_pts += diff_ts;
482 FF_ENABLE_DEPRECATION_WARNINGS
483 #endif
484  if(frame->pkt_dts!=AV_NOPTS_VALUE)
485  frame->pkt_dts += diff_ts;
486  if (frame->pkt_duration >= diff_ts)
487  frame->pkt_duration -= diff_ts;
488  } else {
489  av_log(avctx, AV_LOG_WARNING, "Could not update timestamps for skipped samples.\n");
490  }
491  av_log(avctx, AV_LOG_DEBUG, "skip %d/%d samples\n",
492  avctx->internal->skip_samples, frame->nb_samples);
493  frame->nb_samples -= avctx->internal->skip_samples;
494  avctx->internal->skip_samples = 0;
495  }
496  }
497 
498  if (discard_padding > 0 && discard_padding <= frame->nb_samples && got_frame &&
499  !(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
500  if (discard_padding == frame->nb_samples) {
501  got_frame = 0;
502  } else {
503  if(avctx->pkt_timebase.num && avctx->sample_rate) {
504  int64_t diff_ts = av_rescale_q(frame->nb_samples - discard_padding,
505  (AVRational){1, avctx->sample_rate},
506  avctx->pkt_timebase);
507  frame->pkt_duration = diff_ts;
508  } else {
509  av_log(avctx, AV_LOG_WARNING, "Could not update timestamps for discarded samples.\n");
510  }
511  av_log(avctx, AV_LOG_DEBUG, "discard %d/%d samples\n",
512  (int)discard_padding, frame->nb_samples);
513  frame->nb_samples -= discard_padding;
514  }
515  }
516 
517  if ((avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL) && got_frame) {
518  AVFrameSideData *fside = av_frame_new_side_data(frame, AV_FRAME_DATA_SKIP_SAMPLES, 10);
519  if (fside) {
520  AV_WL32(fside->data, avctx->internal->skip_samples);
521  AV_WL32(fside->data + 4, discard_padding);
522  AV_WL8(fside->data + 8, skip_reason);
523  AV_WL8(fside->data + 9, discard_reason);
524  avctx->internal->skip_samples = 0;
525  }
526  }
527  }
528 
529  if (avctx->codec->type == AVMEDIA_TYPE_AUDIO &&
530  !avci->showed_multi_packet_warning &&
531  ret >= 0 && ret != pkt->size && !(avctx->codec->capabilities & AV_CODEC_CAP_SUBFRAMES)) {
532  av_log(avctx, AV_LOG_WARNING, "Multiple frames in a packet.\n");
533  avci->showed_multi_packet_warning = 1;
534  }
535 
536  if (!got_frame)
537  av_frame_unref(frame);
538 
539  if (ret >= 0 && avctx->codec->type == AVMEDIA_TYPE_VIDEO && !(avctx->flags & AV_CODEC_FLAG_TRUNCATED))
540  ret = pkt->size;
541 
542 #if FF_API_AVCTX_TIMEBASE
543  if (avctx->framerate.num > 0 && avctx->framerate.den > 0)
544  avctx->time_base = av_inv_q(av_mul_q(avctx->framerate, (AVRational){avctx->ticks_per_frame, 1}));
545 #endif
546 
547  /* do not stop draining when actual_got_frame != 0 or ret < 0 */
548  /* got_frame == 0 but actual_got_frame != 0 when frame is discarded */
549  if (avctx->internal->draining && !actual_got_frame) {
550  if (ret < 0) {
551  /* prevent infinite loop if a decoder wrongly always return error on draining */
552  /* reasonable nb_errors_max = maximum b frames + thread count */
553  int nb_errors_max = 20 + (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME ?
554  avctx->thread_count : 1);
555 
556  if (avci->nb_draining_errors++ >= nb_errors_max) {
557  av_log(avctx, AV_LOG_ERROR, "Too many errors when draining, this is a bug. "
558  "Stop draining and force EOF.\n");
559  avci->draining_done = 1;
560  ret = AVERROR_BUG;
561  }
562  } else {
563  avci->draining_done = 1;
564  }
565  }
566 
567  avci->compat_decode_consumed += ret;
568 
569  if (ret >= pkt->size || ret < 0) {
570  av_packet_unref(pkt);
571  } else {
572  int consumed = ret;
573 
574  pkt->data += consumed;
575  pkt->size -= consumed;
576  avci->last_pkt_props->size -= consumed; // See extract_packet_props() comment.
577  pkt->pts = AV_NOPTS_VALUE;
578  pkt->dts = AV_NOPTS_VALUE;
579  avci->last_pkt_props->pts = AV_NOPTS_VALUE;
580  avci->last_pkt_props->dts = AV_NOPTS_VALUE;
581  }
582 
583  if (got_frame)
584  av_assert0(frame->buf[0]);
585 
586  return ret < 0 ? ret : 0;
587 }
588 
589 static int decode_simple_receive_frame(AVCodecContext *avctx, AVFrame *frame)
590 {
591  int ret;
592 
593  while (!frame->buf[0]) {
594  ret = decode_simple_internal(avctx, frame);
595  if (ret < 0)
596  return ret;
597  }
598 
599  return 0;
600 }
601 
602 static int decode_receive_frame_internal(AVCodecContext *avctx, AVFrame *frame)
603 {
604  AVCodecInternal *avci = avctx->internal;
605  int ret;
606 
607  av_assert0(!frame->buf[0]);
608 
609  if (avctx->codec->receive_frame)
610  ret = avctx->codec->receive_frame(avctx, frame);
611  else
612  ret = decode_simple_receive_frame(avctx, frame);
613 
614  if (ret == AVERROR_EOF)
615  avci->draining_done = 1;
616 
617  if (!ret) {
618  /* the only case where decode data is not set should be decoders
619  * that do not call ff_get_buffer() */
620  av_assert0((frame->private_ref && frame->private_ref->size == sizeof(FrameDecodeData)) ||
621  !(avctx->codec->capabilities & AV_CODEC_CAP_DR1));
622 
623  if (frame->private_ref) {
624  FrameDecodeData *fdd = (FrameDecodeData*)frame->private_ref->data;
625 
626  if (fdd->post_process) {
627  ret = fdd->post_process(avctx, frame);
628  if (ret < 0) {
629  av_frame_unref(frame);
630  return ret;
631  }
632  }
633  }
634  }
635 
636  /* free the per-frame decode data */
637  av_buffer_unref(&frame->private_ref);
638 
639  return ret;
640 }
641 
642 int attribute_align_arg avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
643 {
644  AVCodecInternal *avci = avctx->internal;
645  int ret;
646 
647  if (!avcodec_is_open(avctx) || !av_codec_is_decoder(avctx->codec))
648  return AVERROR(EINVAL);
649 
650  if (avctx->internal->draining)
651  return AVERROR_EOF;
652 
653  if (avpkt && !avpkt->size && avpkt->data)
654  return AVERROR(EINVAL);
655 
656  ret = bsfs_init(avctx);
657  if (ret < 0)
658  return ret;
659 
660  av_packet_unref(avci->buffer_pkt);
661  if (avpkt && (avpkt->data || avpkt->side_data_elems)) {
662  ret = av_packet_ref(avci->buffer_pkt, avpkt);
663  if (ret < 0)
664  return ret;
665  }
666 
667  ret = av_bsf_send_packet(avci->filter.bsfs[0], avci->buffer_pkt);
668  if (ret < 0) {
669  av_packet_unref(avci->buffer_pkt);
670  return ret;
671  }
672 
673  if (!avci->buffer_frame->buf[0]) {
674  ret = decode_receive_frame_internal(avctx, avci->buffer_frame);
675  if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
676  return ret;
677  }
678 
679  return 0;
680 }
681 
682 static int apply_cropping(AVCodecContext *avctx, AVFrame *frame)
683 {
684  /* make sure we are noisy about decoders returning invalid cropping data */
685  if (frame->crop_left >= INT_MAX - frame->crop_right ||
686  frame->crop_top >= INT_MAX - frame->crop_bottom ||
687  (frame->crop_left + frame->crop_right) >= frame->width ||
688  (frame->crop_top + frame->crop_bottom) >= frame->height) {
689  av_log(avctx, AV_LOG_WARNING,
690  "Invalid cropping information set by a decoder: "
692  "(frame size %dx%d). This is a bug, please report it\n",
693  frame->crop_left, frame->crop_right, frame->crop_top, frame->crop_bottom,
694  frame->width, frame->height);
695  frame->crop_left = 0;
696  frame->crop_right = 0;
697  frame->crop_top = 0;
698  frame->crop_bottom = 0;
699  return 0;
700  }
701 
702  if (!avctx->apply_cropping)
703  return 0;
704 
705  return av_frame_apply_cropping(frame, avctx->flags & AV_CODEC_FLAG_UNALIGNED ?
706  AV_FRAME_CROP_UNALIGNED : 0);
707 }
708 
709 int attribute_align_arg avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
710 {
711  AVCodecInternal *avci = avctx->internal;
712  int ret;
713 
714  av_frame_unref(frame);
715 
716  if (!avcodec_is_open(avctx) || !av_codec_is_decoder(avctx->codec))
717  return AVERROR(EINVAL);
718 
719  ret = bsfs_init(avctx);
720  if (ret < 0)
721  return ret;
722 
723  if (avci->buffer_frame->buf[0]) {
724  av_frame_move_ref(frame, avci->buffer_frame);
725  } else {
726  ret = decode_receive_frame_internal(avctx, frame);
727  if (ret < 0)
728  return ret;
729  }
730 
731  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
732  ret = apply_cropping(avctx, frame);
733  if (ret < 0) {
734  av_frame_unref(frame);
735  return ret;
736  }
737  }
738 
739  avctx->frame_number++;
740 
741  return 0;
742 }
743 
744 static int compat_decode(AVCodecContext *avctx, AVFrame *frame,
745  int *got_frame, const AVPacket *pkt)
746 {
747  AVCodecInternal *avci = avctx->internal;
748  int ret = 0;
749 
750  av_assert0(avci->compat_decode_consumed == 0);
751 
752  if (avci->draining_done && pkt && pkt->size != 0) {
753  av_log(avctx, AV_LOG_WARNING, "Got unexpected packet after EOF\n");
754  avcodec_flush_buffers(avctx);
755  }
756 
757  *got_frame = 0;
758  avci->compat_decode = 1;
759 
760  if (avci->compat_decode_partial_size > 0 &&
761  avci->compat_decode_partial_size != pkt->size) {
762  av_log(avctx, AV_LOG_ERROR,
763  "Got unexpected packet size after a partial decode\n");
764  ret = AVERROR(EINVAL);
765  goto finish;
766  }
767 
768  if (!avci->compat_decode_partial_size) {
769  ret = avcodec_send_packet(avctx, pkt);
770  if (ret == AVERROR_EOF)
771  ret = 0;
772  else if (ret == AVERROR(EAGAIN)) {
773  /* we fully drain all the output in each decode call, so this should not
774  * ever happen */
775  ret = AVERROR_BUG;
776  goto finish;
777  } else if (ret < 0)
778  goto finish;
779  }
780 
781  while (ret >= 0) {
782  ret = avcodec_receive_frame(avctx, frame);
783  if (ret < 0) {
784  if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
785  ret = 0;
786  goto finish;
787  }
788 
789  if (frame != avci->compat_decode_frame) {
790  if (!avctx->refcounted_frames) {
791  ret = unrefcount_frame(avci, frame);
792  if (ret < 0)
793  goto finish;
794  }
795 
796  *got_frame = 1;
797  frame = avci->compat_decode_frame;
798  } else {
799  if (!avci->compat_decode_warned) {
800  av_log(avctx, AV_LOG_WARNING, "The deprecated avcodec_decode_* "
801  "API cannot return all the frames for this decoder. "
802  "Some frames will be dropped. Update your code to the "
803  "new decoding API to fix this.\n");
804  avci->compat_decode_warned = 1;
805  }
806  }
807 
808  if (avci->draining || (!avctx->codec->bsfs && avci->compat_decode_consumed < pkt->size))
809  break;
810  }
811 
812 finish:
813  if (ret == 0) {
814  /* if there are any bsfs then assume full packet is always consumed */
815  if (avctx->codec->bsfs)
816  ret = pkt->size;
817  else
818  ret = FFMIN(avci->compat_decode_consumed, pkt->size);
819  }
820  avci->compat_decode_consumed = 0;
821  avci->compat_decode_partial_size = (ret >= 0) ? pkt->size - ret : 0;
822 
823  return ret;
824 }
825 
826 int attribute_align_arg avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture,
827  int *got_picture_ptr,
828  const AVPacket *avpkt)
829 {
830  return compat_decode(avctx, picture, got_picture_ptr, avpkt);
831 }
832 
833 int attribute_align_arg avcodec_decode_audio4(AVCodecContext *avctx,
834  AVFrame *frame,
835  int *got_frame_ptr,
836  const AVPacket *avpkt)
837 {
838  return compat_decode(avctx, frame, got_frame_ptr, avpkt);
839 }
840 
841 static void get_subtitle_defaults(AVSubtitle *sub)
842 {
843  memset(sub, 0, sizeof(*sub));
844  sub->pts = AV_NOPTS_VALUE;
845 }
846 
847 #define UTF8_MAX_BYTES 4 /* 5 and 6 bytes sequences should not be used */
848 static int recode_subtitle(AVCodecContext *avctx,
849  AVPacket *outpkt, const AVPacket *inpkt)
850 {
851 #if CONFIG_ICONV
852  iconv_t cd = (iconv_t)-1;
853  int ret = 0;
854  char *inb, *outb;
855  size_t inl, outl;
856  AVPacket tmp;
857 #endif
858 
859  if (avctx->sub_charenc_mode != FF_SUB_CHARENC_MODE_PRE_DECODER || inpkt->size == 0)
860  return 0;
861 
862 #if CONFIG_ICONV
863  cd = iconv_open("UTF-8", avctx->sub_charenc);
864  av_assert0(cd != (iconv_t)-1);
865 
866  inb = inpkt->data;
867  inl = inpkt->size;
868 
869  if (inl >= INT_MAX / UTF8_MAX_BYTES - AV_INPUT_BUFFER_PADDING_SIZE) {
870  av_log(avctx, AV_LOG_ERROR, "Subtitles packet is too big for recoding\n");
871  ret = AVERROR(ENOMEM);
872  goto end;
873  }
874 
875  ret = av_new_packet(&tmp, inl * UTF8_MAX_BYTES);
876  if (ret < 0)
877  goto end;
878  outpkt->buf = tmp.buf;
879  outpkt->data = tmp.data;
880  outpkt->size = tmp.size;
881  outb = outpkt->data;
882  outl = outpkt->size;
883 
884  if (iconv(cd, &inb, &inl, &outb, &outl) == (size_t)-1 ||
885  iconv(cd, NULL, NULL, &outb, &outl) == (size_t)-1 ||
886  outl >= outpkt->size || inl != 0) {
887  ret = FFMIN(AVERROR(errno), -1);
888  av_log(avctx, AV_LOG_ERROR, "Unable to recode subtitle event \"%s\" "
889  "from %s to UTF-8\n", inpkt->data, avctx->sub_charenc);
890  av_packet_unref(&tmp);
891  goto end;
892  }
893  outpkt->size -= outl;
894  memset(outpkt->data + outpkt->size, 0, outl);
895 
896 end:
897  if (cd != (iconv_t)-1)
898  iconv_close(cd);
899  return ret;
900 #else
901  av_log(avctx, AV_LOG_ERROR, "requesting subtitles recoding without iconv");
902  return AVERROR(EINVAL);
903 #endif
904 }
905 
906 static int utf8_check(const uint8_t *str)
907 {
908  const uint8_t *byte;
909  uint32_t codepoint, min;
910 
911  while (*str) {
912  byte = str;
913  GET_UTF8(codepoint, *(byte++), return 0;);
914  min = byte - str == 1 ? 0 : byte - str == 2 ? 0x80 :
915  1 << (5 * (byte - str) - 4);
916  if (codepoint < min || codepoint >= 0x110000 ||
917  codepoint == 0xFFFE /* BOM */ ||
918  codepoint >= 0xD800 && codepoint <= 0xDFFF /* surrogates */)
919  return 0;
920  str = byte;
921  }
922  return 1;
923 }
924 
925 #if FF_API_ASS_TIMING
926 static void insert_ts(AVBPrint *buf, int ts)
927 {
928  if (ts == -1) {
929  av_bprintf(buf, "9:59:59.99,");
930  } else {
931  int h, m, s;
932 
933  h = ts/360000; ts -= 360000*h;
934  m = ts/ 6000; ts -= 6000*m;
935  s = ts/ 100; ts -= 100*s;
936  av_bprintf(buf, "%d:%02d:%02d.%02d,", h, m, s, ts);
937  }
938 }
939 
940 static int convert_sub_to_old_ass_form(AVSubtitle *sub, const AVPacket *pkt, AVRational tb)
941 {
942  int i;
943  AVBPrint buf;
944 
946 
947  for (i = 0; i < sub->num_rects; i++) {
948  char *final_dialog;
949  const char *dialog;
950  AVSubtitleRect *rect = sub->rects[i];
951  int ts_start, ts_duration = -1;
952  long int layer;
953 
954  if (rect->type != SUBTITLE_ASS || !strncmp(rect->ass, "Dialogue: ", 10))
955  continue;
956 
957  av_bprint_clear(&buf);
958 
959  /* skip ReadOrder */
960  dialog = strchr(rect->ass, ',');
961  if (!dialog)
962  continue;
963  dialog++;
964 
965  /* extract Layer or Marked */
966  layer = strtol(dialog, (char**)&dialog, 10);
967  if (*dialog != ',')
968  continue;
969  dialog++;
970 
971  /* rescale timing to ASS time base (ms) */
972  ts_start = av_rescale_q(pkt->pts, tb, av_make_q(1, 100));
973  if (pkt->duration != -1)
974  ts_duration = av_rescale_q(pkt->duration, tb, av_make_q(1, 100));
975  sub->end_display_time = FFMAX(sub->end_display_time, 10 * ts_duration);
976 
977  /* construct ASS (standalone file form with timestamps) string */
978  av_bprintf(&buf, "Dialogue: %ld,", layer);
979  insert_ts(&buf, ts_start);
980  insert_ts(&buf, ts_duration == -1 ? -1 : ts_start + ts_duration);
981  av_bprintf(&buf, "%s\r\n", dialog);
982 
983  final_dialog = av_strdup(buf.str);
984  if (!av_bprint_is_complete(&buf) || !final_dialog) {
985  av_freep(&final_dialog);
986  av_bprint_finalize(&buf, NULL);
987  return AVERROR(ENOMEM);
988  }
989  av_freep(&rect->ass);
990  rect->ass = final_dialog;
991  }
992 
993  av_bprint_finalize(&buf, NULL);
994  return 0;
995 }
996 #endif
997 
998 int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub,
999  int *got_sub_ptr,
1000  AVPacket *avpkt)
1001 {
1002  int i, ret = 0;
1003 
1004  if (!avpkt->data && avpkt->size) {
1005  av_log(avctx, AV_LOG_ERROR, "invalid packet: NULL data, size != 0\n");
1006  return AVERROR(EINVAL);
1007  }
1008  if (!avctx->codec)
1009  return AVERROR(EINVAL);
1010  if (avctx->codec->type != AVMEDIA_TYPE_SUBTITLE) {
1011  av_log(avctx, AV_LOG_ERROR, "Invalid media type for subtitles\n");
1012  return AVERROR(EINVAL);
1013  }
1014 
1015  *got_sub_ptr = 0;
1016  get_subtitle_defaults(sub);
1017 
1018  if ((avctx->codec->capabilities & AV_CODEC_CAP_DELAY) || avpkt->size) {
1019  AVPacket pkt_recoded = *avpkt;
1020 
1021  ret = recode_subtitle(avctx, &pkt_recoded, avpkt);
1022  if (ret < 0) {
1023  *got_sub_ptr = 0;
1024  } else {
1025  ret = extract_packet_props(avctx->internal, &pkt_recoded);
1026  if (ret < 0)
1027  return ret;
1028 
1029  if (avctx->pkt_timebase.num && avpkt->pts != AV_NOPTS_VALUE)
1030  sub->pts = av_rescale_q(avpkt->pts,
1031  avctx->pkt_timebase, AV_TIME_BASE_Q);
1032  ret = avctx->codec->decode(avctx, sub, got_sub_ptr, &pkt_recoded);
1033  av_assert1((ret >= 0) >= !!*got_sub_ptr &&
1034  !!*got_sub_ptr >= !!sub->num_rects);
1035 
1036 #if FF_API_ASS_TIMING
1037  if (avctx->sub_text_format == FF_SUB_TEXT_FMT_ASS_WITH_TIMINGS
1038  && *got_sub_ptr && sub->num_rects) {
1039  const AVRational tb = avctx->pkt_timebase.num ? avctx->pkt_timebase
1040  : avctx->time_base;
1041  int err = convert_sub_to_old_ass_form(sub, avpkt, tb);
1042  if (err < 0)
1043  ret = err;
1044  }
1045 #endif
1046 
1047  if (sub->num_rects && !sub->end_display_time && avpkt->duration &&
1048  avctx->pkt_timebase.num) {
1049  AVRational ms = { 1, 1000 };
1050  sub->end_display_time = av_rescale_q(avpkt->duration,
1051  avctx->pkt_timebase, ms);
1052  }
1053 
1054  if (avctx->codec_descriptor->props & AV_CODEC_PROP_BITMAP_SUB)
1055  sub->format = 0;
1056  else if (avctx->codec_descriptor->props & AV_CODEC_PROP_TEXT_SUB)
1057  sub->format = 1;
1058 
1059  for (i = 0; i < sub->num_rects; i++) {
1060  if (sub->rects[i]->ass && !utf8_check(sub->rects[i]->ass)) {
1061  av_log(avctx, AV_LOG_ERROR,
1062  "Invalid UTF-8 in decoded subtitles text; "
1063  "maybe missing -sub_charenc option\n");
1064  avsubtitle_free(sub);
1065  ret = AVERROR_INVALIDDATA;
1066  break;
1067  }
1068  }
1069 
1070  if (avpkt->data != pkt_recoded.data) { // did we recode?
1071  /* prevent from destroying side data from original packet */
1072  pkt_recoded.side_data = NULL;
1073  pkt_recoded.side_data_elems = 0;
1074 
1075  av_packet_unref(&pkt_recoded);
1076  }
1077  }
1078 
1079  if (*got_sub_ptr)
1080  avctx->frame_number++;
1081  }
1082 
1083  return ret;
1084 }
1085 
1086 enum AVPixelFormat avcodec_default_get_format(struct AVCodecContext *avctx,
1087  const enum AVPixelFormat *fmt)
1088 {
1089  const AVPixFmtDescriptor *desc;
1090  const AVCodecHWConfig *config;
1091  int i, n;
1092 
1093  // If a device was supplied when the codec was opened, assume that the
1094  // user wants to use it.
1095  if (avctx->hw_device_ctx && avctx->codec->hw_configs) {
1096  AVHWDeviceContext *device_ctx =
1097  (AVHWDeviceContext*)avctx->hw_device_ctx->data;
1098  for (i = 0;; i++) {
1099  config = &avctx->codec->hw_configs[i]->public;
1100  if (!config)
1101  break;
1102  if (!(config->methods &
1103  AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX))
1104  continue;
1105  if (device_ctx->type != config->device_type)
1106  continue;
1107  for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++) {
1108  if (config->pix_fmt == fmt[n])
1109  return fmt[n];
1110  }
1111  }
1112  }
1113  // No device or other setup, so we have to choose from things which
1114  // don't need any other external information.
1115 
1116  // If the last element of the list is a software format, choose it
1117  // (this should be best software format if any exist).
1118  for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++);
1119  desc = av_pix_fmt_desc_get(fmt[n - 1]);
1120  if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
1121  return fmt[n - 1];
1122 
1123  // Finally, traverse the list in order and choose the first entry
1124  // with no external dependencies (if there is no hardware configuration
1125  // information available then this just picks the first entry).
1126  for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++) {
1127  for (i = 0;; i++) {
1128  config = avcodec_get_hw_config(avctx->codec, i);
1129  if (!config)
1130  break;
1131  if (config->pix_fmt == fmt[n])
1132  break;
1133  }
1134  if (!config) {
1135  // No specific config available, so the decoder must be able
1136  // to handle this format without any additional setup.
1137  return fmt[n];
1138  }
1139  if (config->methods & AV_CODEC_HW_CONFIG_METHOD_INTERNAL) {
1140  // Usable with only internal setup.
1141  return fmt[n];
1142  }
1143  }
1144 
1145  // Nothing is usable, give up.
1146  return AV_PIX_FMT_NONE;
1147 }
1148 
1149 int ff_decode_get_hw_frames_ctx(AVCodecContext *avctx,
1150  enum AVHWDeviceType dev_type)
1151 {
1152  AVHWDeviceContext *device_ctx;
1153  AVHWFramesContext *frames_ctx;
1154  int ret;
1155 
1156  if (!avctx->hwaccel)
1157  return AVERROR(ENOSYS);
1158 
1159  if (avctx->hw_frames_ctx)
1160  return 0;
1161  if (!avctx->hw_device_ctx) {
1162  av_log(avctx, AV_LOG_ERROR, "A hardware frames or device context is "
1163  "required for hardware accelerated decoding.\n");
1164  return AVERROR(EINVAL);
1165  }
1166 
1167  device_ctx = (AVHWDeviceContext *)avctx->hw_device_ctx->data;
1168  if (device_ctx->type != dev_type) {
1169  av_log(avctx, AV_LOG_ERROR, "Device type %s expected for hardware "
1170  "decoding, but got %s.\n", av_hwdevice_get_type_name(dev_type),
1171  av_hwdevice_get_type_name(device_ctx->type));
1172  return AVERROR(EINVAL);
1173  }
1174 
1175  ret = avcodec_get_hw_frames_parameters(avctx,
1176  avctx->hw_device_ctx,
1177  avctx->hwaccel->pix_fmt,
1178  &avctx->hw_frames_ctx);
1179  if (ret < 0)
1180  return ret;
1181 
1182  frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
1183 
1184 
1185  if (frames_ctx->initial_pool_size) {
1186  // We guarantee 4 base work surfaces. The function above guarantees 1
1187  // (the absolute minimum), so add the missing count.
1188  frames_ctx->initial_pool_size += 3;
1189 
1190  // Add an additional surface per thread if frame threading is enabled.
1191  if (avctx->active_thread_type & FF_THREAD_FRAME)
1192  frames_ctx->initial_pool_size += avctx->thread_count;
1193  }
1194 
1195  ret = av_hwframe_ctx_init(avctx->hw_frames_ctx);
1196  if (ret < 0) {
1197  av_buffer_unref(&avctx->hw_frames_ctx);
1198  return ret;
1199  }
1200 
1201  return 0;
1202 }
1203 
1204 int avcodec_get_hw_frames_parameters(AVCodecContext *avctx,
1205  AVBufferRef *device_ref,
1206  enum AVPixelFormat hw_pix_fmt,
1207  AVBufferRef **out_frames_ref)
1208 {
1209  AVBufferRef *frames_ref = NULL;
1210  const AVCodecHWConfigInternal *hw_config;
1211  const AVHWAccel *hwa;
1212  int i, ret;
1213 
1214  for (i = 0;; i++) {
1215  hw_config = avctx->codec->hw_configs[i];
1216  if (!hw_config)
1217  return AVERROR(ENOENT);
1218  if (hw_config->public.pix_fmt == hw_pix_fmt)
1219  break;
1220  }
1221 
1222  hwa = hw_config->hwaccel;
1223  if (!hwa || !hwa->frame_params)
1224  return AVERROR(ENOENT);
1225 
1226  frames_ref = av_hwframe_ctx_alloc(device_ref);
1227  if (!frames_ref)
1228  return AVERROR(ENOMEM);
1229 
1230  ret = hwa->frame_params(avctx, frames_ref);
1231  if (ret >= 0) {
1232  *out_frames_ref = frames_ref;
1233  } else {
1234  av_buffer_unref(&frames_ref);
1235  }
1236  return ret;
1237 }
1238 
1239 static int hwaccel_init(AVCodecContext *avctx,
1240  const AVCodecHWConfigInternal *hw_config)
1241 {
1242  const AVHWAccel *hwaccel;
1243  int err;
1244 
1245  hwaccel = hw_config->hwaccel;
1246  if (hwaccel->capabilities & AV_HWACCEL_CODEC_CAP_EXPERIMENTAL &&
1247  avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
1248  av_log(avctx, AV_LOG_WARNING, "Ignoring experimental hwaccel: %s\n",
1249  hwaccel->name);
1250  return AVERROR_PATCHWELCOME;
1251  }
1252 
1253  if (hwaccel->priv_data_size) {
1254  avctx->internal->hwaccel_priv_data =
1255  av_mallocz(hwaccel->priv_data_size);
1256  if (!avctx->internal->hwaccel_priv_data)
1257  return AVERROR(ENOMEM);
1258  }
1259 
1260  avctx->hwaccel = hwaccel;
1261  if (hwaccel->init) {
1262  err = hwaccel->init(avctx);
1263  if (err < 0) {
1264  av_log(avctx, AV_LOG_ERROR, "Failed setup for format %s: "
1265  "hwaccel initialisation returned error.\n",
1266  av_get_pix_fmt_name(hw_config->public.pix_fmt));
1267  av_freep(&avctx->internal->hwaccel_priv_data);
1268  avctx->hwaccel = NULL;
1269  return err;
1270  }
1271  }
1272 
1273  return 0;
1274 }
1275 
1276 static void hwaccel_uninit(AVCodecContext *avctx)
1277 {
1278  if (avctx->hwaccel && avctx->hwaccel->uninit)
1279  avctx->hwaccel->uninit(avctx);
1280 
1281  av_freep(&avctx->internal->hwaccel_priv_data);
1282 
1283  avctx->hwaccel = NULL;
1284 
1285  av_buffer_unref(&avctx->hw_frames_ctx);
1286 }
1287 
1288 int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
1289 {
1290  const AVPixFmtDescriptor *desc;
1291  enum AVPixelFormat *choices;
1292  enum AVPixelFormat ret, user_choice;
1293  const AVCodecHWConfigInternal *hw_config;
1294  const AVCodecHWConfig *config;
1295  int i, n, err;
1296 
1297  // Find end of list.
1298  for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++);
1299  // Must contain at least one entry.
1300  av_assert0(n >= 1);
1301  // If a software format is available, it must be the last entry.
1302  desc = av_pix_fmt_desc_get(fmt[n - 1]);
1303  if (desc->flags & AV_PIX_FMT_FLAG_HWACCEL) {
1304  // No software format is available.
1305  } else {
1306  avctx->sw_pix_fmt = fmt[n - 1];
1307  }
1308 
1309  choices = av_malloc_array(n + 1, sizeof(*choices));
1310  if (!choices)
1311  return AV_PIX_FMT_NONE;
1312 
1313  memcpy(choices, fmt, (n + 1) * sizeof(*choices));
1314 
1315  for (;;) {
1316  // Remove the previous hwaccel, if there was one.
1317  hwaccel_uninit(avctx);
1318 
1319  user_choice = avctx->get_format(avctx, choices);
1320  if (user_choice == AV_PIX_FMT_NONE) {
1321  // Explicitly chose nothing, give up.
1322  ret = AV_PIX_FMT_NONE;
1323  break;
1324  }
1325 
1326  desc = av_pix_fmt_desc_get(user_choice);
1327  if (!desc) {
1328  av_log(avctx, AV_LOG_ERROR, "Invalid format returned by "
1329  "get_format() callback.\n");
1330  ret = AV_PIX_FMT_NONE;
1331  break;
1332  }
1333  av_log(avctx, AV_LOG_DEBUG, "Format %s chosen by get_format().\n",
1334  desc->name);
1335 
1336  for (i = 0; i < n; i++) {
1337  if (choices[i] == user_choice)
1338  break;
1339  }
1340  if (i == n) {
1341  av_log(avctx, AV_LOG_ERROR, "Invalid return from get_format(): "
1342  "%s not in possible list.\n", desc->name);
1343  break;
1344  }
1345 
1346  if (avctx->codec->hw_configs) {
1347  for (i = 0;; i++) {
1348  hw_config = avctx->codec->hw_configs[i];
1349  if (!hw_config)
1350  break;
1351  if (hw_config->public.pix_fmt == user_choice)
1352  break;
1353  }
1354  } else {
1355  hw_config = NULL;
1356  }
1357 
1358  if (!hw_config) {
1359  // No config available, so no extra setup required.
1360  ret = user_choice;
1361  break;
1362  }
1363  config = &hw_config->public;
1364 
1365  if (config->methods &
1366  AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX &&
1367  avctx->hw_frames_ctx) {
1368  const AVHWFramesContext *frames_ctx =
1369  (AVHWFramesContext*)avctx->hw_frames_ctx->data;
1370  if (frames_ctx->format != user_choice) {
1371  av_log(avctx, AV_LOG_ERROR, "Invalid setup for format %s: "
1372  "does not match the format of the provided frames "
1373  "context.\n", desc->name);
1374  goto try_again;
1375  }
1376  } else if (config->methods &
1377  AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX &&
1378  avctx->hw_device_ctx) {
1379  const AVHWDeviceContext *device_ctx =
1380  (AVHWDeviceContext*)avctx->hw_device_ctx->data;
1381  if (device_ctx->type != config->device_type) {
1382  av_log(avctx, AV_LOG_ERROR, "Invalid setup for format %s: "
1383  "does not match the type of the provided device "
1384  "context.\n", desc->name);
1385  goto try_again;
1386  }
1387  } else if (config->methods &
1388  AV_CODEC_HW_CONFIG_METHOD_INTERNAL) {
1389  // Internal-only setup, no additional configuration.
1390  } else if (config->methods &
1391  AV_CODEC_HW_CONFIG_METHOD_AD_HOC) {
1392  // Some ad-hoc configuration we can't see and can't check.
1393  } else {
1394  av_log(avctx, AV_LOG_ERROR, "Invalid setup for format %s: "
1395  "missing configuration.\n", desc->name);
1396  goto try_again;
1397  }
1398  if (hw_config->hwaccel) {
1399  av_log(avctx, AV_LOG_DEBUG, "Format %s requires hwaccel "
1400  "initialisation.\n", desc->name);
1401  err = hwaccel_init(avctx, hw_config);
1402  if (err < 0)
1403  goto try_again;
1404  }
1405  ret = user_choice;
1406  break;
1407 
1408  try_again:
1409  av_log(avctx, AV_LOG_DEBUG, "Format %s not usable, retrying "
1410  "get_format() without it.\n", desc->name);
1411  for (i = 0; i < n; i++) {
1412  if (choices[i] == user_choice)
1413  break;
1414  }
1415  for (; i + 1 < n; i++)
1416  choices[i] = choices[i + 1];
1417  --n;
1418  }
1419 
1420  av_freep(&choices);
1421  return ret;
1422 }
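From the application side this negotiation is driven by the get_format callback: it receives the candidate list assembled above and returns one entry, usually the desired hardware pixel format when it is present. A minimal sketch for a VAAPI-capable decoder (assumes avctx->hw_device_ctx was attached before avcodec_open2(); not part of this file):

#include "libavcodec/avcodec.h"

/* Sketch: prefer VAAPI surfaces, otherwise take the trailing software format. */
static enum AVPixelFormat get_vaapi_format(AVCodecContext *avctx,
                                           const enum AVPixelFormat *fmt)
{
    int n;
    for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++)
        if (fmt[n] == AV_PIX_FMT_VAAPI)
            return fmt[n];
    return fmt[n - 1]; /* ff_get_format() guarantees at least one entry */
}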
1423 
1424 static int update_frame_pool(AVCodecContext *avctx, AVFrame *frame)
1425 {
1426  FramePool *pool = avctx->internal->pool;
1427  int i, ret;
1428 
1429  switch (avctx->codec_type) {
1430  case AVMEDIA_TYPE_VIDEO: {
1431  uint8_t *data[4];
1432  int linesize[4];
1433  int size[4] = { 0 };
1434  int w = frame->width;
1435  int h = frame->height;
1436  int tmpsize, unaligned;
1437 
1438  if (pool->format == frame->format &&
1439  pool->width == frame->width && pool->height == frame->height)
1440  return 0;
1441 
1442  avcodec_align_dimensions2(avctx, &w, &h, pool->stride_align);
1443 
1444  do {
1445  // NOTE: do not align linesizes individually, this breaks e.g. assumptions
1446  // that linesize[0] == 2*linesize[1] in the MPEG-encoder for 4:2:2
1447  ret = av_image_fill_linesizes(linesize, avctx->pix_fmt, w);
1448  if (ret < 0)
1449  return ret;
1450  // increase alignment of w for next try (rhs gives the lowest bit set in w)
1451  w += w & ~(w - 1);
1452 
1453  unaligned = 0;
1454  for (i = 0; i < 4; i++)
1455  unaligned |= linesize[i] % pool->stride_align[i];
1456  } while (unaligned);
1457 
1458  tmpsize = av_image_fill_pointers(data, avctx->pix_fmt, h,
1459  NULL, linesize);
1460  if (tmpsize < 0)
1461  return -1;
1462 
1463  for (i = 0; i < 3 && data[i + 1]; i++)
1464  size[i] = data[i + 1] - data[i];
1465  size[i] = tmpsize - (data[i] - data[0]);
1466 
1467  for (i = 0; i < 4; i++) {
1468  av_buffer_pool_uninit(&pool->pools[i]);
1469  pool->linesize[i] = linesize[i];
1470  if (size[i]) {
1471  pool->pools[i] = av_buffer_pool_init(size[i] + 16 + STRIDE_ALIGN - 1,
1472  CONFIG_MEMORY_POISONING ?
1473  NULL :
1474  av_buffer_allocz);
1475  if (!pool->pools[i]) {
1476  ret = AVERROR(ENOMEM);
1477  goto fail;
1478  }
1479  }
1480  }
1481  pool->format = frame->format;
1482  pool->width = frame->width;
1483  pool->height = frame->height;
1484 
1485  break;
1486  }
1487  case AVMEDIA_TYPE_AUDIO: {
1488  int ch = frame->channels; //av_get_channel_layout_nb_channels(frame->channel_layout);
1489  int planar = av_sample_fmt_is_planar(frame->format);
1490  int planes = planar ? ch : 1;
1491 
1492  if (pool->format == frame->format && pool->planes == planes &&
1493  pool->channels == ch && frame->nb_samples == pool->samples)
1494  return 0;
1495 
1496  av_buffer_pool_uninit(&pool->pools[0]);
1497  ret = av_samples_get_buffer_size(&pool->linesize[0], ch,
1498  frame->nb_samples, frame->format, 0);
1499  if (ret < 0)
1500  goto fail;
1501 
1502  pool->pools[0] = av_buffer_pool_init(pool->linesize[0], NULL);
1503  if (!pool->pools[0]) {
1504  ret = AVERROR(ENOMEM);
1505  goto fail;
1506  }
1507 
1508  pool->format = frame->format;
1509  pool->planes = planes;
1510  pool->channels = ch;
1511  pool->samples = frame->nb_samples;
1512  break;
1513  }
1514  default: av_assert0(0);
1515  }
1516  return 0;
1517 fail:
1518  for (i = 0; i < 4; i++)
1519  av_buffer_pool_uninit(&pool->pools[i]);
1520  pool->format = -1;
1521  pool->planes = pool->channels = pool->samples = 0;
1522  pool->width = pool->height = 0;
1523  return ret;
1524 }
1525 
1526 static int audio_get_buffer(AVCodecContext *avctx, AVFrame *frame)
1527 {
1528  FramePool *pool = avctx->internal->pool;
1529  int planes = pool->planes;
1530  int i;
1531 
1532  frame->linesize[0] = pool->linesize[0];
1533 
1534  if (planes > AV_NUM_DATA_POINTERS) {
1535  frame->extended_data = av_mallocz_array(planes, sizeof(*frame->extended_data));
1536  frame->nb_extended_buf = planes - AV_NUM_DATA_POINTERS;
1537  frame->extended_buf = av_mallocz_array(frame->nb_extended_buf,
1538  sizeof(*frame->extended_buf));
1539  if (!frame->extended_data || !frame->extended_buf) {
1540  av_freep(&frame->extended_data);
1541  av_freep(&frame->extended_buf);
1542  return AVERROR(ENOMEM);
1543  }
1544  } else {
1545  frame->extended_data = frame->data;
1546  av_assert0(frame->nb_extended_buf == 0);
1547  }
1548 
1549  for (i = 0; i < FFMIN(planes, AV_NUM_DATA_POINTERS); i++) {
1550  frame->buf[i] = av_buffer_pool_get(pool->pools[0]);
1551  if (!frame->buf[i])
1552  goto fail;
1553  frame->extended_data[i] = frame->data[i] = frame->buf[i]->data;
1554  }
1555  for (i = 0; i < frame->nb_extended_buf; i++) {
1556  frame->extended_buf[i] = av_buffer_pool_get(pool->pools[0]);
1557  if (!frame->extended_buf[i])
1558  goto fail;
1559  frame->extended_data[i + AV_NUM_DATA_POINTERS] = frame->extended_buf[i]->data;
1560  }
1561 
1562  if (avctx->debug & FF_DEBUG_BUFFERS)
1563  av_log(avctx, AV_LOG_DEBUG, "default_get_buffer called on frame %p", frame);
1564 
1565  return 0;
1566 fail:
1567  av_frame_unref(frame);
1568  return AVERROR(ENOMEM);
1569 }
1570 
1571 static int video_get_buffer(AVCodecContext *s, AVFrame *pic)
1572 {
1573  FramePool *pool = s->internal->pool;
1574  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pic->format);
1575  int i;
1576 
1577  if (pic->data[0] || pic->data[1] || pic->data[2] || pic->data[3]) {
1578  av_log(s, AV_LOG_ERROR, "pic->data[*]!=NULL in avcodec_default_get_buffer\n");
1579  return -1;
1580  }
1581 
1582  if (!desc) {
1583  av_log(s, AV_LOG_ERROR,
1584  "Unable to get pixel format descriptor for format %s\n",
1585  av_get_pix_fmt_name(pic->format));
1586  return AVERROR(EINVAL);
1587  }
1588 
1589  memset(pic->data, 0, sizeof(pic->data));
1590  pic->extended_data = pic->data;
1591 
1592  for (i = 0; i < 4 && pool->pools[i]; i++) {
1593  pic->linesize[i] = pool->linesize[i];
1594 
1595  pic->buf[i] = av_buffer_pool_get(pool->pools[i]);
1596  if (!pic->buf[i])
1597  goto fail;
1598 
1599  pic->data[i] = pic->buf[i]->data;
1600  }
1601  for (; i < AV_NUM_DATA_POINTERS; i++) {
1602  pic->data[i] = NULL;
1603  pic->linesize[i] = 0;
1604  }
1605  if (desc->flags & AV_PIX_FMT_FLAG_PAL ||
1606  desc->flags & AV_PIX_FMT_FLAG_PSEUDOPAL)
1607  avpriv_set_systematic_pal2((uint32_t *)pic->data[1], pic->format);
1608 
1609  if (s->debug & FF_DEBUG_BUFFERS)
1610  av_log(s, AV_LOG_DEBUG, "default_get_buffer called on pic %p\n", pic);
1611 
1612  return 0;
1613 fail:
1614  av_frame_unref(pic);
1615  return AVERROR(ENOMEM);
1616 }
1617 
1618 int avcodec_default_get_buffer2(AVCodecContext *avctx, AVFrame *frame, int flags)
1619 {
1620  int ret;
1621 
1622  if (avctx->hw_frames_ctx) {
1623  ret = av_hwframe_get_buffer(avctx->hw_frames_ctx, frame, 0);
1624  frame->width = avctx->coded_width;
1625  frame->height = avctx->coded_height;
1626  return ret;
1627  }
1628 
1629  if ((ret = update_frame_pool(avctx, frame)) < 0)
1630  return ret;
1631 
1632  switch (avctx->codec_type) {
1633  case AVMEDIA_TYPE_VIDEO:
1634  return video_get_buffer(avctx, frame);
1635  case AVMEDIA_TYPE_AUDIO:
1636  return audio_get_buffer(avctx, frame);
1637  default:
1638  return -1;
1639  }
1640 }
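Applications that install their own get_buffer2 callback can still delegate the actual allocation to the default implementation above. A minimal pass-through sketch that only inspects the request (not part of this file):

#include "libavcodec/avcodec.h"

/* Sketch: log the buffer request, then reuse the default pool-backed allocator. */
static int logging_get_buffer2(AVCodecContext *avctx, AVFrame *frame, int flags)
{
    av_log(avctx, AV_LOG_DEBUG, "buffer request: %dx%d, %d samples\n",
           frame->width, frame->height, frame->nb_samples);
    return avcodec_default_get_buffer2(avctx, frame, flags);
}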
1641 
1642 static int add_metadata_from_side_data(const AVPacket *avpkt, AVFrame *frame)
1643 {
1644  int size;
1645  const uint8_t *side_metadata;
1646 
1647  AVDictionary **frame_md = &frame->metadata;
1648 
1649  side_metadata = av_packet_get_side_data(avpkt,
1650  AV_PKT_DATA_STRINGS_METADATA, &size);
1651  return av_packet_unpack_dictionary(side_metadata, size, frame_md);
1652 }
1653 
1654 int ff_init_buffer_info(AVCodecContext *avctx, AVFrame *frame)
1655 {
1656  const AVPacket *pkt = avctx->internal->last_pkt_props;
1657  int i;
1658  static const struct {
1659  enum AVPacketSideDataType packet;
1660  enum AVFrameSideDataType frame;
1661  } sd[] = {
1662  { AV_PKT_DATA_REPLAYGAIN, AV_FRAME_DATA_REPLAYGAIN },
1663  { AV_PKT_DATA_DISPLAYMATRIX, AV_FRAME_DATA_DISPLAYMATRIX },
1664  { AV_PKT_DATA_SPHERICAL, AV_FRAME_DATA_SPHERICAL },
1665  { AV_PKT_DATA_STEREO3D, AV_FRAME_DATA_STEREO3D },
1666  { AV_PKT_DATA_AUDIO_SERVICE_TYPE, AV_FRAME_DATA_AUDIO_SERVICE_TYPE },
1667  { AV_PKT_DATA_MASTERING_DISPLAY_METADATA, AV_FRAME_DATA_MASTERING_DISPLAY_METADATA },
1668  { AV_PKT_DATA_CONTENT_LIGHT_LEVEL, AV_FRAME_DATA_CONTENT_LIGHT_LEVEL },
1669  { AV_PKT_DATA_A53_CC, AV_FRAME_DATA_A53_CC },
1670  };
1671 
1672  if (pkt) {
1673  frame->pts = pkt->pts;
1674 #if FF_API_PKT_PTS
1675 FF_DISABLE_DEPRECATION_WARNINGS
1676  frame->pkt_pts = pkt->pts;
1677 FF_ENABLE_DEPRECATION_WARNINGS
1678 #endif
1679  frame->pkt_pos = pkt->pos;
1680  frame->pkt_duration = pkt->duration;
1681  frame->pkt_size = pkt->size;
1682 
1683  for (i = 0; i < FF_ARRAY_ELEMS(sd); i++) {
1684  int size;
1685  uint8_t *packet_sd = av_packet_get_side_data(pkt, sd[i].packet, &size);
1686  if (packet_sd) {
1687  AVFrameSideData *frame_sd = av_frame_new_side_data(frame,
1688  sd[i].frame,
1689  size);
1690  if (!frame_sd)
1691  return AVERROR(ENOMEM);
1692 
1693  memcpy(frame_sd->data, packet_sd, size);
1694  }
1695  }
1696  add_metadata_from_side_data(pkt, frame);
1697 
1698  if (pkt->flags & AV_PKT_FLAG_DISCARD) {
1699  frame->flags |= AV_FRAME_FLAG_DISCARD;
1700  } else {
1701  frame->flags = (frame->flags & ~AV_FRAME_FLAG_DISCARD);
1702  }
1703  }
1704  frame->reordered_opaque = avctx->reordered_opaque;
1705 
1706  if (frame->color_primaries == AVCOL_PRI_UNSPECIFIED)
1707  frame->color_primaries = avctx->color_primaries;
1708  if (frame->color_trc == AVCOL_TRC_UNSPECIFIED)
1709  frame->color_trc = avctx->color_trc;
1710  if (frame->colorspace == AVCOL_SPC_UNSPECIFIED)
1711  frame->colorspace = avctx->colorspace;
1712  if (frame->color_range == AVCOL_RANGE_UNSPECIFIED)
1713  frame->color_range = avctx->color_range;
1714  if (frame->chroma_location == AVCHROMA_LOC_UNSPECIFIED)
1715  frame->chroma_location = avctx->chroma_sample_location;
1716 
1717  switch (avctx->codec->type) {
1718  case AVMEDIA_TYPE_VIDEO:
1719  frame->format = avctx->pix_fmt;
1720  if (!frame->sample_aspect_ratio.num)
1721  frame->sample_aspect_ratio = avctx->sample_aspect_ratio;
1722 
1723  if (frame->width && frame->height &&
1724  av_image_check_sar(frame->width, frame->height,
1725  frame->sample_aspect_ratio) < 0) {
1726  av_log(avctx, AV_LOG_WARNING, "ignoring invalid SAR: %u/%u\n",
1727  frame->sample_aspect_ratio.num,
1728  frame->sample_aspect_ratio.den);
1729  frame->sample_aspect_ratio = (AVRational){ 0, 1 };
1730  }
1731 
1732  break;
1733  case AVMEDIA_TYPE_AUDIO:
1734  if (!frame->sample_rate)
1735  frame->sample_rate = avctx->sample_rate;
1736  if (frame->format < 0)
1737  frame->format = avctx->sample_fmt;
1738  if (!frame->channel_layout) {
1739  if (avctx->channel_layout) {
1740  if (av_get_channel_layout_nb_channels(avctx->channel_layout) !=
1741  avctx->channels) {
1742  av_log(avctx, AV_LOG_ERROR, "Inconsistent channel "
1743  "configuration.\n");
1744  return AVERROR(EINVAL);
1745  }
1746 
1747  frame->channel_layout = avctx->channel_layout;
1748  } else {
1749  if (avctx->channels > FF_SANE_NB_CHANNELS) {
1750  av_log(avctx, AV_LOG_ERROR, "Too many channels: %d.\n",
1751  avctx->channels);
1752  return AVERROR(ENOSYS);
1753  }
1754  }
1755  }
1756  frame->channels = avctx->channels;
1757  break;
1758  }
1759  return 0;
1760 }
1761 
1762 int ff_decode_frame_props(AVCodecContext *avctx, AVFrame *frame)
1763 {
1764  return ff_init_buffer_info(avctx, frame);
1765 }
1766 
1767 static void validate_avframe_allocation(AVCodecContext *avctx, AVFrame *frame)
1768 {
1769  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
1770  int i;
1771  int num_planes = av_pix_fmt_count_planes(frame->format);
1772  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
1773  int flags = desc ? desc->flags : 0;
1774  if (num_planes == 1 && (flags & AV_PIX_FMT_FLAG_PAL))
1775  num_planes = 2;
1776  for (i = 0; i < num_planes; i++) {
1777  av_assert0(frame->data[i]);
1778  }
1779  // For now do not enforce anything for palette of pseudopal formats
1780  if (num_planes == 1 && (flags & AV_PIX_FMT_FLAG_PSEUDOPAL))
1781  num_planes = 2;
1782  // For formats without data like hwaccel allow unused pointers to be non-NULL.
1783  for (i = num_planes; num_planes > 0 && i < FF_ARRAY_ELEMS(frame->data); i++) {
1784  if (frame->data[i])
1785  av_log(avctx, AV_LOG_ERROR, "Buffer returned by get_buffer2() did not zero unused plane pointers\n");
1786  frame->data[i] = NULL;
1787  }
1788  }
1789 }
1790 
1791 static void decode_data_free(void *opaque, uint8_t *data)
1792 {
1793  FrameDecodeData *fdd = (FrameDecodeData*)data;
1794 
1795  if (fdd->post_process_opaque_free)
1796  fdd->post_process_opaque_free(fdd->post_process_opaque);
1797 
1798  if (fdd->hwaccel_priv_free)
1799  fdd->hwaccel_priv_free(fdd->hwaccel_priv);
1800 
1801  av_freep(&fdd);
1802 }
1803 
1804 int ff_attach_decode_data(AVFrame *frame)
1805 {
1806  AVBufferRef *fdd_buf;
1807  FrameDecodeData *fdd;
1808 
1809  av_assert1(!frame->private_ref);
1810  av_buffer_unref(&frame->private_ref);
1811 
1812  fdd = av_mallocz(sizeof(*fdd));
1813  if (!fdd)
1814  return AVERROR(ENOMEM);
1815 
1816  fdd_buf = av_buffer_create((uint8_t*)fdd, sizeof(*fdd), decode_data_free,
1817  NULL, AV_BUFFER_FLAG_READONLY);
1818  if (!fdd_buf) {
1819  av_freep(&fdd);
1820  return AVERROR(ENOMEM);
1821  }
1822 
1823  frame->private_ref = fdd_buf;
1824 
1825  return 0;
1826 }
1827 
1828 static int get_buffer_internal(AVCodecContext *avctx, AVFrame *frame, int flags)
1829 {
1830  const AVHWAccel *hwaccel = avctx->hwaccel;
1831  int override_dimensions = 1;
1832  int ret;
1833 
1834  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
1835  if ((ret = av_image_check_size2(avctx->width, avctx->height, avctx->max_pixels, AV_PIX_FMT_NONE, 0, avctx)) < 0 || avctx->pix_fmt<0) {
1836  av_log(avctx, AV_LOG_ERROR, "video_get_buffer: image parameters invalid\n");
1837  return AVERROR(EINVAL);
1838  }
1839 
1840  if (frame->width <= 0 || frame->height <= 0) {
1841  frame->width = FFMAX(avctx->width, AV_CEIL_RSHIFT(avctx->coded_width, avctx->lowres));
1842  frame->height = FFMAX(avctx->height, AV_CEIL_RSHIFT(avctx->coded_height, avctx->lowres));
1843  override_dimensions = 0;
1844  }
1845 
1846  if (frame->data[0] || frame->data[1] || frame->data[2] || frame->data[3]) {
1847  av_log(avctx, AV_LOG_ERROR, "pic->data[*]!=NULL in get_buffer_internal\n");
1848  return AVERROR(EINVAL);
1849  }
1850  }
1851  ret = ff_decode_frame_props(avctx, frame);
1852  if (ret < 0)
1853  return ret;
1854 
1855  if (hwaccel) {
1856  if (hwaccel->alloc_frame) {
1857  ret = hwaccel->alloc_frame(avctx, frame);
1858  goto end;
1859  }
1860  } else
1861  avctx->sw_pix_fmt = avctx->pix_fmt;
1862 
1863  ret = avctx->get_buffer2(avctx, frame, flags);
1864  if (ret < 0)
1865  goto end;
1866 
1867  validate_avframe_allocation(avctx, frame);
1868 
1869  ret = ff_attach_decode_data(frame);
1870  if (ret < 0)
1871  goto end;
1872 
1873 end:
1874  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO && !override_dimensions &&
1875  !(avctx->codec->caps_internal & FF_CODEC_CAP_EXPORTS_CROPPING)) {
1876  frame->width = avctx->width;
1877  frame->height = avctx->height;
1878  }
1879 
1880  if (ret < 0)
1881  av_frame_unref(frame);
1882 
1883  return ret;
1884 }
1885 
1886 int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
1887 {
1888  int ret = get_buffer_internal(avctx, frame, flags);
1889  if (ret < 0) {
1890  av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
1891  frame->width = frame->height = 0;
1892  }
1893  return ret;
1894 }
1895 
1896 static int reget_buffer_internal(AVCodecContext *avctx, AVFrame *frame)
1897 {
1898  AVFrame *tmp;
1899  int ret;
1900 
1901  av_assert0(avctx->codec_type == AVMEDIA_TYPE_VIDEO);
1902 
1903  if (frame->data[0] && (frame->width != avctx->width || frame->height != avctx->height || frame->format != avctx->pix_fmt)) {
1904  av_log(avctx, AV_LOG_WARNING, "Picture changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s in reget buffer()\n",
1905  frame->width, frame->height, av_get_pix_fmt_name(frame->format), avctx->width, avctx->height, av_get_pix_fmt_name(avctx->pix_fmt));
1906  av_frame_unref(frame);
1907  }
1908 
1909  ff_init_buffer_info(avctx, frame);
1910 
1911  if (!frame->data[0])
1912  return ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF);
1913 
1914  if (av_frame_is_writable(frame))
1915  return ff_decode_frame_props(avctx, frame);
1916 
1917  tmp = av_frame_alloc();
1918  if (!tmp)
1919  return AVERROR(ENOMEM);
1920 
1921  av_frame_move_ref(tmp, frame);
1922 
1923  ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF);
1924  if (ret < 0) {
1925  av_frame_free(&tmp);
1926  return ret;
1927  }
1928 
1929  av_frame_copy(frame, tmp);
1930  av_frame_free(&tmp);
1931 
1932  return 0;
1933 }
1934 
1935 int ff_reget_buffer(AVCodecContext *avctx, AVFrame *frame)
1936 {
1937  int ret = reget_buffer_internal(avctx, frame);
1938  if (ret < 0)
1939  av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
1940  return ret;
1941 }
1942 
1943 void avcodec_flush_buffers(AVCodecContext *avctx)
1944 {
1945  avctx->internal->draining = 0;
1946  avctx->internal->draining_done = 0;
1947  avctx->internal->nb_draining_errors = 0;
1948  av_frame_unref(avctx->internal->buffer_frame);
1949  av_frame_unref(avctx->internal->compat_decode_frame);
1950  av_packet_unref(avctx->internal->buffer_pkt);
1951  avctx->internal->buffer_pkt_valid = 0;
1952 
1953  av_packet_unref(avctx->internal->ds.in_pkt);
1954 
1955  if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME)
1956  ff_thread_flush(avctx);
1957  else if (avctx->codec->flush)
1958  avctx->codec->flush(avctx);
1959 
1960  avctx->pts_correction_last_pts =
1961  avctx->pts_correction_last_dts = INT64_MIN;
1962 
1963  ff_decode_bsfs_uninit(avctx);
1964 
1965  if (!avctx->refcounted_frames)
1966  av_frame_unref(avctx->internal->to_free);
1967 }
1968 
1969 void ff_decode_bsfs_uninit(AVCodecContext *avctx)
1970 {
1971  DecodeFilterContext *s = &avctx->internal->filter;
1972  int i;
1973 
1974  for (i = 0; i < s->nb_bsfs; i++)
1975  av_bsf_free(&s->bsfs[i]);
1976  av_freep(&s->bsfs);
1977  s->nb_bsfs = 0;
1978 }
#define AV_PIX_FMT_FLAG_PAL
Pixel format has a palette in data[1], values are indexes in this palette.
Definition: pixdesc.h:132
#define FF_SANE_NB_CHANNELS
Definition: internal.h:86
This struct aggregates all the (hardware/vendor-specific) "high-level" state, i.e.
Definition: hwcontext.h:59
#define FF_COMPLIANCE_EXPERIMENTAL
Allow nonstandardized experimental things.
Definition: avcodec.h:2551
AVCodecHWConfig public
This is the structure which will be returned to the user by avcodec_get_hw_config().
Definition: hwaccel.h:34
int nb_draining_errors
Definition: internal.h:218
#define FF_SUB_CHARENC_MODE_PRE_DECODER
the AVPacket data needs to be recoded to UTF-8 before being fed to the decoder, requires iconv ...
Definition: avcodec.h:3056
void av_bsf_free(AVBSFContext **ctx)
Free a bitstream filter context and everything associated with it; write NULL into the supplied point...
Definition: bsf.c:35
#define NULL
Definition: coverity.c:32
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
Definition: decode.c:1288
const struct AVCodec * codec
Definition: avcodec.h:1497
const char const char void * val
Definition: avisynth_c.h:771
const AVCodecDescriptor * codec_descriptor
AVCodecDescriptor.
Definition: avcodec.h:3019
const char * s
Definition: avisynth_c.h:768
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
#define AV_NUM_DATA_POINTERS
Definition: frame.h:202
AVCodecParameters * par_out
Parameters of the output stream.
Definition: avcodec.h:5591
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:94
AVPacketSideDataType
Definition: avcodec.h:1126
int64_t pts_correction_num_faulty_dts
Number of incorrect DTS values so far.
Definition: avcodec.h:3036
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it...
Definition: buffer.c:125
#define GET_UTF8(val, GET_BYTE, ERROR)
Convert a UTF-8 character (up to 4 bytes) to its 32-bit UCS-4 encoded form.
Definition: common.h:385
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2363
This structure describes decoded (raw) audio or video data.
Definition: frame.h:201
int(* init)(AVCodecContext *avctx)
Initialize the hwaccel private data.
Definition: avcodec.h:3634
int stride_align[AV_NUM_DATA_POINTERS]
Definition: internal.h:110
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:101
This side data must be associated with an audio frame and corresponds to enum AVAudioServiceType defi...
Definition: frame.h:113
int apply_cropping
Video decoding only.
Definition: avcodec.h:3242
static int decode_receive_frame_internal(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:602
const struct AVCodecHWConfigInternal ** hw_configs
Array of pointers to hardware configurations supported by the codec, or NULL if no hardware supported...
Definition: avcodec.h:3477
#define AV_CODEC_FLAG2_SKIP_MANUAL
Do not skip samples and export skip information as frame side data.
Definition: avcodec.h:934
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:1675
int ff_init_buffer_info(AVCodecContext *avctx, AVFrame *frame)
does needed setup of pkt_pts/pos and such for (re)get_buffer();
Definition: decode.c:1654
int64_t pkt_pos
reordered pos from the last AVPacket that has been input into the decoder
Definition: frame.h:473
int capabilities
Hardware accelerated codec capabilities.
Definition: avcodec.h:3537
const char * fmt
Definition: avisynth_c.h:769
void(* flush)(AVCodecContext *)
Flush buffers.
Definition: avcodec.h:3457
AVPacket * last_pkt_props
Properties (timestamps+side data) extracted from the last packet passed for decoding.
Definition: internal.h:170
misc image utilities
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
static int convert_sub_to_old_ass_form(AVSubtitle *sub, const AVPacket *pkt, AVRational tb)
Definition: decode.c:940
int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2403
AVFrame * to_free
Definition: internal.h:157
int64_t pos
byte position in stream, -1 if unknown
Definition: avcodec.h:1420
static void get_subtitle_defaults(AVSubtitle *sub)
Definition: decode.c:841
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:196
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:393
static int decode_simple_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:589
const char * desc
Definition: nvenc.c:63
int width
Definition: internal.h:109
static FFServerConfig config
Definition: ffserver.c:193
This side data should be associated with a video stream and contains Stereoscopic 3D information in f...
Definition: avcodec.h:1200
ATSC A53 Part 4 Closed Captions.
Definition: avcodec.h:1328
void(* post_process_opaque_free)(void *opaque)
Definition: decode.h:50
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:2118
int nb_extended_buf
Number of elements in extended_buf.
Definition: frame.h:411
Content light level (based on CTA-861.3).
Definition: frame.h:136
int num
Numerator.
Definition: rational.h:59
The bitstream filter state.
Definition: avcodec.h:5557
int size
Definition: avcodec.h:1401
const AVBitStreamFilter * av_bsf_get_by_name(const char *name)
enum AVPixelFormat pix_fmt
Supported pixel format.
Definition: avcodec.h:3531
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel...
Definition: avcodec.h:1866
#define AV_CODEC_PROP_TEXT_SUB
Subtitle codec is text based.
Definition: avcodec.h:760
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1697
int samples
Definition: internal.h:114
static int decode_simple_internal(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:367
int attribute_align_arg avcodec_decode_audio4(AVCodecContext *avctx, AVFrame *frame, int *got_frame_ptr, const AVPacket *avpkt)
Decode the audio frame of size avpkt->size from avpkt->data into frame.
Definition: decode.c:833
enum AVPixelFormat format
The pixel format identifying the underlying HW surface type.
Definition: hwcontext.h:207
Mastering display metadata associated with a video frame.
Definition: frame.h:119
The codec supports this format via the hw_frames_ctx interface.
Definition: avcodec.h:3306
unsigned num_rects
Definition: avcodec.h:3792
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:537
enum AVMediaType type
Definition: avcodec.h:3364
void(* hwaccel_priv_free)(void *priv)
Definition: decode.h:56
static int apply_param_change(AVCodecContext *avctx, const AVPacket *avpkt)
Definition: decode.c:47
static int get_buffer_internal(AVCodecContext *avctx, AVFrame *frame, int flags)
Definition: decode.c:1828
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:236
static int recode_subtitle(AVCodecContext *avctx, AVPacket *outpkt, const AVPacket *inpkt)
Definition: decode.c:848
AVBufferPool * pools[4]
Pools for each data plane.
Definition: internal.h:103
int(* decode)(AVCodecContext *, void *outdata, int *outdata_size, AVPacket *avpkt)
Definition: avcodec.h:3434
int ff_decode_frame_props(AVCodecContext *avctx, AVFrame *frame)
Set various frame properties from the codec context / packet data.
Definition: decode.c:1762
size_t crop_bottom
Definition: frame.h:560
static AVPacket pkt
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, AVPacket *avpkt)
Decode a subtitle message.
Definition: decode.c:998
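A minimal usage sketch of this entry point, not part of decode.c; dec_ctx, pkt and the helper name are assumptions (an opened subtitle decoder and a demuxed packet):

#include "libavcodec/avcodec.h"

/* Decode one subtitle packet and free the result once it has been consumed. */
static int handle_subtitle_packet(AVCodecContext *dec_ctx, AVPacket *pkt)
{
    AVSubtitle sub;
    int got_sub = 0;
    int ret = avcodec_decode_subtitle2(dec_ctx, &sub, &got_sub, pkt);
    if (ret < 0)
        return ret;                /* decoding error */
    if (got_sub) {
        /* ... render sub.rects[0 .. sub.num_rects - 1] ... */
        avsubtitle_free(&sub);     /* releases the rects allocated by the decoder */
    }
    return 0;
}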
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:2644
static int utf8_check(const uint8_t *str)
Definition: decode.c:906
int av_bsf_init(AVBSFContext *ctx)
Prepare the filter for use, after all the parameters and options have been set.
Definition: bsf.c:134
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:235
int ff_reget_buffer(AVCodecContext *avctx, AVFrame *frame)
Identical in function to av_frame_make_writable(), except it uses ff_get_buffer() to allocate the buf...
Definition: decode.c:1935
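A hedged sketch of how a decoder that keeps a frame across calls might use this internal helper; MyDecContext and update_kept_frame are hypothetical names, and the libavcodec-internal "internal.h" header is assumed to provide the declaration:

#include "libavcodec/avcodec.h"
#include "internal.h"   /* ff_reget_buffer(), internal to libavcodec */

typedef struct MyDecContext { AVFrame *frame; } MyDecContext;  /* hypothetical priv_data */

static int update_kept_frame(AVCodecContext *avctx)
{
    MyDecContext *s = avctx->priv_data;
    int ret = ff_reget_buffer(avctx, s->frame);  /* make the kept frame writable again */
    if (ret < 0)
        return ret;
    /* ... overwrite only the regions of s->frame that changed ... */
    return 0;
}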
static int apply_cropping(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:682
void ff_decode_bsfs_uninit(AVCodecContext *avctx)
Definition: decode.c:1969
Mastering display metadata (based on SMPTE-2086:2014).
Definition: avcodec.h:1308
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avcodec.h:1610
int av_get_channel_layout_nb_channels(uint64_t channel_layout)
Return the number of channels in the channel layout.
AVSubtitleRect ** rects
Definition: avcodec.h:3793
int av_codec_is_decoder(const AVCodec *codec)
Definition: utils.c:176
int(* post_process)(void *logctx, AVFrame *frame)
The callback to perform some delayed processing on the frame right before it is returned to the calle...
Definition: decode.h:48
int av_bsf_alloc(const AVBitStreamFilter *filter, AVBSFContext **ctx)
Allocate a context for a given bitstream filter.
Definition: bsf.c:81
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: avcodec.h:981
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
int av_bsf_receive_packet(AVBSFContext *ctx, AVPacket *pkt)
Retrieve a filtered packet.
Definition: bsf.c:196
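For orientation, a standalone sketch (assumed helper name, not taken from this file) of the send/receive pattern these bitstream-filter calls follow:

#include <errno.h>
#include "libavcodec/avcodec.h"

/* Push one packet into a bitstream filter and drain every output packet. */
static int filter_one_packet(AVBSFContext *bsf, AVPacket *in, AVPacket *out)
{
    int ret = av_bsf_send_packet(bsf, in);
    if (ret < 0)
        return ret;

    while ((ret = av_bsf_receive_packet(bsf, out)) >= 0) {
        /* ... consume the filtered packet ... */
        av_packet_unref(out);
    }
    /* EAGAIN (needs more input) and EOF are the normal ways the loop ends. */
    return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
}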
static void filter(int16_t *output, ptrdiff_t out_stride, int16_t *low, ptrdiff_t low_stride, int16_t *high, ptrdiff_t high_stride, int len, int clip)
Definition: cfhd.c:97
DecodeFilterContext filter
Definition: internal.h:164
int height
Definition: internal.h:109
enum AVPixelFormat avcodec_default_get_format(struct AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Definition: decode.c:1086
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:2151
uint8_t
#define av_malloc(s)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:150
int ff_attach_decode_data(AVFrame *frame)
Definition: decode.c:1804
int av_packet_unpack_dictionary(const uint8_t *data, int size, AVDictionary **dict)
Unpack a dictionary from side_data.
Definition: avpacket.c:517
static int64_t guess_correct_pts(AVCodecContext *ctx, int64_t reordered_pts, int64_t dts)
Attempt to guess proper monotonic timestamps for decoded video frames which might have incorrect time...
Definition: decode.c:335
size_t crop_left
Definition: frame.h:561
int avpriv_set_systematic_pal2(uint32_t pal[256], enum AVPixelFormat pix_fmt)
Definition: imgutils.c:152
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:90
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: avcodec.h:1418
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: avcodec.h:1194
#define AV_CODEC_FLAG_UNALIGNED
Allow decoders to produce frames with data planes that are not aligned to CPU requirements (e...
Definition: avcodec.h:826
#define AV_WL8(p, d)
Definition: intreadwrite.h:399
Multithreading support functions.
AVBufferRef * private_ref
AVBufferRef for internal use by a single libav* library.
Definition: frame.h:578
int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt)
Called by decoders to get the next packet for decoding.
Definition: decode.c:294
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:294
enum AVPixelFormat pix_fmt
A hardware pixel format which the codec can use.
Definition: avcodec.h:3329
static AVFrame * frame
int planes
Definition: internal.h:112
Structure to hold side data for an AVFrame.
Definition: frame.h:163
int av_image_check_sar(unsigned int w, unsigned int h, AVRational sar)
Check if the given sample aspect ratio of an image is valid.
Definition: imgutils.c:287
size_t compat_decode_consumed
Definition: internal.h:207
static void finish(void)
Definition: movenc.c:345
uint8_t * data
Definition: avcodec.h:1400
static int flags
Definition: log.c:57
#define AVERROR_EOF
End of file.
Definition: error.h:55
AVDictionary * metadata
metadata.
Definition: frame.h:488
int lowres
low resolution decoding, 1-> 1/2 size, 2->1/4 size
Definition: avcodec.h:2719
#define AV_BUFFER_FLAG_READONLY
Always treat the buffer as read-only, even when it has only one reference.
Definition: buffer.h:113
ptrdiff_t size
Definition: opengl_enc.c:101
The data represents the AVSphericalMapping structure defined in libavutil/spherical.h.
Definition: frame.h:130
void * av_realloc_array(void *ptr, size_t nmemb, size_t size)
Definition: mem.c:198
static int bsfs_poll(AVCodecContext *avctx, AVPacket *pkt)
Definition: decode.c:256
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:2125
#define av_log(a,...)
const char * name
Definition: pixdesc.h:82
int av_packet_ref(AVPacket *dst, const AVPacket *src)
Setup a new reference to the data described by a given packet.
Definition: avpacket.c:599
int av_sample_fmt_is_planar(enum AVSampleFormat sample_fmt)
Check if the sample format is planar.
Definition: samplefmt.c:112
FramePool * pool
Definition: internal.h:159
void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height, int linesize_align[AV_NUM_DATA_POINTERS])
Modify width and height values so that they will result in a memory buffer that is acceptable for the...
Definition: utils.c:246
int ff_thread_decode_frame(AVCodecContext *avctx, AVFrame *picture, int *got_picture_ptr, AVPacket *avpkt)
Submit a new frame to a decoding thread.
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
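As a small worked example (helper name assumed), this is the kind of conversion used to express packet timestamps in AV_TIME_BASE units:

#include "libavcodec/avcodec.h"

/* Map a packet timestamp into microseconds; AV_NOPTS_VALUE is passed through. */
static int64_t pkt_pts_to_us(const AVCodecContext *avctx, const AVPacket *pkt)
{
    if (pkt->pts == AV_NOPTS_VALUE)
        return AV_NOPTS_VALUE;
    return av_rescale_q(pkt->pts, avctx->pkt_timebase, AV_TIME_BASE_Q);
}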
#define AV_RL8(x)
Definition: intreadwrite.h:398
int av_new_packet(AVPacket *pkt, int size)
Allocate the payload of a packet and initialize its fields with default values.
Definition: avpacket.c:86
AVRational pkt_timebase
Timebase in which pkt_dts/pts and AVPacket.dts/pts are.
Definition: avcodec.h:3012
int avcodec_parameters_copy(AVCodecParameters *dst, const AVCodecParameters *src)
Copy the contents of src to dst.
Definition: utils.c:2231
int width
Definition: frame.h:259
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:1777
void * post_process_opaque
Definition: decode.h:49
#define AV_BPRINT_SIZE_UNLIMITED
static int hwaccel_init(AVCodecContext *avctx, const AVCodecHWConfigInternal *hw_config)
Definition: decode.c:1239
static void validate_avframe_allocation(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:1767
#define AVERROR(e)
Definition: error.h:43
An AV_PKT_DATA_PARAM_CHANGE side data packet is laid out as follows:
Definition: avcodec.h:1158
int av_frame_apply_cropping(AVFrame *frame, int flags)
Crop the given video AVFrame according to its crop_left/crop_top/crop_right/ crop_bottom fields...
Definition: frame.c:827
uint8_t * av_packet_get_side_data(const AVPacket *pkt, enum AVPacketSideDataType type, int *size)
Get side information from packet.
Definition: avpacket.c:350
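A small sketch of querying packet side data, here the AV_PKT_DATA_SKIP_SAMPLES payload (two little-endian 32-bit skip counts followed by two reason bytes); logctx and the helper name are assumptions:

#include "libavcodec/avcodec.h"
#include "libavutil/intreadwrite.h"

static void log_skip_samples(void *logctx, const AVPacket *pkt)
{
    int size = 0;
    uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_SKIP_SAMPLES, &size);
    if (sd && size >= 10)
        av_log(logctx, AV_LOG_DEBUG, "skip %u leading / %u trailing samples\n",
               (unsigned)AV_RL32(sd), (unsigned)AV_RL32(sd + 4));
}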
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:163
int64_t pts_correction_last_pts
PTS of the last frame.
Definition: avcodec.h:3037
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:2758
int methods
Bit set of AV_CODEC_HW_CONFIG_METHOD_* flags, describing the possible setup methods which can be used...
Definition: avcodec.h:3334
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:69
int avcodec_is_open(AVCodecContext *s)
Definition: utils.c:2089
int attribute_align_arg avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Return decoded output data from a decoder.
Definition: decode.c:709
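The decoupled API implemented here is meant to be driven in a small loop; a minimal sketch, assuming the caller has opened dec_ctx and allocated frame, with pkt == NULL used to start draining:

#include <errno.h>
#include "libavcodec/avcodec.h"

static int decode_packet(AVCodecContext *dec_ctx, const AVPacket *pkt, AVFrame *frame)
{
    int ret = avcodec_send_packet(dec_ctx, pkt);
    if (ret < 0)
        return ret;

    while (ret >= 0) {
        ret = avcodec_receive_frame(dec_ctx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;              /* needs more input, or fully drained */
        if (ret < 0)
            return ret;            /* a genuine decoding error */

        /* ... consume the decoded frame ... */
        av_frame_unref(frame);
    }
    return 0;
}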
static int extract_packet_props(AVCodecInternal *avci, const AVPacket *pkt)
Definition: decode.c:125
AVFrame * buffer_frame
Definition: internal.h:200
int capabilities
Codec capabilities.
Definition: avcodec.h:3370
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: frame.h:446
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
ATSC A53 Part 4 Closed Captions.
Definition: frame.h:58
AVBufferRef * buf
A reference to the reference-counted buffer where the packet data is stored.
Definition: avcodec.h:1383
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:1568
enum AVColorSpace colorspace
YUV colorspace type.
Definition: frame.h:457
AVRational time_base_in
The timebase used for the timestamps of the input packets.
Definition: avcodec.h:5597
#define AV_PIX_FMT_FLAG_HWACCEL
Pixel format is an HW accelerated format.
Definition: pixdesc.h:140
simple assert() macros that are a bit more flexible than ISO C assert().
int side_data_elems
Definition: avcodec.h:1412
AVBufferRef * av_buffer_create(uint8_t *data, int size, void(*free)(void *opaque, uint8_t *data), void *opaque, int flags)
Create an AVBuffer from an existing array.
Definition: buffer.c:28
int64_t max_pixels
The number of pixels per image to maximally accept.
Definition: avcodec.h:3184
int av_hwframe_ctx_init(AVBufferRef *ref)
Finalize the context before use.
Definition: hwcontext.c:325
static int reget_buffer_internal(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:1896
enum AVHWDeviceType type
This field identifies the underlying API used for hardware access.
Definition: hwcontext.h:77
#define FFMAX(a, b)
Definition: common.h:94
int av_hwframe_get_buffer(AVBufferRef *hwframe_ref, AVFrame *frame, int flags)
Allocate a new frame attached to the given AVHWFramesContext.
Definition: hwcontext.c:461
#define fail()
Definition: checkasm.h:112
char * av_get_token(const char **buf, const char *term)
Unescape the given string until a non escaped terminating char, and return the token corresponding to...
Definition: avstring.c:149
int av_frame_copy(AVFrame *dst, const AVFrame *src)
Copy the frame data from src to dst.
Definition: frame.c:746
const AVHWAccel * hwaccel
If this configuration uses a hwaccel, a pointer to it.
Definition: hwaccel.h:39
#define FF_CODEC_CAP_EXPORTS_CROPPING
The decoder sets the cropping fields in the output frames manually.
Definition: internal.h:66
int priv_data_size
Size of the private data to allocate in AVCodecInternal.hwaccel_priv_data.
Definition: avcodec.h:3648
int flags
A combination of AV_PKT_FLAG values.
Definition: avcodec.h:1406
reference-counted frame API
uint64_t channel_layout
Audio channel layout.
Definition: avcodec.h:2194
uint32_t end_display_time
Definition: avcodec.h:3791
int64_t pts
Same as packet pts, in AV_TIME_BASE.
Definition: avcodec.h:3794
uint64_t channel_layout
Channel layout of the audio data.
Definition: frame.h:379
int props
Codec properties, a combination of AV_CODEC_PROP_* flags.
Definition: avcodec.h:712
size_t crop_top
Definition: frame.h:559
The codec supports this format by some internal method.
Definition: avcodec.h:3313
common internal API header
uint64_t flags
Combination of AV_PIX_FMT_FLAG_...
Definition: pixdesc.h:106
int initial_pool_size
Initial size of the frame pool.
Definition: hwcontext.h:197
int av_packet_copy_props(AVPacket *dst, const AVPacket *src)
Copy only "properties" fields from src to dst.
Definition: avpacket.c:558
int channels
number of audio channels, only used for audio.
Definition: frame.h:506
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:439
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:2612
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:2750
const char * name
Name of the hardware accelerated codec.
Definition: avcodec.h:3510
#define FFMIN(a, b)
Definition: common.h:96
AVBufferRef ** extended_buf
For planar audio which requires more than AV_NUM_DATA_POINTERS AVBufferRef pointers, this array will hold all the references which cannot fit into AVFrame.buf.
Definition: frame.h:407
int channels
Definition: internal.h:113
int(* alloc_frame)(AVCodecContext *avctx, AVFrame *frame)
Allocate a custom buffer.
Definition: avcodec.h:3550
AVFrame * compat_decode_frame
Definition: internal.h:211
int width
picture width / height.
Definition: avcodec.h:1660
AVBufferRef * hw_frames_ctx
A reference to the AVHWFramesContext describing the input (for encoding) or output (decoding) frames...
Definition: avcodec.h:3154
static int add_metadata_from_side_data(const AVPacket *avpkt, AVFrame *frame)
Definition: decode.c:1642
AVRational time_base_out
The timebase used for the timestamps of the output packets.
Definition: avcodec.h:5603
static int compat_decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, const AVPacket *pkt)
Definition: decode.c:744
AVPacket * in_pkt
Definition: internal.h:118
#define AV_PIX_FMT_FLAG_PSEUDOPAL
The pixel format is "pseudo-paletted".
Definition: pixdesc.h:158
This side data should be associated with a video stream and corresponds to the AVSphericalMapping str...
Definition: avcodec.h:1314
int av_bsf_send_packet(AVBSFContext *ctx, AVPacket *pkt)
Submit a packet for filtering.
Definition: bsf.c:175
AVFormatContext * ctx
Definition: movenc.c:48
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
Definition: avcodec.h:2097
AVFrameSideDataType
Definition: frame.h:48
uint16_t format
Definition: avcodec.h:3789
#define FF_DEBUG_BUFFERS
Definition: avcodec.h:2590
int(* frame_params)(AVCodecContext *avctx, AVBufferRef *hw_frames_ctx)
Fill the given hw_frames context with current codec parameters.
Definition: avcodec.h:3663
int64_t reordered_opaque
opaque 64-bit number (generally a PTS) that will be reordered and output in AVFrame.reordered_opaque
Definition: avcodec.h:2637
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: avcodec.h:2623
int n
Definition: avisynth_c.h:684
static int av_bprint_is_complete(const AVBPrint *buf)
Test if the print buffer is complete (not truncated).
Definition: bprint.h:185
const char * bsfs
Decoding only, a comma-separated list of bitstream filters to apply to packets before decoding...
Definition: avcodec.h:3468
DecodeSimpleContext ds
Definition: internal.h:163
char * sub_charenc
Character encoding of the input subtitles file.
Definition: avcodec.h:3045
static int audio_get_buffer(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:1526
int draining
checks API usage: after codec draining, flush is required to resume operation
Definition: internal.h:193
#define FF_ARRAY_ELEMS(a)
int thread_count
thread count is used to decide how many independent tasks should be passed to execute() ...
Definition: avcodec.h:2739
int linesize[4]
Definition: internal.h:111
int sub_charenc_mode
Subtitles character encoding mode.
Definition: avcodec.h:3053
void avcodec_flush_buffers(AVCodecContext *avctx)
Reset the internal decoder state / flush internal buffers.
Definition: decode.c:1943
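The usual call site is a seek; a sketch assuming libavformat does the demuxing (fmt_ctx, stream_index, ts and the helper name are caller-provided assumptions):

#include "libavformat/avformat.h"
#include "libavcodec/avcodec.h"

/* Reposition the demuxer, then drop the decoder's buffered state. */
static int seek_and_flush(AVFormatContext *fmt_ctx, AVCodecContext *dec_ctx,
                          int stream_index, int64_t ts)
{
    int ret = av_seek_frame(fmt_ctx, stream_index, ts, AVSEEK_FLAG_BACKWARD);
    if (ret >= 0)
        avcodec_flush_buffers(dec_ctx);
    return ret;
}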
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:274
Content light level (based on CTA-861.3).
Definition: avcodec.h:1321
int attribute_align_arg avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
Supply raw packet data as input to a decoder.
Definition: decode.c:642
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:251
int avcodec_default_get_buffer2(AVCodecContext *avctx, AVFrame *frame, int flags)
The default callback for AVCodecContext.get_buffer2().
Definition: decode.c:1618
int av_image_fill_pointers(uint8_t *data[4], enum AVPixelFormat pix_fmt, int height, uint8_t *ptr, const int linesizes[4])
Fill plane data pointers for an image with pixel format pix_fmt and height height.
Definition: imgutils.c:111
Libavcodec external API header.
enum AVMediaType codec_type
Definition: avcodec.h:1496
int compat_decode_warned
Definition: internal.h:204
int64_t pkt_duration
duration of the corresponding packet, expressed in AVStream->time_base units, 0 if unknown...
Definition: frame.h:481
A list of zero terminated key/value strings.
Definition: avcodec.h:1258
int attribute_align_arg avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture, int *got_picture_ptr, const AVPacket *avpkt)
Decode the video frame of size avpkt->size from avpkt->data into picture.
Definition: decode.c:826
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: frame.h:84
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:260
int av_frame_is_writable(AVFrame *frame)
Check if the frame data is writable.
Definition: frame.c:549
int sample_rate
samples per second
Definition: avcodec.h:2143
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:232
int debug
debug
Definition: avcodec.h:2568
static AVRational av_make_q(int num, int den)
Create an AVRational.
Definition: rational.h:71
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
const AVCodecHWConfig * avcodec_get_hw_config(const AVCodec *codec, int index)
Retrieve supported hardware configurations for a codec.
Definition: utils.c:1890
main external API structure.
Definition: avcodec.h:1488
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:590
int skip_samples_multiplier
Definition: internal.h:215
uint8_t * data
The data buffer.
Definition: buffer.h:89
void avsubtitle_free(AVSubtitle *sub)
Free all allocated data in the given subtitle struct.
Definition: utils.c:1120
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:289
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1886
uint8_t * data
Definition: frame.h:165
#define AV_CODEC_PROP_BITMAP_SUB
Subtitle codec is bitmap based Decoded AVSubtitle data can be read from the AVSubtitleRect->pict fiel...
Definition: avcodec.h:755
int av_samples_copy(uint8_t **dst, uint8_t *const *src, int dst_offset, int src_offset, int nb_samples, int nb_channels, enum AVSampleFormat sample_fmt)
Copy samples from src to dst.
Definition: samplefmt.c:213
void * buf
Definition: avisynth_c.h:690
size_t crop_right
Definition: frame.h:562
AVBufferRef * av_buffer_allocz(int size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
Definition: buffer.c:83
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:50
int coded_height
Definition: avcodec.h:1675
int64_t reordered_opaque
reordered opaque 64 bits (generally an integer or a double precision float PTS but can be anything)...
Definition: frame.h:369
int sample_rate
Sample rate of the audio data.
Definition: frame.h:374
enum AVPixelFormat(* get_format)(struct AVCodecContext *s, const enum AVPixelFormat *fmt)
callback to negotiate the pixelFormat
Definition: avcodec.h:1739
Definition: f_ebur128.c:91
int av_image_fill_linesizes(int linesizes[4], enum AVPixelFormat pix_fmt, int width)
Fill plane linesizes for an image with pixel format pix_fmt and width width.
Definition: imgutils.c:89
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, int size)
Add a new side data to a frame.
Definition: frame.c:680
const char * av_hwdevice_get_type_name(enum AVHWDeviceType type)
Get the string name of an AVHWDeviceType.
Definition: hwcontext.c:84
#define AV_CODEC_CAP_SUBFRAMES
Codec can output multiple frames per AVPacket Normally demuxers return one frame at a time...
Definition: avcodec.h:999
void av_buffer_pool_uninit(AVBufferPool **ppool)
Mark the pool as being available for freeing.
Definition: buffer.c:275
int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples, enum AVSampleFormat sample_fmt, int align)
Get the required buffer size for the given audio parameters.
Definition: samplefmt.c:119
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:2111
Rational number (pair of numerator and denominator).
Definition: rational.h:58
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
Definition: avcodec.h:2104
int avcodec_parameters_from_context(AVCodecParameters *par, const AVCodecContext *codec)
Fill the parameters struct based on the values from the supplied codec context.
Definition: utils.c:2249
Recommends skipping the specified number of samples.
Definition: avcodec.h:1242
This struct describes a set or pool of "hardware" frames (i.e.
Definition: hwcontext.h:122
int sub_text_format
Control the form of AVSubtitle.rects[N]->ass.
Definition: avcodec.h:3161
int buffer_pkt_valid
Definition: internal.h:199
Apply the maximum possible cropping, even if it requires setting the AVFrame.data[] entries to unalig...
Definition: frame.h:829
int skip_samples
Number of audio samples to skip at the start of the next decoded frame.
Definition: internal.h:183
int(* get_buffer2)(struct AVCodecContext *s, AVFrame *frame, int flags)
This callback is called at the beginning of each frame to get data buffer(s) for it.
Definition: avcodec.h:2298
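A hedged sketch of a pass-through get_buffer2() callback (my_get_buffer2 is an assumed name): it only logs before deferring to avcodec_default_get_buffer2(), where a real allocator would hand out its own reference-counted buffers instead:

#include "libavcodec/avcodec.h"

static int my_get_buffer2(AVCodecContext *avctx, AVFrame *frame, int flags)
{
    av_log(avctx, AV_LOG_DEBUG, "get_buffer2: %dx%d, format %d\n",
           frame->width, frame->height, frame->format);
    return avcodec_default_get_buffer2(avctx, frame, flags);
}
/* installed before avcodec_open2() with: avctx->get_buffer2 = my_get_buffer2; */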
#define STRIDE_ALIGN
Definition: internal.h:95
enum AVChromaLocation chroma_location
Definition: frame.h:459
int64_t best_effort_timestamp
frame timestamp estimated using various heuristics, in stream time base
Definition: frame.h:466
attribute_deprecated int refcounted_frames
If non-zero, the decoded audio and video frames returned from avcodec_decode_video2() and avcodec_dec...
Definition: avcodec.h:2314
int size
Size of data in bytes.
Definition: buffer.h:93
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:510
static int64_t pts
#define SIZE_SPECIFIER
Definition: internal.h:262
This side data should be associated with an audio stream and contains ReplayGain information in form ...
Definition: avcodec.h:1185
#define FF_CODEC_CAP_SETS_PKT_DTS
Decoders marked with FF_CODEC_CAP_SETS_PKT_DTS want to set AVFrame.pkt_dts manually.
Definition: internal.h:55
int ff_decode_get_hw_frames_ctx(AVCodecContext *avctx, enum AVHWDeviceType dev_type)
Make sure avctx.hw_frames_ctx is set.
Definition: decode.c:1149
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:215
static void decode_data_free(void *opaque, uint8_t *data)
Definition: decode.c:1791
#define UTF8_MAX_BYTES
Definition: decode.c:847
attribute_deprecated int64_t pkt_pts
PTS copied from the AVPacket that was decoded to produce this frame.
Definition: frame.h:302
void av_bprint_clear(AVBPrint *buf)
Reset the string to "" but keep internal allocated data.
Definition: bprint.c:227
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
AVPacket * buffer_pkt
buffers for using new encode/decode API through legacy API
Definition: internal.h:198
int64_t pkt_dts
DTS copied from the AVPacket that triggered returning this frame.
Definition: frame.h:310
A reference to a data buffer.
Definition: buffer.h:81
static int unrefcount_frame(AVCodecInternal *avci, AVFrame *frame)
Definition: decode.c:138
AVPacketSideData * side_data
Additional packet data that can be provided by the container.
Definition: avcodec.h:1411
This struct stores per-frame lavc-internal data and is attached to it via private_ref.
Definition: decode.h:37
int avcodec_get_hw_frames_parameters(AVCodecContext *avctx, AVBufferRef *device_ref, enum AVPixelFormat hw_pix_fmt, AVBufferRef **out_frames_ref)
Create and return a AVHWFramesContext with values adequate for hardware decoding. ...
Definition: decode.c:1204
static enum AVPixelFormat hw_pix_fmt
Definition: hw_decode.c:44
#define AV_PKT_FLAG_DISCARD
Flag is used to discard packets which are required to maintain valid decoder state but are not requir...
Definition: avcodec.h:1439
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:84
common internal api header.
common internal and external API header
if(ret< 0)
Definition: vf_mcdeint.c:279
AVBufferPool * av_buffer_pool_init(int size, AVBufferRef *(*alloc)(int size))
Allocate and initialize a buffer pool.
Definition: buffer.c:238
#define AV_HWACCEL_CODEC_CAP_EXPERIMENTAL
HWAccel is experimental and is thus avoided in favor of non experimental codecs.
Definition: avcodec.h:3670
#define AV_CODEC_CAP_PARAM_CHANGE
Codec supports changed parameters at any point.
Definition: avcodec.h:1020
int(* uninit)(AVCodecContext *avctx)
Uninitialize the hwaccel private data.
Definition: avcodec.h:3642
AVBufferRef * av_hwframe_ctx_alloc(AVBufferRef *device_ref_in)
Allocate an AVHWFramesContext tied to a given device context.
Definition: hwcontext.c:239
static void insert_ts(AVBPrint *buf, int ts)
Definition: decode.c:926
void * hwaccel_priv_data
hwaccel-specific private data
Definition: internal.h:188
The codec supports this format by some ad-hoc method.
Definition: avcodec.h:3322
int caps_internal
Internal codec capabilities.
Definition: avcodec.h:3462
int den
Denominator.
Definition: rational.h:60
int av_image_check_size2(unsigned int w, unsigned int h, int64_t max_pixels, enum AVPixelFormat pix_fmt, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of a plane of an image with...
Definition: imgutils.c:253
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
Definition: avcodec.h:770
AVBSFContext ** bsfs
Definition: internal.h:123
The codec supports this format via the hw_device_ctx interface.
Definition: avcodec.h:3297
static int video_get_buffer(AVCodecContext *s, AVFrame *pic)
Definition: decode.c:1571
Formatted text, the ass field must be set by the decoder and is authoritative.
Definition: avcodec.h:3748
AVHWDeviceType
Definition: hwcontext.h:27
void ff_thread_flush(AVCodecContext *avctx)
Wait for decoding threads to finish and reset internal state.
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:85
int channels
number of audio channels
Definition: avcodec.h:2144
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:1523
char * ass
0 terminated ASS/SSA compatible event line.
Definition: avcodec.h:3783
#define AV_FRAME_FLAG_DISCARD
A flag to mark the frames which need to be decoded, but shouldn't be output.
Definition: frame.h:431
int flags2
AV_CODEC_FLAG2_*.
Definition: avcodec.h:1575
enum AVColorPrimaries color_primaries
Definition: frame.h:448
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed...
Definition: avcodec.h:1399
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
int64_t pts_correction_last_dts
DTS of the last frame.
Definition: avcodec.h:3038
size_t compat_decode_partial_size
Definition: internal.h:210
#define AV_CODEC_FLAG_TRUNCATED
Input bitstream might be truncated at a random location instead of only at frame boundaries.
Definition: avcodec.h:867
int frame_number
Frame counter, set by libavcodec.
Definition: avcodec.h:2174
static int update_frame_pool(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:1424
int height
Definition: frame.h:259
#define av_freep(p)
int64_t pts_correction_num_faulty_pts
Number of incorrect PTS values so far.
Definition: avcodec.h:3035
enum AVColorTransferCharacteristic color_trc
Definition: frame.h:450
AVBufferRef * av_buffer_pool_get(AVBufferPool *pool)
Allocate a new AVBuffer, reusing an old buffer from the pool when available.
Definition: buffer.c:334
Recommends skipping the specified number of samples.
Definition: frame.h:108
void * hwaccel_priv
Per-frame private data for hwaccels.
Definition: decode.h:55
#define av_malloc_array(a, b)
enum AVHWDeviceType device_type
The device type associated with the configuration.
Definition: avcodec.h:3341
#define FF_SUB_TEXT_FMT_ASS_WITH_TIMINGS
Definition: avcodec.h:3164
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2279
ReplayGain information in the form of the AVReplayGain struct.
Definition: frame.h:76
enum AVSubtitleType type
Definition: avcodec.h:3774
uint8_t ** extended_data
pointers to the data planes/channels.
Definition: frame.h:248
int format
Definition: internal.h:108
AVBufferRef * hw_device_ctx
A reference to the AVHWDeviceContext describing the device which will be used by a hardware encoder/d...
Definition: avcodec.h:3206
int pkt_size
size of the corresponding packet containing the compressed frame.
Definition: frame.h:515
float min
Stereoscopic 3d metadata.
Definition: frame.h:63
AVPixelFormat
Pixel format.
Definition: pixfmt.h:60
This structure stores compressed data.
Definition: avcodec.h:1377
AVCodecParameters * par_in
Parameters of the input stream.
Definition: avcodec.h:5585
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:1118
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:267
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
Definition: avcodec.h:2546
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: avcodec.h:956
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: avcodec.h:1393
enum AVPixelFormat sw_pix_fmt
Nominal unaccelerated pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:3005
for(j=16;j >0;--j)
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:609
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
static void hwaccel_uninit(AVCodecContext *avctx)
Definition: decode.c:1276
#define tb
Definition: regdef.h:68
#define AV_WL32(p, v)
Definition: intreadwrite.h:426
static int bsfs_init(AVCodecContext *avctx)
Definition: decode.c:184
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:58
void * av_mallocz_array(size_t nmemb, size_t size)
Definition: mem.c:191
This side data should be associated with an audio stream and corresponds to enum AVAudioServiceType.
Definition: avcodec.h:1206
static uint8_t tmp[11]
Definition: aes_ctr.c:26
int(* receive_frame)(AVCodecContext *avctx, AVFrame *frame)
Decode API with decoupled packet/frame dataflow.
Definition: avcodec.h:3452