61 "changes, but PARAM_CHANGE side data was sent to it.\n");
75 val = bytestream_get_le32(&data);
76 if (val <= 0 || val > INT_MAX) {
93 val = bytestream_get_le32(&data);
94 if (val <= 0 || val > INT_MAX) {
105 avctx->width = bytestream_get_le32(&data);
166 if (!frame->extended_data) {
189 const char *bsfs_str;
196 while (bsfs_str && *bsfs_str) {
199 char *bsf, *bsf_options_str, *bsf_name;
206 bsf_name = av_strtok(bsf, "=", &bsf_options_str);
216 "requested by a decoder. This is a bug, please report it.\n",
238 if (s->nb_bsfs == 1) {
242 s->bsfs[s->nb_bsfs - 1]->time_base_in = (AVRational){ 1, 90000 };
246 s->bsfs[s->nb_bsfs - 1]->time_base_in = s->bsfs[s->nb_bsfs - 2]->time_base_out;
248 s->bsfs[s->nb_bsfs - 2]->par_out);
255 if (bsf_options_str && filter->priv_class) {
257 const char * shorthand[2] = {NULL};
260 shorthand[0] = opt->name;
266 "requested by the decoder. This is a bug, please report it.\n",
297 idx = s->nb_bsfs - 1;
312 if (idx == s->nb_bsfs - 1) {
319 "Error pre-processing a packet before decoding\n");
371 int64_t reordered_pts, int64_t dts)
376 ctx->pts_correction_num_faulty_dts += dts <= ctx->pts_correction_last_dts;
377 ctx->pts_correction_last_dts = dts;
379 ctx->pts_correction_last_dts = reordered_pts;
382 ctx->pts_correction_num_faulty_pts += reordered_pts <= ctx->pts_correction_last_pts;
383 ctx->pts_correction_last_pts = reordered_pts;
385 ctx->pts_correction_last_pts = dts;
387 if ((ctx->pts_correction_num_faulty_pts <= ctx->pts_correction_num_faulty_dts || dts == AV_NOPTS_VALUE)
408 int got_frame, actual_got_frame;
451 actual_got_frame = got_frame;
463 uint32_t discard_padding = 0;
467 if (ret >= 0 && got_frame) {
473 if (!frame->channel_layout)
475 if (!frame->channels)
477 if (!frame->sample_rate)
482 if(side && side_size>=10) {
484 discard_padding = AV_RL32(side + 4);
487 skip_reason = AV_RL8(side + 8);
488 discard_reason = AV_RL8(side + 9);
512 frame->pts += diff_ts;
516 frame->pkt_pts += diff_ts;
520 frame->pkt_dts += diff_ts;
521 if (frame->pkt_duration >= diff_ts)
522 frame->pkt_duration -= diff_ts;
533 if (discard_padding > 0 && discard_padding <= frame->nb_samples && got_frame &&
535 if (discard_padding == frame->nb_samples) {
542 frame->pkt_duration = diff_ts;
547 (int)discard_padding, frame->nb_samples);
548 frame->nb_samples -= discard_padding;
577 #if FF_API_AVCTX_TIMEBASE
593 "Stop draining and force EOF.\n");
628 while (!frame->buf[0]) {
658 if (frame->private_ref) {
688 if (avpkt && !avpkt->size && avpkt->data)
716 if (frame->crop_left >= INT_MAX - frame->crop_right ||
717 frame->crop_top >= INT_MAX - frame->crop_bottom ||
721 "Invalid cropping information set by a decoder: "
723 "(frame size %dx%d). This is a bug, please report it\n",
726 frame->crop_left = 0;
727 frame->crop_right = 0;
729 frame->crop_bottom = 0;
805 " drop count: %d \n",
835 "Got unexpected packet size after a partial decode\n");
873 "API cannot return all the frames for this decoder. "
874 "Some frames will be dropped. Update your code to the "
875 "new decoding API to fix this.\n");
899 int *got_picture_ptr,
902 return compat_decode(avctx, picture, got_picture_ptr, avpkt);
915 memset(sub, 0, sizeof(*sub));
919 #define UTF8_MAX_BYTES 4
924 iconv_t cd = (iconv_t)-1;
956 if (iconv(cd, &inb, &inl, &outb, &outl) == (size_t)-1 ||
957 iconv(cd, NULL, NULL, &outb, &outl) == (size_t)-1 ||
958 outl >= outpkt->size || inl != 0) {
965 outpkt->size -= outl;
966 memset(outpkt->data + outpkt->size, 0, outl);
969 if (cd != (iconv_t)-1)
981 uint32_t codepoint, min;
985 GET_UTF8(codepoint, *(byte++), return 0;);
986 min = byte - str == 1 ? 0 : byte - str == 2 ? 0x80 :
987 1 << (5 * (byte - str) - 4);
988 if (codepoint < min || codepoint >= 0x110000 ||
989 codepoint == 0xFFFE ||
990 codepoint >= 0xD800 && codepoint <= 0xDFFF )
997 #if FF_API_ASS_TIMING
1005 h = ts/360000; ts -= 360000*h;
1006 m = ts/ 6000; ts -= 6000*m;
1007 s = ts/ 100; ts -= 100*s;
1023 int ts_start, ts_duration = -1;
1032 dialog = strchr(rect->ass, ',');
1038 layer = strtol(dialog, (char**)&dialog, 10);
1052 insert_ts(&buf, ts_duration == -1 ? -1 : ts_start + ts_duration);
1062 rect->ass = final_dialog;
1104 ret = avctx->codec->decode(avctx, sub, got_sub_ptr, &pkt_recoded);
1108 #if FF_API_ASS_TIMING
1135 "Invalid UTF-8 in decoded subtitles text; "
1136 "maybe missing -sub_charenc option\n");
1143 if (avpkt->data != pkt_recoded.data) {
1236 "required for hardware accelerated decoding.\n");
1241 if (device_ctx->type != dev_type) {
1315 *out_frames_ref = frames_ref;
1344 if (hwaccel->init) {
1345 err = hwaccel->init(avctx);
1348 "hwaccel initialisation returned error.\n",
1396 memcpy(choices, fmt, (n + 1) * sizeof(*choices));
1402 user_choice = avctx->get_format(avctx, choices);
1412 "get_format() callback.\n");
1419 for (i = 0; i < n; i++) {
1420 if (choices[i] == user_choice)
1425 "%s not in possible list.\n", desc->name);
1447 config = &hw_config->public;
1454 if (frames_ctx->format != user_choice) {
1456 "does not match the format of the provided frames "
1457 "context.\n", desc->name);
1467 "does not match the type of the provided device "
1468 "context.\n", desc->name);
1479 "missing configuration.\n", desc->name);
1484 "initialisation.\n", desc->name);
1494 "get_format() without it.\n", desc->name);
1495 for (i = 0; i < n; i++) {
1496 if (choices[i] == user_choice)
1499 for (; i + 1 < n; i++)
1500 choices[i] = choices[i + 1];
1517 int size[4] = { 0 };
1520 int tmpsize, unaligned;
1538 for (i = 0; i < 4; i++)
1540 } while (unaligned);
1547 for (i = 0; i < 3 && data[i + 1]; i++)
1551 for (i = 0; i < 4; i++) {
1556 CONFIG_MEMORY_POISONING ?
1587 if (!pool->pools[0]) {
1602 for (i = 0; i < 4; i++)
1622 sizeof(*frame->extended_buf));
1623 if (!frame->extended_data || !frame->extended_buf) {
1639 for (i = 0; i < frame->nb_extended_buf; i++) {
1641 if (!frame->extended_buf[i])
1668 "Unable to get pixel format descriptor for format %s\n",
1673 memset(pic->data, 0, sizeof(pic->data));
1676 for (i = 0; i < 4 && pool->pools[i]; i++) {
1742 static const struct {
1777 memcpy(frame_sd->data, packet_sd, size);
1804 if (!frame->sample_aspect_ratio.num)
1809 frame->sample_aspect_ratio) < 0) {
1811 frame->sample_aspect_ratio.num,
1812 frame->sample_aspect_ratio.den);
1818 if (!frame->sample_rate)
1820 if (frame->format < 0)
1822 if (!frame->channel_layout) {
1827 "configuration.\n");
1857 for (i = 0; i < num_planes; i++) {
1863 av_log(avctx, AV_LOG_ERROR, "Buffer returned by get_buffer2() did not zero unused plane pointers\n");
1901 frame->private_ref = fdd_buf;
1909 int override_dimensions = 1;
1919 if (frame->width <= 0 || frame->height <= 0) {
1922 override_dimensions = 0;
1983 av_log(avctx, AV_LOG_WARNING, "Picture changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s in reget buffer()\n",
1988 if (!frame->data[0])
2024 for (int i = 0; i < s->nb_bsfs; i++)
2059 for (i = 0; i < s->nb_bsfs; i++)
static int decode_simple_internal(AVCodecContext *avctx, AVFrame *frame)
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
#define FF_ENABLE_DEPRECATION_WARNINGS
#define AV_LOG_WARNING
Something somehow does not look correct.
int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt)
Called by decoders to get the next packet for decoding.
static enum AVPixelFormat hw_pix_fmt
#define AV_BPRINT_SIZE_UNLIMITED
AVPixelFormat
Pixel format.
#define FF_CODEC_CAP_SETS_PKT_DTS
Decoders marked with FF_CODEC_CAP_SETS_PKT_DTS want to set AVFrame.pkt_dts manually.
int av_codec_is_decoder(const AVCodec *codec)
enum AVPixelFormat(* get_format)(struct AVCodecContext *s, const enum AVPixelFormat *fmt)
Callback to negotiate the pixel format.
uint64_t channel_layout
Audio channel layout.
enum AVColorSpace colorspace
YUV colorspace type.
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
int methods
Bit set of AV_CODEC_HW_CONFIG_METHOD_* flags, describing the possible setup methods which can be used...
static int apply_cropping(AVCodecContext *avctx, AVFrame *frame)
#define FF_COMPLIANCE_EXPERIMENTAL
Allow nonstandardized experimental things.
int sample_rate
samples per second
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
char * av_get_token(const char **buf, const char *term)
Unescape the given string until a non-escaped terminating char, and return the token corresponding to...
@ AV_PKT_DATA_PARAM_CHANGE
An AV_PKT_DATA_PARAM_CHANGE side data packet is laid out as follows:
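A minimal parsing sketch of that layout (hedged: it assumes the documented flag gating and all-little-endian fields, and omits per-field size checks), matching the bytestream_get_le32() calls around lines 75-105 of the listing above:

#include <stdint.h>
#include <libavutil/intreadwrite.h>
#include <libavcodec/avcodec.h>

/* Sketch, not the library implementation: walk the documented
 * AV_PKT_DATA_PARAM_CHANGE payload. All fields are little-endian. */
static void read_param_change(const uint8_t *d, int *sample_rate, int *width, int *height)
{
    uint32_t flags = AV_RL32(d); d += 4;                 /* u32le param_flags    */
    if (flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT)
        d += 4;                                          /* s32le channel_count  */
    if (flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT)
        d += 8;                                          /* u64le channel_layout */
    if (flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE) {
        *sample_rate = AV_RL32(d); d += 4;               /* s32le sample_rate    */
    }
    if (flags & AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS) {
        *width  = AV_RL32(d); d += 4;                    /* s32le width          */
        *height = AV_RL32(d); d += 4;                    /* s32le height         */
    }
}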
int attribute_align_arg avcodec_decode_audio4(AVCodecContext *avctx, AVFrame *frame, int *got_frame_ptr, const AVPacket *avpkt)
Decode the audio frame of size avpkt->size from avpkt->data into frame.
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
#define AVERROR_EOF
End of file.
uint8_t * data
The data buffer.
#define AV_HWACCEL_CODEC_CAP_EXPERIMENTAL
HWAccel is experimental and is thus avoided in favor of non experimental codecs.
void ff_thread_flush(AVCodecContext *avctx)
Wait for decoding threads to finish and reset internal state.
@ AV_FRAME_DATA_A53_CC
ATSC A53 Part 4 Closed Captions.
static int bsfs_poll(AVCodecContext *avctx, AVPacket *pkt)
int avcodec_parameters_from_context(AVCodecParameters *par, const AVCodecContext *codec)
Fill the parameters struct based on the values from the supplied codec context.
#define AV_PKT_FLAG_DISCARD
Flag is used to discard packets which are required to maintain valid decoder state but are not requir...
enum AVPixelFormat format
The pixel format identifying the underlying HW surface type.
int skip_samples
Number of audio samples to skip at the start of the next decoded frame.
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
#define AV_CODEC_FLAG_UNALIGNED
Allow decoders to produce frames with data planes that are not aligned to CPU requirements (e....
int ff_decode_bsfs_init(AVCodecContext *avctx)
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, int size)
Add a new side data to a frame.
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
static int decode_simple_receive_frame(AVCodecContext *avctx, AVFrame *frame)
int av_bsf_send_packet(AVBSFContext *ctx, AVPacket *pkt)
Submit a packet for filtering.
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
static av_cold int end(AVCodecContext *avctx)
This struct stores per-frame lavc-internal data and is attached to it via private_ref.
int av_hwframe_ctx_init(AVBufferRef *ref)
Finalize the context before use.
This structure describes decoded (raw) audio or video data.
AVBufferPool * pools[4]
Pools for each data plane.
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
int capabilities
Codec capabilities.
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, AVPacket *avpkt)
Decode a subtitle message.
AVBufferRef * av_hwframe_ctx_alloc(AVBufferRef *device_ref_in)
Allocate an AVHWFramesContext tied to a given device context.
static int get_buffer_internal(AVCodecContext *avctx, AVFrame *frame, int flags)
int capabilities
Hardware accelerated codec capabilities.
size_t compat_decode_consumed
int(* init)(AVCodecContext *avctx)
Initialize the hwaccel private data.
@ AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE
void * av_mallocz_array(size_t nmemb, size_t size)
AVBufferRef * av_buffer_allocz(int size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
void ff_decode_bsfs_uninit(AVCodecContext *avctx)
AVBufferRef * av_buffer_create(uint8_t *data, int size, void(*free)(void *opaque, uint8_t *data), void *opaque, int flags)
Create an AVBuffer from an existing array.
void(* hwaccel_priv_free)(void *priv)
@ AV_FRAME_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
int ff_reget_buffer(AVCodecContext *avctx, AVFrame *frame)
Identical in function to av_frame_make_writable(), except it uses ff_get_buffer() to allocate the buf...
#define FF_SUB_CHARENC_MODE_PRE_DECODER
the AVPacket data needs to be recoded to UTF-8 before being fed to the decoder, requires iconv
enum AVPixelFormat avcodec_default_get_format(struct AVCodecContext *avctx, const enum AVPixelFormat *fmt)
size_t compat_decode_partial_size
int stride_align[AV_NUM_DATA_POINTERS]
@ AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT
int avcodec_is_open(AVCodecContext *s)
@ AV_PKT_DATA_SPHERICAL
This side data should be associated with a video stream and corresponds to the AVSphericalMapping str...
static int compat_decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, const AVPacket *pkt)
@ AV_FRAME_CROP_UNALIGNED
Apply the maximum possible cropping, even if it requires setting the AVFrame.data[] entries to unalig...
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
The bitstream filter state.
int av_frame_apply_cropping(AVFrame *frame, int flags)
Crop the given video AVFrame according to its crop_left/crop_top/crop_right/ crop_bottom fields.
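A short usage sketch (hedged; AV_FRAME_CROP_UNALIGNED is the only flag assumed here) showing how a caller can apply decoder-exported cropping after receiving a frame:

#include <libavutil/frame.h>

static int crop_decoded_frame(AVFrame *frame, int allow_unaligned)
{
    /* Adjusts frame->data[]/width/height in place according to the crop_*
     * fields; returns a negative AVERROR code on invalid cropping. */
    return av_frame_apply_cropping(frame, allow_unaligned ? AV_FRAME_CROP_UNALIGNED : 0);
}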
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
@ AV_CODEC_HW_CONFIG_METHOD_AD_HOC
The codec supports this format by some ad-hoc method.
uint64_t initial_channel_layout
@ SUBTITLE_ASS
Formatted text, the ass field must be set by the decoder and is authoritative.
int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
AVBufferPool * av_buffer_pool_init(int size, AVBufferRef *(*alloc)(int size))
Allocate and initialize a buffer pool.
int av_bsf_alloc(const AVBitStreamFilter *filter, AVBSFContext **ctx)
Allocate a context for a given bitstream filter.
const struct AVCodec * codec
int showed_multi_packet_warning
int thread_count
thread count is used to decide how many independent tasks should be passed to execute()
#define AV_PIX_FMT_FLAG_HWACCEL
Pixel format is an HW accelerated format.
const char * bsfs
Decoding only, a comma-separated list of bitstream filters to apply to packets before decoding.
static void bsfs_flush(AVCodecContext *avctx)
int flags
AV_CODEC_FLAG_*.
void(* post_process_opaque_free)(void *opaque)
int av_image_fill_pointers(uint8_t *data[4], enum AVPixelFormat pix_fmt, int height, uint8_t *ptr, const int linesizes[4])
Fill plane data pointers for an image with pixel format pix_fmt and height height.
static int add_metadata_from_side_data(const AVPacket *avpkt, AVFrame *frame)
@ AV_PKT_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
static int64_t guess_correct_pts(AVCodecContext *ctx, int64_t reordered_pts, int64_t dts)
Attempt to guess proper monotonic timestamps for decoded video frames which might have incorrect time...
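A self-contained restatement of that heuristic (a sketch assuming only the counters shown in lines 371-387 of the listing, passed in as plain pointers instead of AVCodecContext fields):

#include <stdint.h>
#include <libavutil/avutil.h>

static int64_t guess_pts(int64_t *last_pts, int64_t *last_dts,
                         int64_t *faulty_pts, int64_t *faulty_dts,
                         int64_t reordered_pts, int64_t dts)
{
    /* Count non-monotonic values on each timestamp stream... */
    if (dts != AV_NOPTS_VALUE) {
        *faulty_dts += dts <= *last_dts;
        *last_dts = dts;
    } else if (reordered_pts != AV_NOPTS_VALUE) {
        *last_dts = reordered_pts;
    }
    if (reordered_pts != AV_NOPTS_VALUE) {
        *faulty_pts += reordered_pts <= *last_pts;
        *last_pts = reordered_pts;
    } else if (dts != AV_NOPTS_VALUE) {
        *last_pts = dts;
    }
    /* ...then trust whichever stream has misbehaved less often. */
    if ((*faulty_pts <= *faulty_dts || dts == AV_NOPTS_VALUE) &&
        reordered_pts != AV_NOPTS_VALUE)
        return reordered_pts;
    return dts;
}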
char * ass
0 terminated ASS/SSA compatible event line.
int priv_data_size
Size of the private data to allocate in AVCodecInternal.hwaccel_priv_data.
int av_image_check_size2(unsigned int w, unsigned int h, int64_t max_pixels, enum AVPixelFormat pix_fmt, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of a plane of an image with...
const AVBitStreamFilter * av_bsf_get_by_name(const char *name)
void avsubtitle_free(AVSubtitle *sub)
Free all allocated data in the given subtitle struct.
This struct aggregates all the (hardware/vendor-specific) "high-level" state, i.e.
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
@ AV_PKT_DATA_STRINGS_METADATA
A list of zero terminated key/value strings.
@ AV_PKT_DATA_REPLAYGAIN
This side data should be associated with an audio stream and contains ReplayGain information in form ...
int(* get_buffer2)(struct AVCodecContext *s, AVFrame *frame, int flags)
This callback is called at the beginning of each frame to get data buffer(s) for it.
#define GET_UTF8(val, GET_BYTE, ERROR)
Convert a UTF-8 character (up to 4 bytes) to its 32-bit UCS-4 encoded form.
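A hedged usage sketch of GET_UTF8(), mirroring the utf8_check() pattern at lines 981-990 of the listing (the simplified range check here is an assumption, not the library's exact validation):

#include <stdint.h>
#include <libavutil/common.h>

static int is_valid_utf8(const uint8_t *str)
{
    const uint8_t *byte = str;
    while (*byte) {
        uint32_t codepoint;
        GET_UTF8(codepoint, *(byte++), return 0;);  /* bail out on malformed input */
        if (codepoint >= 0x110000)                  /* outside the Unicode range */
            return 0;
    }
    return 1;
}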
#define AV_CODEC_FLAG_DROPCHANGED
Don't output frames whose parameters differ from first decoded frame in stream.
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
static void hwaccel_uninit(AVCodecContext *avctx)
int(* alloc_frame)(AVCodecContext *avctx, AVFrame *frame)
Allocate a custom buffer.
@ AV_PKT_DATA_AUDIO_SERVICE_TYPE
This side data should be associated with an audio stream and corresponds to enum AVAudioServiceType.
AVBufferRef * av_buffer_pool_get(AVBufferPool *pool)
Allocate a new AVBuffer, reusing an old buffer from the pool when available.
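A minimal sketch of the pool lifecycle used for frame plane buffers (the 4096-byte size is an arbitrary example, and the default allocator is used):

#include <libavutil/buffer.h>

static void pool_example(void)
{
    AVBufferPool *pool = av_buffer_pool_init(4096, NULL); /* NULL = default allocator */
    if (!pool)
        return;
    AVBufferRef *buf = av_buffer_pool_get(pool);  /* reuses a free buffer if one exists */
    if (buf)
        av_buffer_unref(&buf);                    /* returns the buffer to the pool */
    av_buffer_pool_uninit(&pool);                 /* pool is freed once all buffers return */
}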
int has_b_frames
Size of the frame reordering buffer in the decoder.
@ AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS
int av_image_fill_linesizes(int linesizes[4], enum AVPixelFormat pix_fmt, int width)
Fill plane linesizes for an image with pixel format pix_fmt and width width.
static void get_subtitle_defaults(AVSubtitle *sub)
void * post_process_opaque
int av_new_packet(AVPacket *pkt, int size)
Allocate the payload of a packet and initialize its fields with default values.
static void validate_avframe_allocation(AVCodecContext *avctx, AVFrame *frame)
@ AV_PKT_DATA_STEREO3D
This side data should be associated with a video stream and contains Stereoscopic 3D information in f...
AVPacket * buffer_pkt
buffers for using new encode/decode API through legacy API
AVFrame * compat_decode_frame
#define AV_CEIL_RSHIFT(a, b)
void * av_realloc_array(void *ptr, size_t nmemb, size_t size)
#define AV_BUFFER_FLAG_READONLY
Always treat the buffer as read-only, even when it has only one reference.
@ AV_PKT_DATA_MASTERING_DISPLAY_METADATA
Mastering display metadata (based on SMPTE-2086:2014).
int(* uninit)(AVCodecContext *avctx)
Uninitialize the hwaccel private data.
int channels
number of audio channels, only used for audio.
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
int sub_text_format
Control the form of AVSubtitle.rects[N]->ass.
char * av_strtok(char *s, const char *delim, char **saveptr)
Split the string into several tokens which can be accessed by successive calls to av_strtok().
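A hedged sketch of tokenizing a "name=options,name2" bitstream-filter list with av_get_token() and av_strtok(), in the spirit of the loop around lines 196-206 above (error handling trimmed):

#include <libavutil/avstring.h>
#include <libavutil/mem.h>

static void split_bsf_list(const char *bsfs_str)
{
    while (bsfs_str && *bsfs_str) {
        char *bsf_options_str, *bsf_name;
        char *bsf = av_get_token(&bsfs_str, ",");          /* one "name=options" element */
        if (!bsf)
            return;
        bsf_name = av_strtok(bsf, "=", &bsf_options_str);  /* split name from its options */
        /* ... look the filter up by bsf_name, apply bsf_options_str ... */
        av_freep(&bsf);
        if (*bsfs_str)
            bsfs_str++;                                     /* skip the ',' separator */
    }
}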
#define AVERROR_INPUT_CHANGED
Input changed between calls. Reconfiguration is required. (can be OR-ed with AVERROR_OUTPUT_CHANGED)
@ AV_FRAME_DATA_AUDIO_SERVICE_TYPE
This side data must be associated with an audio frame and corresponds to enum AVAudioServiceType defi...
int attribute_align_arg avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Return decoded output data from a decoder.
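A minimal sketch of the decoupled send/receive loop that this function and avcodec_send_packet() form, and that the deprecated avcodec_decode_video2()/avcodec_decode_audio4() wrappers emulate via compat_decode():

#include <libavcodec/avcodec.h>
#include <libavutil/frame.h>

static int decode_packet(AVCodecContext *avctx, const AVPacket *pkt, AVFrame *frame)
{
    int ret = avcodec_send_packet(avctx, pkt);   /* pkt == NULL enters draining mode */
    if (ret < 0)
        return ret;

    while ((ret = avcodec_receive_frame(avctx, frame)) >= 0) {
        /* ... use frame ... */
        av_frame_unref(frame);
    }
    /* EAGAIN: the decoder needs more input; AVERROR_EOF: fully drained. */
    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
        return 0;
    return ret;
}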
#define av_assert0(cond)
assert() equivalent, that is always enabled.
int av_sample_fmt_is_planar(enum AVSampleFormat sample_fmt)
Check if the sample format is planar.
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
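A small example of rescaling a timestamp between time bases, e.g. from a 90 kHz packet time base to AV_TIME_BASE_Q microseconds (the 90 kHz value is only an illustration):

#include <libavutil/avutil.h>
#include <libavutil/mathematics.h>

static int64_t to_microseconds(int64_t pts)
{
    AVRational tb_90khz = { 1, 90000 };
    /* pts * (1/90000) expressed in units of (1/1000000) */
    return av_rescale_q(pts, tb_90khz, AV_TIME_BASE_Q);
}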
enum AVPixelFormat pix_fmt
A hardware pixel format which the codec can use.
int64_t pts
Same as packet pts, in AV_TIME_BASE.
void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height, int linesize_align[AV_NUM_DATA_POINTERS])
Modify width and height values so that they will result in a memory buffer that is acceptable for the...
int64_t max_pixels
The number of pixels per image to maximally accept.
const char * av_hwdevice_get_type_name(enum AVHWDeviceType type)
Get the string name of an AVHWDeviceType.
void av_bsf_free(AVBSFContext **ctx)
Free a bitstream filter context and everything associated with it; write NULL into the supplied point...
@ AV_CODEC_HW_CONFIG_METHOD_INTERNAL
The codec supports this format by some internal method.
int avpriv_set_systematic_pal2(uint32_t pal[256], enum AVPixelFormat pix_fmt)
static int recode_subtitle(AVCodecContext *avctx, AVPacket *outpkt, const AVPacket *inpkt)
static void decode_data_free(void *opaque, uint8_t *data)
int ff_decode_get_hw_frames_ctx(AVCodecContext *avctx, enum AVHWDeviceType dev_type)
Make sure avctx.hw_frames_ctx is set.
uint8_t * av_packet_get_side_data(const AVPacket *pkt, enum AVPacketSideDataType type, int *size)
Get side information from packet.
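A hedged sketch of reading the 10-byte AV_PKT_DATA_SKIP_SAMPLES payload the way lines 482-488 of the listing do (field meanings per the documented side-data layout):

#include <libavcodec/avcodec.h>
#include <libavutil/intreadwrite.h>

static void read_skip_samples(const AVPacket *pkt)
{
    int side_size;
    uint8_t *side = av_packet_get_side_data(pkt, AV_PKT_DATA_SKIP_SAMPLES, &side_size);
    if (side && side_size >= 10) {
        uint32_t skip_samples    = AV_RL32(side);     /* samples to skip at the start  */
        uint32_t discard_padding = AV_RL32(side + 4); /* samples to discard at the end */
        uint8_t  skip_reason     = AV_RL8(side + 8);
        uint8_t  discard_reason  = AV_RL8(side + 9);
        (void)skip_samples; (void)discard_padding; (void)skip_reason; (void)discard_reason;
    }
}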
int props
Codec properties, a combination of AV_CODEC_PROP_* flags.
static int convert_sub_to_old_ass_form(AVSubtitle *sub, const AVPacket *pkt, AVRational tb)
int changed_frames_dropped
char * sub_charenc
Character encoding of the input subtitles file.
#define AV_CODEC_FLAG2_SKIP_MANUAL
Do not skip samples and export skip information as frame side data.
AVBufferRef * buf
A reference to the reference-counted buffer where the packet data is stored.
static int utf8_check(const uint8_t *str)
@ AV_FRAME_DATA_SPHERICAL
The data represents the AVSphericalMapping structure defined in libavutil/spherical....
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
int apply_cropping
Video decoding only.
enum AVColorRange color_range
MPEG vs JPEG YUV range.
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Rational number (pair of numerator and denominator).
struct AVCodecInternal * internal
Private context used for internal data.
static int audio_get_buffer(AVCodecContext *avctx, AVFrame *frame)
int av_opt_set_from_string(void *ctx, const char *opts, const char *const *shorthand, const char *key_val_sep, const char *pairs_sep)
Parse the key-value pairs list in opts.
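A hedged usage sketch, in the style of the bitstream-filter option handling around line 257 above; the option name "threshold" and the separators are illustrative assumptions for a generic AVOptions-enabled object:

#include <libavutil/opt.h>

static int apply_options(void *obj_with_avclass, const char *opts)
{
    /* "threshold" is a hypothetical first positional option name. */
    const char *shorthand[] = { "threshold", NULL };
    /* Parses e.g. "5:flags=low_delay" with '=' between key and value
     * and ':' between pairs; returns a negative AVERROR on failure. */
    return av_opt_set_from_string(obj_with_avclass, opts, shorthand, "=", ":");
}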
static int reget_buffer_internal(AVCodecContext *avctx, AVFrame *frame)
#define AV_CODEC_PROP_BITMAP_SUB
Subtitle codec is bitmap-based. Decoded AVSubtitle data can be read from the AVSubtitleRect->pict fiel...
void av_buffer_pool_uninit(AVBufferPool **ppool)
Mark the pool as being available for freeing.
#define AV_CODEC_FLAG_TRUNCATED
Input bitstream might be truncated at a random location instead of only at frame boundaries.
@ AV_FRAME_DATA_MASTERING_DISPLAY_METADATA
Mastering display metadata associated with a video frame.
int skip_samples_multiplier
int av_packet_ref(AVPacket *dst, const AVPacket *src)
Setup a new reference to the data described by a given packet.
#define AV_EF_EXPLODE
abort decoding on minor error detection
#define FF_CODEC_CAP_EXPORTS_CROPPING
The decoder sets the cropping fields in the output frames manually.
@ AVCOL_RANGE_UNSPECIFIED
int av_get_channel_layout_nb_channels(uint64_t channel_layout)
Return the number of channels in the channel layout.
AVPacket * last_pkt_props
Properties (timestamps+side data) extracted from the last packet passed for decoding.
static int av_bprint_is_complete(const AVBPrint *buf)
Test if the print buffer is complete (not truncated).
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
int lowres
low resolution decoding, 1-> 1/2 size, 2->1/4 size
int flags2
AV_CODEC_FLAG2_*.
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
static const struct @314 planes[]
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
static int update_frame_pool(AVCodecContext *avctx, AVFrame *frame)
@ AV_FRAME_DATA_REPLAYGAIN
ReplayGain information in the form of the AVReplayGain struct.
int ff_thread_decode_frame(AVCodecContext *avctx, AVFrame *picture, int *got_picture_ptr, AVPacket *avpkt)
Submit a new frame to a decoding thread.
void av_bsf_flush(AVBSFContext *ctx)
Reset the internal bitstream filter state / flush internal buffers.
int(* post_process)(void *logctx, AVFrame *frame)
The callback to perform some delayed processing on the frame right before it is returned to the calle...
void * hwaccel_priv_data
hwaccel-specific private data
int av_frame_copy(AVFrame *dst, const AVFrame *src)
Copy the frame data from src to dst.
enum AVSampleFormat sample_fmt
audio sample format
AVRational pkt_timebase
Timebase in which pkt_dts/pts and AVPacket.dts/pts are.
#define AV_NUM_DATA_POINTERS
static AVRational av_make_q(int num, int den)
Create an AVRational.
#define AV_NOPTS_VALUE
Undefined timestamp value.
int av_frame_is_writable(AVFrame *frame)
Check if the frame data is writable.
@ AVCHROMA_LOC_UNSPECIFIED
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
uint32_t end_display_time
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
int64_t pts_correction_last_pts
PTS of the last frame.
uint64_t channel_layout
Channel layout of the audio data.
static int hwaccel_init(AVCodecContext *avctx, const AVCodecHWConfigInternal *hw_config)
@ AV_PKT_DATA_CONTENT_LIGHT_LEVEL
Content light level (based on CTA-861.3).
int flags
A combination of AV_PKT_FLAG values.
int av_packet_unpack_dictionary(const uint8_t *data, int size, AVDictionary **dict)
Unpack a dictionary from side_data.
#define AV_LOG_INFO
Standard information.
#define FF_THREAD_FRAME
Decode more than one frame at once.
@ AV_FRAME_DATA_SKIP_SAMPLES
Recommends skipping the specified number of samples.
int channels
number of audio channels
const char * name
Name of the hardware accelerated codec.
int avcodec_default_get_buffer2(AVCodecContext *avctx, AVFrame *frame, int flags)
The default callback for AVCodecContext.get_buffer2().
int attribute_align_arg avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
Supply raw packet data as input to a decoder.
int av_samples_copy(uint8_t **dst, uint8_t *const *src, int dst_offset, int src_offset, int nb_samples, int nb_channels, enum AVSampleFormat sample_fmt)
Copy samples from src to dst.
static int extract_packet_props(AVCodecInternal *avci, const AVPacket *pkt)
int av_packet_copy_props(AVPacket *dst, const AVPacket *src)
Copy only "properties" fields from src to dst.
int nb_samples
number of audio samples (per channel) described by this frame
@ AV_FRAME_DATA_CONTENT_LIGHT_LEVEL
Content light level (based on CTA-861.3).
#define i(width, name, range_min, range_max)
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
static int decode_receive_frame_internal(AVCodecContext *avctx, AVFrame *frame)
uint8_t ** extended_data
pointers to the data planes/channels.
#define av_malloc_array(a, b)
int attribute_align_arg avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture, int *got_picture_ptr, const AVPacket *avpkt)
Decode the video frame of size avpkt->size from avpkt->data into picture.
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
@ AV_PKT_DATA_A53_CC
ATSC A53 Part 4 Closed Captions.
int64_t pts_correction_last_dts
DTS of the last frame.
@ AV_FRAME_DATA_STEREO3D
Stereoscopic 3d metadata.
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
AVBufferRef * hw_device_ctx
A reference to the AVHWDeviceContext describing the device which will be used by a hardware encoder/d...
@ AV_PKT_DATA_SKIP_SAMPLES
Recommends skipping the specified number of samples.
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
int(* decode)(AVCodecContext *, void *outdata, int *outdata_size, AVPacket *avpkt)
int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples, enum AVSampleFormat sample_fmt, int align)
Get the required buffer size for the given audio parameters.
static int video_get_buffer(AVCodecContext *s, AVFrame *pic)
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
const AVOption * av_opt_next(const void *obj, const AVOption *last)
Iterate over all AVOptions belonging to obj.
AVBufferRef * hw_frames_ctx
A reference to the AVHWFramesContext describing the input (for encoding) or output (decoding) frames.
int sub_charenc_mode
Subtitles character encoding mode.
This struct describes a set or pool of "hardware" frames (i.e.
int avcodec_get_hw_frames_parameters(AVCodecContext *avctx, AVBufferRef *device_ref, enum AVPixelFormat hw_pix_fmt, AVBufferRef **out_frames_ref)
Create and return a AVHWFramesContext with values adequate for hardware decoding.
enum AVHWDeviceType type
This field identifies the underlying API used for hardware access.
int caps_internal
Internal codec capabilities.
void avcodec_flush_buffers(AVCodecContext *avctx)
Reset the internal decoder state / flush internal buffers.
const struct AVCodecHWConfigInternal ** hw_configs
Array of pointers to hardware configurations supported by the codec, or NULL if no hardware supported...
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
#define AV_CODEC_PROP_TEXT_SUB
Subtitle codec is text based.
void av_bprintf(AVBPrint *buf, const char *fmt,...)
DecodeFilterContext filter
AVPacketSideData * side_data
Additional packet data that can be provided by the container.
#define AV_INPUT_BUFFER_PADDING_SIZE
attribute_deprecated int refcounted_frames
If non-zero, the decoded audio and video frames returned from avcodec_decode_video2() and avcodec_dec...
#define FF_SUB_TEXT_FMT_ASS_WITH_TIMINGS
#define FF_ARRAY_ELEMS(a)
static void insert_ts(AVBPrint *buf, int ts)
static int apply_param_change(AVCodecContext *avctx, const AVPacket *avpkt)
int ff_decode_frame_props(AVCodecContext *avctx, AVFrame *frame)
Set various frame properties from the codec context / packet data.
main external API structure.
int active_thread_type
Which multithreading methods are in use by the codec.
const AVCodecDescriptor * codec_descriptor
AVCodecDescriptor.
void av_bprint_clear(AVBPrint *buf)
Reset the string to "" but keep internal allocated data.
int av_bsf_init(AVBSFContext *ctx)
Prepare the filter for use, after all the parameters and options have been set.
@ AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT
static int unrefcount_frame(AVCodecInternal *avci, AVFrame *frame)
@ AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX
The codec supports this format via the hw_frames_ctx interface.
int av_bsf_receive_packet(AVBSFContext *ctx, AVPacket *pkt)
Retrieve a filtered packet.
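A hedged sketch of the basic lifecycle of a single bitstream filter, in the spirit of what ff_decode_bsfs_init()/bsfs_poll() do for the avctx->bsfs chain; the "null" filter name is just an example, and the AVBSF declarations live in avcodec.h in this libavcodec version (bsf.h in newer ones):

#include <libavcodec/avcodec.h>

static int run_one_bsf(AVCodecParameters *par, AVPacket *in, AVPacket *out)
{
    const AVBitStreamFilter *f = av_bsf_get_by_name("null");
    AVBSFContext *bsf = NULL;
    int ret;

    if (!f)
        return AVERROR_BSF_NOT_FOUND;
    if ((ret = av_bsf_alloc(f, &bsf)) < 0)
        return ret;
    if ((ret = avcodec_parameters_copy(bsf->par_in, par)) < 0)
        goto end;
    if ((ret = av_bsf_init(bsf)) < 0)
        goto end;

    if ((ret = av_bsf_send_packet(bsf, in)) < 0)  /* the filter takes ownership of in */
        goto end;
    while ((ret = av_bsf_receive_packet(bsf, out)) >= 0)
        av_packet_unref(out);                     /* ... or hand the packet onward ... */
    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
        ret = 0;
end:
    av_bsf_free(&bsf);
    return ret;
}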
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
const AVCodecHWConfig * avcodec_get_hw_config(const AVCodec *codec, int index)
Retrieve supported hardware configurations for a codec.
#define AV_CODEC_CAP_PARAM_CHANGE
Codec supports changed parameters at any point.
int draining
checks API usage: after codec draining, flush is required to resume operation
#define FF_DISABLE_DEPRECATION_WARNINGS
int coded_width
Bitstream width / height, may be different from width/height e.g.
int initial_pool_size
Initial size of the frame pool.
enum AVMediaType codec_type
char * av_strdup(const char *s)
Duplicate a string.
#define AV_FRAME_FLAG_DISCARD
A flag to mark the frames which need to be decoded, but shouldn't be output.
A reference to a data buffer.
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
#define AV_CODEC_CAP_SUBFRAMES
Codec can output multiple frames per AVPacket. Normally demuxers return one frame at a time,...
int(* receive_frame)(AVCodecContext *avctx, AVFrame *frame)
Decode API with decoupled packet/frame dataflow.
int ff_attach_decode_data(AVFrame *frame)
int frame_number
Frame counter, set by libavcodec.
Structure to hold side data for an AVFrame.
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
This structure stores compressed data.
void(* flush)(AVCodecContext *)
Flush buffers.
int(* frame_params)(AVCodecContext *avctx, AVBufferRef *hw_frames_ctx)
Fill the given hw_frames context with current codec parameters.
int64_t pos
byte position in stream, -1 if unknown
int64_t reordered_opaque
opaque 64-bit number (generally a PTS) that will be reordered and output in AVFrame....
@ AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX
The codec supports this format via the hw_device_ctx interface.
int width
picture width / height.
void * hwaccel_priv
Per-frame private data for hwaccels.
#define flags(name, subs,...)
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
const AVHWAccel * hwaccel
If this configuration uses a hwaccel, a pointer to it.
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
enum AVPixelFormat sw_pix_fmt
Nominal unaccelerated pixel format, see AV_PIX_FMT_xxx.
int av_image_check_sar(unsigned int w, unsigned int h, AVRational sar)
Check if the given sample aspect ratio of an image is valid.
#define FF_SANE_NB_CHANNELS
AVCodecHWConfig public
This is the structure which will be returned to the user by avcodec_get_hw_config().
#define AV_PIX_FMT_FLAG_PAL
Pixel format has a palette in data[1], values are indexes in this palette.
int av_hwframe_get_buffer(AVBufferRef *hwframe_ref, AVFrame *frame, int flags)
Allocate a new frame attached to the given AVHWFramesContext.
enum AVHWDeviceType device_type
The device type associated with the configuration.
enum AVPixelFormat pix_fmt
Supported pixel format.
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel.
int avcodec_parameters_copy(AVCodecParameters *dst, const AVCodecParameters *src)
Copy the contents of src to dst.
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
#define FF_SUB_CHARENC_MODE_IGNORE
neither convert the subtitles, nor check them for valid UTF-8