FFmpeg
adxenc.c
/*
 * ADX ADPCM codecs
 * Copyright (c) 2001,2003 BERO
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "avcodec.h"
#include "adx.h"
#include "bytestream.h"
#include "internal.h"
#include "put_bits.h"

/**
 * @file
 * SEGA CRI adx codecs.
 *
 * Reference documents:
 * http://ku-www.ss.titech.ac.jp/~yatsushi/adx.html
 * adx2wav & wav2adx http://www.geocities.co.jp/Playtown/2004/
 */
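/*
 * Overview (added editorial note, derived from the code in this file):
 * samples are predicted with a second-order LPC filter whose fixed-point
 * coefficients come from ff_adx_calculate_coeffs(), the prediction residual
 * is divided by a per-block scale factor, and the result is stored as a
 * signed 4-bit value. Each block therefore holds one 16-bit big-endian
 * scale followed by 32 packed 4-bit residuals (BLOCK_SAMPLES samples in
 * BLOCK_SIZE bytes), written independently for every channel.
 */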
static void adx_encode(ADXContext *c, uint8_t *adx, const int16_t *wav,
                       ADXChannelState *prev, int channels)
{
    PutBitContext pb;
    int scale;
    int i, j;
    int s0, s1, s2, d;
    int max = 0;
    int min = 0;

    s1 = prev->s1;
    s2 = prev->s2;
    for (i = 0, j = 0; j < 32; i += channels, j++) {
        s0 = wav[i];
        d  = s0 + ((-c->coeff[0] * s1 - c->coeff[1] * s2) >> COEFF_BITS);
        if (max < d)
            max = d;
        if (min > d)
            min = d;
        s2 = s1;
        s1 = s0;
    }

    if (max == 0 && min == 0) {
        prev->s1 = s1;
        prev->s2 = s2;
        memset(adx, 0, BLOCK_SIZE);
        return;
    }

    if (max / 7 > -min / 8)
        scale = max / 7;
    else
        scale = -min / 8;

    if (scale == 0)
        scale = 1;

    AV_WB16(adx, scale);

    init_put_bits(&pb, adx + 2, 16);

    s1 = prev->s1;
    s2 = prev->s2;
    for (i = 0, j = 0; j < 32; i += channels, j++) {
        d = wav[i] + ((-c->coeff[0] * s1 - c->coeff[1] * s2) >> COEFF_BITS);

        d = av_clip_intp2(ROUNDED_DIV(d, scale), 3);

        put_sbits(&pb, 4, d);

        s0 = d * scale + ((c->coeff[0] * s1 + c->coeff[1] * s2) >> COEFF_BITS);
        s2 = s1;
        s1 = s0;
    }
    prev->s1 = s1;
    prev->s2 = s2;

    flush_put_bits(&pb);
}
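/*
 * Illustrative sketch (added, not part of the original file): the per-sample
 * work done by the second loop above, written out for a single sample. The
 * helper name is made up; it assumes the same COEFF_BITS fixed-point
 * convention and the av_clip_intp2()/ROUNDED_DIV() helpers used by
 * adx_encode(). Kept disabled so the listing still compiles unchanged.
 */
#if 0
static int adx_quantize_one(const ADXContext *c, int sample,
                            int s1, int s2, int scale)
{
    /* residual = sample minus the second-order LPC prediction */
    int d = sample + ((-c->coeff[0] * s1 - c->coeff[1] * s2) >> COEFF_BITS);
    /* divide by the block scale, round to nearest, and clamp to the
     * signed 4-bit range [-8, 7] that put_sbits(&pb, 4, d) can hold */
    return av_clip_intp2(ROUNDED_DIV(d, scale), 3);
}
#endif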
#define HEADER_SIZE 36

static int adx_encode_header(AVCodecContext *avctx, uint8_t *buf, int bufsize)
{
    ADXContext *c = avctx->priv_data;

    bytestream_put_be16(&buf, 0x8000);             /* header signature */
    bytestream_put_be16(&buf, HEADER_SIZE - 4);    /* copyright offset */
    bytestream_put_byte(&buf, 3);                  /* encoding */
    bytestream_put_byte(&buf, BLOCK_SIZE);         /* block size */
    bytestream_put_byte(&buf, 4);                  /* sample size */
    bytestream_put_byte(&buf, avctx->channels);    /* channels */
    bytestream_put_be32(&buf, avctx->sample_rate); /* sample rate */
    bytestream_put_be32(&buf, 0);                  /* total sample count */
    bytestream_put_be16(&buf, c->cutoff);          /* cutoff frequency */
    bytestream_put_byte(&buf, 3);                  /* version */
    bytestream_put_byte(&buf, 0);                  /* flags */
    bytestream_put_be32(&buf, 0);                  /* unknown */
    bytestream_put_be32(&buf, 0);                  /* loop enabled */
    bytestream_put_be16(&buf, 0);                  /* padding */
    bytestream_put_buffer(&buf, "(c)CRI", 6);      /* copyright signature */

    return HEADER_SIZE;
}
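/*
 * Resulting 36-byte header layout (added note; offsets follow directly from
 * the writes above):
 *   0x00  be16  0x8000 signature       0x02  be16  copyright offset (32)
 *   0x04  u8    encoding (3)           0x05  u8    block size
 *   0x06  u8    sample size (4 bits)   0x07  u8    channel count
 *   0x08  be32  sample rate            0x0c  be32  total sample count (0)
 *   0x10  be16  cutoff frequency       0x12  u8    version (3)
 *   0x13  u8    flags                  0x14  be32  unknown
 *   0x18  be32  loop enabled           0x1c  be16  padding
 *   0x1e  "(c)CRI" copyright signature (6 bytes)
 */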
static av_cold int adx_encode_init(AVCodecContext *avctx)
{
    ADXContext *c = avctx->priv_data;

    if (avctx->channels > 2) {
        av_log(avctx, AV_LOG_ERROR, "Invalid number of channels\n");
        return AVERROR(EINVAL);
    }
    avctx->frame_size = BLOCK_SAMPLES;

    /* the cutoff can be adjusted, but this seems to work pretty well */
    c->cutoff = 500;
    ff_adx_calculate_coeffs(c->cutoff, avctx->sample_rate, COEFF_BITS, c->coeff);

    return 0;
}
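/*
 * Added note: per its documentation, ff_adx_calculate_coeffs() calculates
 * the two LPC predictor coefficients from the cutoff frequency and the
 * sample rate, scaled by 1 << COEFF_BITS; adx_encode() above then uses them
 * for both prediction and sample reconstruction.
 */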
static int adx_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
                            const AVFrame *frame, int *got_packet_ptr)
{
    ADXContext *c          = avctx->priv_data;
    const int16_t *samples = (const int16_t *)frame->data[0];
    uint8_t *dst;
    int ch, out_size, ret;

    out_size = BLOCK_SIZE * avctx->channels + !c->header_parsed * HEADER_SIZE;
    if ((ret = ff_alloc_packet2(avctx, avpkt, out_size, 0)) < 0)
        return ret;
    dst = avpkt->data;

    if (!c->header_parsed) {
        int hdrsize;
        if ((hdrsize = adx_encode_header(avctx, dst, avpkt->size)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "output buffer is too small\n");
            return AVERROR(EINVAL);
        }
        dst             += hdrsize;
        c->header_parsed = 1;
    }

    for (ch = 0; ch < avctx->channels; ch++) {
        adx_encode(c, dst, samples + ch, &c->prev[ch], avctx->channels);
        dst += BLOCK_SIZE;
    }

    *got_packet_ptr = 1;
    return 0;
}
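/*
 * Added note: each output packet thus holds BLOCK_SIZE bytes per channel,
 * with channels stored as consecutive blocks (adx_encode() reads the
 * interleaved input with a stride of avctx->channels), and the 36-byte file
 * header is prepended only to the very first packet; header_parsed keeps it
 * from being written again.
 */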
AVCodec ff_adpcm_adx_encoder = {
    .name           = "adpcm_adx",
    .long_name      = NULL_IF_CONFIG_SMALL("SEGA CRI ADX ADPCM"),
    .type           = AVMEDIA_TYPE_AUDIO,
    .id             = AV_CODEC_ID_ADPCM_ADX,
    .priv_data_size = sizeof(ADXContext),
    .init           = adx_encode_init,
    .encode2        = adx_encode_frame,
    .sample_fmts    = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S16,
                                                      AV_SAMPLE_FMT_NONE },
};
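/*
 * Usage sketch (added, not part of the original file): how an application
 * might drive this encoder through the public libavcodec API of the same
 * era (send/receive API, avctx->channels). The function name is made up,
 * most error handling is omitted, and the public headers are assumed to be
 * available via the includes at the top of this file. Kept disabled so the
 * listing still compiles unchanged.
 */
#if 0
static int encode_one_adx_frame(void)
{
    const AVCodec *codec = avcodec_find_encoder(AV_CODEC_ID_ADPCM_ADX);
    AVCodecContext *enc  = avcodec_alloc_context3(codec);
    AVFrame *frame       = av_frame_alloc();
    AVPacket *pkt        = av_packet_alloc();
    int ret;

    enc->sample_fmt  = AV_SAMPLE_FMT_S16; /* the only entry in sample_fmts */
    enc->sample_rate = 44100;
    enc->channels    = 1;                 /* adx_encode_init() allows up to 2 */
    if ((ret = avcodec_open2(enc, codec, NULL)) < 0) /* runs adx_encode_init() */
        return ret;

    frame->nb_samples = enc->frame_size;  /* BLOCK_SAMPLES, set in init */
    frame->format     = enc->sample_fmt;
    frame->channels   = enc->channels;
    if ((ret = av_frame_get_buffer(frame, 0)) < 0)
        return ret;
    /* ... fill frame->data[0] with interleaved 16-bit samples ... */

    if ((ret = avcodec_send_frame(enc, frame)) < 0)  /* ends up in adx_encode_frame() */
        return ret;
    ret = avcodec_receive_packet(enc, pkt); /* first packet starts with the 36-byte header */

    av_packet_free(&pkt);
    av_frame_free(&frame);
    avcodec_free_context(&enc);
    return ret;
}
#endif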