38 #define ON2AVC_SUBFRAME_SIZE 1024
96      int w, b, band_off = 0;
101     for (w = 0; w < c->num_windows; w++) {
102         if (!c->grouping[w]) {
103             memcpy(c->ms_info + band_off,
104                    c->ms_info + band_off - c->num_bands,
105                    c->num_bands * sizeof(*c->ms_info));
106             band_off += c->num_bands;
109         for (b = 0; b < c->num_bands; b++)
117     int bits_per_sect = c->is_long ? 5 : 3;
118     int esc_val = (1 << bits_per_sect) - 1;
132         } while (run == esc_val);
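The do/while above is the escape-coded run-length read used for band-type sections: runs are read in bits_per_sect-bit chunks (5 bits for long windows, 3 for short), and a chunk equal to esc_val means another chunk follows. A minimal hedged sketch of that pattern; the local names other than bits_per_sect/esc_val and the surrounding bookkeeping are illustrative assumptions, not the decoder's exact code:

    int sect_len = 0, run;
    do {
        run = get_bits(gb, bits_per_sect);   /* 5-bit chunks for long windows, 3-bit for short */
        sect_len += run;                     /* accumulate the section length */
    } while (run == esc_val);                /* escape value: keep reading chunks */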
150     for (w = 0; w < c->num_windows; w++) {
151         if (!c->grouping[w]) {
152             memcpy(c->band_scales + band_off,
153                    c->band_scales + band_off - c->num_bands,
154                    c->num_bands * sizeof(*c->band_scales));
155             band_off += c->num_bands;
158         for (b = 0; b < c->num_bands; b++) {
159             if (!c->band_type[band_off]) {
161                 for (w2 = w + 1; w2 < c->num_windows; w2++) {
164                     if (c->band_type[w2 * c->num_bands + b]) {
170                 c->band_scales[band_off++] = 0;
180             if (scale < 0 || scale > 127) {
185             c->band_scales[band_off++] = c->scale_tab[scale];
199                               int dst_size, int type, float band_scale)
203     for (i = 0; i < dst_size; i += 4) {
206         for (j = 0; j < 4; j++) {
232                               int dst_size, int type, float band_scale)
234     int i, val, val1, val2, sign;
236     for (i = 0; i < dst_size; i += 2) {
242         if (val1 <= -16 || val1 >= 16) {
243             sign = 1 - (val1 < 0) * 2;
246         if (val2 <= -16 || val2 >= 16) {
247             sign = 1 - (val2 < 0) * 2;
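Both on2avc_decode_quads() and on2avc_decode_pairs() push each decoded integer through on2avc_scale() together with the per-band scale factor before storing it into dst. The helper's body is not visible in this listing; a hedged reconstruction of the signed power-law dequantizer it implements, with the exact formula an assumption:

    static float on2avc_scale(int v, float scale)
    {
        /* signed |v|^1.5 expansion, then the per-band scale factor */
        return v * sqrtf(abs(v)) * scale;
    }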
270     coeff_ptr = c->coeffs[ch];
273     for (w = 0; w < c->num_windows; w++) {
274         for (b = 0; b < c->num_bands; b++) {
275             int band_size = c->band_start[b + 1] - c->band_start[b];
279                 coeff_ptr += band_size;
284                                           c->band_scales[band_idx + b]);
287                                           c->band_scales[band_idx + b]);
288             coeff_ptr += band_size;
290         band_idx += c->num_bands;
300     float *ch0 = c->coeffs[0];
301     float *ch1 = c->coeffs[1];
303     for (w = 0; w < c->num_windows; w++) {
304         for (b = 0; b < c->num_bands; b++) {
305             if (c->ms_info[band_off + b]) {
307                     float l = *ch0, r = *ch1;
312                 ch0 += c->band_start[b + 1] - c->band_start[b];
313                 ch1 += c->band_start[b + 1] - c->band_start[b];
316         band_off += c->num_bands;
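For every band flagged in ms_info, this loop in on2avc_apply_ms() rebuilds the left/right pair from the jointly coded coefficients in place; only the loads are visible above. A hedged sketch of the per-coefficient sum/difference reconstruction (the in-place stores are an assumption):

    for (i = 0; i < c->band_start[b + 1] - c->band_start[b]; i++) {
        float l = *ch0, r = *ch1;
        *ch0++ = l + r;   /* left  = mid + side */
        *ch1++ = l - r;   /* right = mid - side */
    }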
323     memset(src, 0, sizeof(*src) * order0);
324     memset(src + len - order1, 0, sizeof(*src) * order1);
328                        int step, int order0, int order1, const double *const *tabs)
336     for (i = 0; i < tab_step; i++) {
338         for (j = 0; j < order0; j++)
339             sum += src[j] * tab[j * tab_step + i];
343     out = dst + dst_len - tab_step;
345     src2 = src + (dst_len - tab_step) / step + 1 + order0;
346     for (i = 0; i < tab_step; i++) {
348         for (j = 0; j < order1; j++)
349             sum += src2[j] * tab[j * tab_step + i];
355                     const double *tab, int tab_len, int step,
356                     int order0, int order1, const double *const *tabs)
367         float in0 = src1[order0 + i];
368         int pos = (src2_len - 1) & mask;
371         const double *t = tab;
372         for (j = pos; j >= 0; j--)
373             src2[j] += in0 * *t++;
374         for (j = 0; j < tab_len - pos - 1; j++)
375             src2[src2_len - j - 1] += in0 * tab[pos + 1 + j];
377         for (j = 0; j < tab_len; j++)
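The two accumulation loops in twiddle() scatter one weighted input sample into a circular accumulator: the first walks backwards from pos down to index 0, the second wraps around to the top of src2. A small standalone illustration of that indexing pattern (all names here are illustrative, not the decoder's):

    /* accumulate in0 * taps[] into ring[], ending at index pos and wrapping backwards */
    static void ring_accumulate(float *ring, int ring_len, int pos,
                                const double *taps, int tap_len, float in0)
    {
        int j;
        for (j = pos; j >= 0 && pos - j < tap_len; j--)
            ring[j] += in0 * taps[pos - j];
        for (j = 0; j < tap_len - pos - 1; j++)
            ring[ring_len - j - 1] += in0 * taps[pos + 1 + j];
    }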
384 #define CMUL1_R(s, t, is, it) \
385 s[is + 0] * t[it + 0] - s[is + 1] * t[it + 1]
386 #define CMUL1_I(s, t, is, it) \
387 s[is + 0] * t[it + 1] + s[is + 1] * t[it + 0]
388 #define CMUL2_R(s, t, is, it) \
389 s[is + 0] * t[it + 0] + s[is + 1] * t[it + 1]
390 #define CMUL2_I(s, t, is, it) \
391 s[is + 0] * t[it + 1] - s[is + 1] * t[it + 0]
393 #define CMUL0(dst, id, s0, s1, s2, s3, t0, t1, t2, t3, is, it) \
394 dst[id] = s0[is] * t0[it] + s1[is] * t1[it] \
395 + s2[is] * t2[it] + s3[is] * t3[it]; \
396 dst[id + 1] = s0[is] * t0[it + 1] + s1[is] * t1[it + 1] \
397 + s2[is] * t2[it + 1] + s3[is] * t3[it + 1];
399 #define CMUL1(dst, s0, s1, s2, s3, t0, t1, t2, t3, is, it) \
400 *dst++ = CMUL1_R(s0, t0, is, it) \
401 + CMUL1_R(s1, t1, is, it) \
402 + CMUL1_R(s2, t2, is, it) \
403 + CMUL1_R(s3, t3, is, it); \
404 *dst++ = CMUL1_I(s0, t0, is, it) \
405 + CMUL1_I(s1, t1, is, it) \
406 + CMUL1_I(s2, t2, is, it) \
407 + CMUL1_I(s3, t3, is, it);
409 #define CMUL2(dst, s0, s1, s2, s3, t0, t1, t2, t3, is, it) \
410 *dst++ = CMUL2_R(s0, t0, is, it) \
411 + CMUL2_R(s1, t1, is, it) \
412 + CMUL2_R(s2, t2, is, it) \
413 + CMUL2_R(s3, t3, is, it); \
414 *dst++ = CMUL2_I(s0, t0, is, it) \
415 + CMUL2_I(s1, t1, is, it) \
416 + CMUL2_I(s2, t2, is, it) \
417 + CMUL2_I(s3, t3, is, it);
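The CMUL1_*/CMUL2_* pairs are the real and imaginary parts of complex products over interleaved re/im float arrays: CMUL1 multiplies the two values directly, CMUL2 multiplies by the conjugate of the first operand, and CMUL0 combines four real source samples with four complex table entries. An equivalent scalar illustration of the first two (standalone, not part of the decoder):

    /* s and t hold interleaved {re, im} floats; is/it index the real element */
    static void cmul1(float *dst, const float *s, const float *t, int is, int it)
    {
        dst[0] = s[is] * t[it]     - s[is + 1] * t[it + 1];   /* Re(s * t) */
        dst[1] = s[is] * t[it + 1] + s[is + 1] * t[it];       /* Im(s * t) */
    }

    static void cmul2(float *dst, const float *s, const float *t, int is, int it)
    {
        dst[0] = s[is] * t[it]     + s[is + 1] * t[it + 1];   /* Re(conj(s) * t) */
        dst[1] = s[is] * t[it + 1] - s[is + 1] * t[it];       /* Im(conj(s) * t) */
    }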
420                         const float *t0, const float *t1,
421                         const float *t2, const float *t3, int len, int step)
423     const float *h0, *h1, *h2, *h3;
426     int len2 = len >> 1, len4 = len >> 2;
431     for (half = len2; tmp > 1; half <<= 1, tmp >>= 1);
438     CMUL0(dst, 0, s0, s1, s2, s3, t0, t1, t2, t3, 0, 0);
440     hoff = 2 * step * (len4 >> 1);
445     d2 = dst + 2 + (len >> 1);
446     for (i = 0; i < (len4 - 1) >> 1; i++) {
447         CMUL1(d1, s0, s1, s2, s3, t0, t1, t2, t3, j, k);
448         CMUL1(d2, s0, s1, s2, s3, h0, h1, h2, h3, j, k);
452     CMUL0(dst, len4, s0, s1, s2, s3, t0, t1, t2, t3, 1, hoff);
453     CMUL0(dst, len4 + len2, s0, s1, s2, s3, h0, h1, h2, h3, 1, hoff);
456     k = hoff + 2 * step * len4;
458     d2 = dst + len4 + 2 + len2;
459     for (i = 0; i < (len4 - 2) >> 1; i++) {
460         CMUL2(d1, s0, s1, s2, s3, t0, t1, t2, t3, j, k);
461         CMUL2(d2, s0, s1, s2, s3, h0, h1, h2, h3, j, k);
465     CMUL0(dst, len2 + 4, s0, s1, s2, s3, t0, t1, t2, t3, 0, k);
469                         float *tmp0, float *tmp1)
471     memcpy(tmp1, tmp0, 384 * sizeof(*tmp0));
472     memcpy(tmp0 + 384, tmp1 + 384, 128 * sizeof(*tmp0));
479     c->fft64_fn(c->fft64, src + 0, tmp1 + 0, sizeof(float));
480     c->fft64_fn(c->fft64, src + 128, tmp1 + 128, sizeof(float));
481     c->fft64_fn(c->fft64, src + 256, tmp1 + 256, sizeof(float));
482     c->fft64_fn(c->fft64, src + 384, tmp1 + 384, sizeof(float));
488     c->fft256_fn(c->fft256, src, tmp1, sizeof(float));
497                         float *tmp0, float *tmp1)
499     memcpy(tmp1, tmp0, 768 * sizeof(*tmp0));
500     memcpy(tmp0 + 768, tmp1 + 768, 256 * sizeof(*tmp0));
507     c->fft128_fn(c->fft128, src + 0, tmp1 + 0, sizeof(float));
508     c->fft128_fn(c->fft128, src + 256, tmp1 + 256, sizeof(float));
509     c->fft128_fn(c->fft128, src + 512, tmp1 + 512, sizeof(float));
510     c->fft128_fn(c->fft128, src + 768, tmp1 + 768, sizeof(float));
516     c->fft512_fn(c->fft512, src, tmp1, sizeof(float));
526     float *tmp0 = c->temp, *tmp1 = c->temp + 1024;
528     memset(tmp0, 0, sizeof(*tmp0) * 1024);
529     memset(tmp1, 0, sizeof(*tmp1) * 1024);
553         memset(tmp0, 0, 64 * sizeof(*tmp0));
591         memset(tmp0, 0, 128 * sizeof(*tmp0));
612     float *tmp0 = c->temp, *tmp1 = c->temp + 1024;
614     memset(tmp0, 0, sizeof(*tmp0) * 1024);
615     memset(tmp1, 0, sizeof(*tmp1) * 1024);
635         memset(tmp0, 0, 64 * sizeof(*tmp0));
667         memset(tmp0, 0, 128 * sizeof(*tmp0));
688     for (ch = 0; ch < c->avctx->ch_layout.nb_channels; ch++) {
690         float *in    = c->coeffs[ch];
691         float *saved = c->delay[ch];
692         float *buf   = c->mdct_buf;
693         float *wout  = out + 448;
695         switch (c->window_type) {
697             c->mdct_fn(c->mdct, buf, in, sizeof(float));
700             c->wtf(c, buf, in, 1024);
703             c->wtf(c, buf, in, 512);
704             c->mdct_half_fn(c->mdct_half, buf + 512, in + 512, sizeof(float));
705             for (i = 0; i < 256; i++) {
706                 FFSWAP(float, buf[i + 512], buf[1023 - i]);
710             c->mdct_half_fn(c->mdct_half, buf, in, sizeof(float));
711             for (i = 0; i < 256; i++) {
712                 FFSWAP(float, buf[i], buf[511 - i]);
714             c->wtf(c, buf + 512, in + 512, 512);
718         memcpy(out, saved, 448 * sizeof(float));
719         c->fdsp->vector_fmul_window(wout, saved + 448, buf, c->short_win, 64);
720         memcpy(wout + 128, buf + 64, 448 * sizeof(float));
721         memcpy(saved, buf + 512, 448 * sizeof(float));
722         memcpy(saved + 448, buf + 7*128 + 64, 64 * sizeof(float));
736     float *buf = c->mdct_buf;
737     float *temp = c->temp;
739     switch (c->window_type) {
743         c->mdct_fn(c->mdct, buf, in, sizeof(float));
747             c->mdct_small_fn(c->mdct_small, buf + i, in + i, sizeof(float));
755         c->fdsp->vector_fmul_window(out, saved, buf, c->long_win, 512);
757         float *wout = out + 448;
758         memcpy(out, saved, 448 * sizeof(float));
761             c->fdsp->vector_fmul_window(wout + 0*128, saved + 448, buf + 0*128, c->short_win, 64);
762             c->fdsp->vector_fmul_window(wout + 1*128, buf + 0*128 + 64, buf + 1*128, c->short_win, 64);
763             c->fdsp->vector_fmul_window(wout + 2*128, buf + 1*128 + 64, buf + 2*128, c->short_win, 64);
764             c->fdsp->vector_fmul_window(wout + 3*128, buf + 2*128 + 64, buf + 3*128, c->short_win, 64);
765             c->fdsp->vector_fmul_window(temp, buf + 3*128 + 64, buf + 4*128, c->short_win, 64);
766             memcpy(wout + 4*128, temp, 64 * sizeof(float));
768             c->fdsp->vector_fmul_window(wout, saved + 448, buf, c->short_win, 64);
769             memcpy(wout + 128, buf + 64, 448 * sizeof(float));
774     switch (c->window_type) {
776         memcpy(saved, temp + 64, 64 * sizeof(float));
777         c->fdsp->vector_fmul_window(saved + 64, buf + 4*128 + 64, buf + 5*128, c->short_win, 64);
778         c->fdsp->vector_fmul_window(saved + 192, buf + 5*128 + 64, buf + 6*128, c->short_win, 64);
779         c->fdsp->vector_fmul_window(saved + 320, buf + 6*128 + 64, buf + 7*128, c->short_win, 64);
780         memcpy(saved + 448, buf + 7*128 + 64, 64 * sizeof(float));
783         memcpy(saved, buf + 512, 448 * sizeof(float));
784         memcpy(saved + 448, buf + 7*128 + 64, 64 * sizeof(float));
788         memcpy(saved, buf + 512, 512 * sizeof(float));
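All of the overlap-add above goes through AVFloatDSPContext's vector_fmul_window(), which cross-fades two half-frames of input against a symmetric window and writes 2*len output samples. For reference, a hedged sketch of the scalar behaviour, reconstructed from memory of libavutil's C implementation; treat the details as an assumption rather than the authoritative source:

    static void vector_fmul_window_sketch(float *dst, const float *src0,
                                          const float *src1, const float *win, int len)
    {
        int i, j;
        dst  += len;
        win  += len;
        src0 += len;
        for (i = -len, j = len - 1; i < 0; i++, j--) {
            float s0 = src0[i], s1 = src1[j];
            float wi = win[i],  wj = win[j];
            dst[i] = s0 * wj - s1 * wi;
            dst[j] = s0 * wi + s1 * wj;
        }
    }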
807     c->prev_window_type = c->window_type;
810     c->band_start  = c->modes[c->window_type].band_start;
811     c->num_windows = c->modes[c->window_type].num_windows;
812     c->num_bands   = c->modes[c->window_type].num_bands;
816     for (i = 1; i < c->num_windows; i++)
820     for (i = 0; i < c->avctx->ch_layout.nb_channels; i++)
823     if (c->avctx->ch_layout.nb_channels == 2 && c->ms_present)
826     for (i = 0; i < c->avctx->ch_layout.nb_channels; i++)
836                               int *got_frame_ptr, AVPacket *avpkt)
838     const uint8_t *buf = avpkt->data;
839     int buf_size = avpkt->size;
880                                      frame, audio_off)) < 0)
897     for (i = 1; i < 16; i++)
925                "Stereo mode support is not good, patch is welcome\n");
930     for (i = 0; i < 20; i++)
937                1024 * sizeof(*c->long_win));
940                1024 * sizeof(*c->long_win));
948         scale = 1.0 / (1024*32768);
952         scale = 1.0 / (512*32768);
956         scale = 1.0 / (128*32768);
978     for (i = 1; i < 16; i++) {
982                                       syms, 2, 2, 0, 0, avctx);
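The scale assignments at lines 948/952/956 are the inverse-MDCT normalization factors handed to av_tx_init() for the 1024-, 512- and 128-point transforms; the extra 1/32768 folds the 16-bit range normalization into the transform itself. A hedged sketch of that setup — the field names match the calls visible earlier in the listing, but the lengths, flags and error handling here are assumptions:

    float scale;
    int ret;

    scale = 1.0 / (1024*32768);
    if ((ret = av_tx_init(&c->mdct, &c->mdct_fn, AV_TX_FLOAT_MDCT,
                          1 /* inverse */, 1024, &scale, 0)) < 0)
        return ret;

    scale = 1.0 / (512*32768);
    if ((ret = av_tx_init(&c->mdct_half, &c->mdct_half_fn, AV_TX_FLOAT_MDCT,
                          1, 512, &scale, 0)) < 0)
        return ret;

    scale = 1.0 / (128*32768);
    if ((ret = av_tx_init(&c->mdct_small, &c->mdct_small_fn, AV_TX_FLOAT_MDCT,
                          1, 128, &scale, 0)) < 0)
        return ret;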