/* NOTE(review): extraction-mangled fragment of a cosine-table initializer
 * (init_cos_tabs_idx per the symbol index below); the function header and
 * the definitions of `m`, `tab` and RESCALE() are outside this view.
 * Stray leading numbers ("66", "68", ...) are original source line numbers
 * fused into the text by the extractor — they are not code. */
/* Angular step: one full period (2*pi) spread over the m table entries. */
66 double freq = 2*
M_PI/m;
/* First quadrant [0 .. m/4] is computed directly from cos(). */
68 for(
int i = 0;
i <= m/4;
i++)
69 tab[
i] = RESCALE(cos(
i*freq));
/* Mirror the first quadrant into (m/4 .. m/2): quarter-wave symmetry so
 * the upper half of the table can be reused (presumably as the sine part
 * by the FFT passes — TODO confirm against the table's consumers). */
70 for(
int i = 1;
i < m/4;
i++)
71 tab[m/2 -
i] = tab[
i];
/* NOTE(review): mangled span. First a macro that stamps out one av_cold
 * init_cos_tabs_<size>() wrapper per table size (its body is collapsed onto
 * one physical line here), then the tail of a once-guard table pairing each
 * wrapper with AV_ONCE_INIT.  The table's declaration line (see
 * cos_tabs_init_once[] in the symbol index) is not visible in this view. */
74 #define INIT_FF_COS_TABS_FUNC(index, size) \ 75 static av_cold void init_cos_tabs_ ## size (void) \ 77 init_cos_tabs_idx(index); \ 108 { init_cos_tabs_16, AV_ONCE_INIT },
/* One entry per power-of-two FFT size, 32 .. 131072; the array position is
 * what init_cos_tabs(index) uses to select the right initializer. */
109 { init_cos_tabs_32, AV_ONCE_INIT },
110 { init_cos_tabs_64, AV_ONCE_INIT },
111 { init_cos_tabs_128, AV_ONCE_INIT },
112 { init_cos_tabs_256, AV_ONCE_INIT },
113 { init_cos_tabs_512, AV_ONCE_INIT },
114 { init_cos_tabs_1024, AV_ONCE_INIT },
115 { init_cos_tabs_2048, AV_ONCE_INIT },
116 { init_cos_tabs_4096, AV_ONCE_INIT },
117 { init_cos_tabs_8192, AV_ONCE_INIT },
118 { init_cos_tabs_16384, AV_ONCE_INIT },
119 { init_cos_tabs_32768, AV_ONCE_INIT },
120 { init_cos_tabs_65536, AV_ONCE_INIT },
121 { init_cos_tabs_131072, AV_ONCE_INIT },
/* NOTE(review): fragment of init_cos_tabs(index) — runs the selected
 * table initializer exactly once (the opening tokens of the ff_thread_once
 * call are outside this view, cf. the once-guard table above). */
127 cos_tabs_init_once[index].
func);
/* NOTE(review): mangled fragment of the radix-3 butterfly (fft3),
 * fixed-point path.  BF(a,b,c,d) is presumably the usual FFmpeg
 * sum/difference butterfly — TODO confirm against its definition,
 * which is not visible here. */
138 BF(tmp[0].
re, tmp[1].
im, in[1].im, in[2].im);
139 BF(tmp[0].im, tmp[1].re, in[1].re, in[2].re);
/* Widen to int64_t before multiplying by the Q31 twiddle factors in
 * ff_cos_53 so the products do not overflow 32 bits. */
145 mtmp[0] = (int64_t)
TX_NAME(ff_cos_53)[0].
re * tmp[0].
re;
146 mtmp[1] = (int64_t)
TX_NAME(ff_cos_53)[0].
im * tmp[0].
im;
147 mtmp[2] = (int64_t)
TX_NAME(ff_cos_53)[1].
re * tmp[1].
re;
148 mtmp[3] = (int64_t)
TX_NAME(ff_cos_53)[1].
re * tmp[1].
im;
/* Rounded Q31 normalization: `x + 0x40000000 >> 31` parses as
 * `(x + 0x40000000) >> 31` because additive `+` binds tighter than `>>`
 * in C — the add is the 0.5 rounding bias, the shift drops the
 * fractional bits.  Intentional, if parenthesis-shy. */
149 out[1*
stride].
re = in[0].
re - (mtmp[2] + mtmp[0] + 0x40000000 >> 31);
150 out[1*
stride].
im = in[0].
im - (mtmp[3] - mtmp[1] + 0x40000000 >> 31);
151 out[2*
stride].
re = in[0].
re - (mtmp[2] - mtmp[0] + 0x40000000 >> 31);
152 out[2*
stride].
im = in[0].
im - (mtmp[3] + mtmp[1] + 0x40000000 >> 31);
/* NOTE(review): DECL_FFT5 — generates a radix-5 butterfly writing its five
 * outputs at caller-chosen strided offsets D0..D4 (the permutations let
 * fft15 below reuse one body three times).  The macro's closing lines are
 * missing from this view; the whole body is collapsed onto one physical
 * line, ending with the start of the fft15 loop ("209 for ("). */
165 #define DECL_FFT5(NAME, D0, D1, D2, D3, D4) \ 166 static av_always_inline void NAME(FFTComplex *out, FFTComplex *in, \ 169 FFTComplex z0[4], t[6]; \ 171 BF(t[1].im, t[0].re, in[1].re, in[4].re); \ 172 BF(t[1].re, t[0].im, in[1].im, in[4].im); \ 173 BF(t[3].im, t[2].re, in[2].re, in[3].re); \ 174 BF(t[3].re, t[2].im, in[2].im, in[3].im); \ 176 out[D0*stride].re = in[0].re + t[0].re + t[2].re; \ 177 out[D0*stride].im = in[0].im + t[0].im + t[2].im; \ 179 SMUL(t[4].re, t[0].re, TX_NAME(ff_cos_53)[2].re, TX_NAME(ff_cos_53)[3].re, t[2].re, t[0].re); \ 180 SMUL(t[4].im, t[0].im, TX_NAME(ff_cos_53)[2].re, TX_NAME(ff_cos_53)[3].re, t[2].im, t[0].im); \ 181 CMUL(t[5].re, t[1].re, TX_NAME(ff_cos_53)[2].im, TX_NAME(ff_cos_53)[3].im, t[3].re, t[1].re); \ 182 CMUL(t[5].im, t[1].im, TX_NAME(ff_cos_53)[2].im, TX_NAME(ff_cos_53)[3].im, t[3].im, t[1].im); \ 184 BF(z0[0].re, z0[3].re, t[0].re, t[1].re); \ 185 BF(z0[0].im, z0[3].im, t[0].im, t[1].im); \ 186 BF(z0[2].re, z0[1].re, t[4].re, t[5].re); \ 187 BF(z0[2].im, z0[1].im, t[4].im, t[5].im); \ 189 out[D1*stride].re = in[0].re + z0[3].re; \ 190 out[D1*stride].im = in[0].im + z0[0].im; \ 191 out[D2*stride].re = in[0].re + z0[2].re; \ 192 out[D2*stride].im = in[0].im + z0[1].im; \ 193 out[D3*stride].re = in[0].re + z0[1].re; \ 194 out[D3*stride].im = in[0].im + z0[2].im; \ 195 out[D4*stride].re = in[0].re + z0[0].re; \ 196 out[D4*stride].im = in[0].im + z0[3].im; \ 209 for (
int i = 0;
i < 5;
i++)
/* fft15 = 5 x fft3 followed by 3 x fft5: classic prime-factor (5*3)
 * decomposition; fft5_m1/m2/m3 are the DECL_FFT5 instances with the three
 * output permutations. */
210 fft3(tmp +
i, in +
i*3, 5);
212 fft5_m1(out, tmp + 0, stride);
213 fft5_m2(out, tmp + 5, stride);
214 fft5_m3(out, tmp + 10, stride);
/* NOTE(review): collapsed span containing the split-radix machinery:
 * BUTTERFLIES (in-place variant), BUTTERFLIES_BIG (copies the inputs first
 * so large passes may alias), TRANSFORM / TRANSFORM_ZERO (twiddle +
 * butterfly; _ZERO skips the w=1 multiply), the generic pass() loop over
 * mirrored twiddles (wim = wre+o1, walked backwards via wim[-1]), and the
 * DECL_FFT size-stamping macro.  Many interior lines are missing; the
 * physical line ends mid-statement in the fft4 body ("301 BF(t3, ..."). */
217 #define BUTTERFLIES(a0,a1,a2,a3) {\ 219 BF(a2.re, a0.re, a0.re, t5);\ 220 BF(a3.im, a1.im, a1.im, t3);\ 222 BF(a3.re, a1.re, a1.re, t4);\ 223 BF(a2.im, a0.im, a0.im, t6);\ 229 #define BUTTERFLIES_BIG(a0,a1,a2,a3) {\ 230 FFTSample r0=a0.re, i0=a0.im, r1=a1.re, i1=a1.im;\ 232 BF(a2.re, a0.re, r0, t5);\ 233 BF(a3.im, a1.im, i1, t3);\ 235 BF(a3.re, a1.re, r1, t4);\ 236 BF(a2.im, a0.im, i0, t6);\ 239 #define TRANSFORM(a0,a1,a2,a3,wre,wim) {\ 240 CMUL(t1, t2, a2.re, a2.im, wre, -wim);\ 241 CMUL(t5, t6, a3.re, a3.im, wre, wim);\ 242 BUTTERFLIES(a0,a1,a2,a3)\ 245 #define TRANSFORM_ZERO(a0,a1,a2,a3) {\ 250 BUTTERFLIES(a0,a1,a2,a3)\ 255 static void name(FFTComplex *z, const FFTSample *wre, unsigned int n)\ 257 FFTSample t1, t2, t3, t4, t5, t6;\ 261 const FFTSample *wim = wre+o1;\ 264 TRANSFORM_ZERO(z[0],z[o1],z[o2],z[o3]);\ 265 TRANSFORM(z[1],z[o1+1],z[o2+1],z[o3+1],wre[1],wim[-1]);\ 270 TRANSFORM(z[0],z[o1],z[o2],z[o3],wre[0],wim[0]);\ 271 TRANSFORM(z[1],z[o1+1],z[o2+1],z[o3+1],wre[1],wim[-1]);\ 277 #define BUTTERFLIES BUTTERFLIES_BIG 280 #define DECL_FFT(n,n2,n4)\ 281 static void fft##n(FFTComplex *z)\ 286 pass(z,TX_NAME(ff_cos_##n),n4/2);\ 301 BF(t3, t1, z[0].
re, z[1].re);
/* Hand-unrolled radix-4 butterfly (fft4 body fragment). */
302 BF(t8, t6, z[3].re, z[2].re);
303 BF(z[2].re, z[0].re, t1, t6);
304 BF(t4, t2, z[0].
im, z[1].im);
305 BF(t7, t5, z[2].im, z[3].im);
306 BF(z[3].im, z[1].im, t4, t8);
307 BF(z[3].re, z[1].re, t3, t7);
308 BF(z[2].im, z[0].im, t2, t5);
/* fft8 fragment: the extra radix-2 stage on z[4..7]; note the negated
 * second operands fold the sign into the butterfly. */
317 BF(t1, z[5].
re, z[4].re, -z[5].re);
318 BF(t2, z[5].
im, z[4].im, -z[5].im);
319 BF(t5, z[7].re, z[6].re, -z[7].re);
320 BF(t6, z[7].im, z[6].im, -z[7].im);
/* fft16 fragment: the two non-trivial twiddle applications, using the
 * cos_16_1/cos_16_3 constants (defined outside this view). */
338 TRANSFORM(z[1],z[5],z[9],z[13],cos_16_1,cos_16_3);
339 TRANSFORM(z[3],z[7],z[11],z[15],cos_16_3,cos_16_1);
/* NOTE(review): tail of the size-dispatch table (fft_dispatch[] per the
 * symbol index); indexed by av_log2(size), hence the leading NULL for
 * size 1.  Preceded by the #define that retargets pass() to the
 * BUTTERFLIES_BIG-based variant for large sizes.  The array's declaration
 * line is not visible in this view. */
347 #define pass pass_big 358 NULL,
fft2,
fft4,
fft8,
fft16, fft32, fft64, fft128, fft256, fft512,
359 fft1024, fft2048, fft4096, fft8192, fft16384, fft32768, fft65536, fft131072
/* NOTE(review): collapsed span: DECL_COMP_FFT stamps out the prime-factor
 * compound transforms (N in {3,5,15} sub-FFTs of size N feeding m
 * power-of-two FFTs, with PFA in/out index maps in s->pfatab); the line
 * ends with the start of monolithic_fft's reorder loop ("395 for ("). */
362 #define DECL_COMP_FFT(N) \ 363 static void compound_fft_##N##xM(AVTXContext *s, void *_out, \ 364 void *_in, ptrdiff_t stride) \ 366 const int m = s->m, *in_map = s->pfatab, *out_map = in_map + N*m; \ 367 FFTComplex *in = _in; \ 368 FFTComplex *out = _out; \ 369 FFTComplex fft##N##in[N]; \ 370 void (*fftp)(FFTComplex *z) = fft_dispatch[av_log2(m)]; \ 372 for (int i = 0; i < m; i++) { \ 373 for (int j = 0; j < N; j++) \ 374 fft##N##in[j] = in[in_map[i*N + j]]; \ 375 fft##N(s->tmp + s->revtab[i], fft##N##in, m); \ 378 for (int i = 0; i < N; i++) \ 379 fftp(s->tmp + m*i); \ 381 for (int i = 0; i < N*m; i++) \ 382 out[i] = s->tmp[out_map[i]]; \ 395 for (
int i = 0;
i < m;
i++)
/* monolithic_fft fragment: bit-reversal reorder before the in-place
 * power-of-two transform. */
396 out[s->revtab[
i]] = in[
i];
/* naive_fft fragment: O(n^2) reference DFT — exact exp(i*phase*i*j)
 * twiddles computed on the fly; used for sizes with no fast path. */
408 for(
int i = 0;
i < n;
i++) {
410 for(
int j = 0; j < n; j++) {
411 const double factor = phase*
i*j;
413 RESCALE(cos(factor)),
414 RESCALE(sin(factor)),
417 CMUL3(res, in[j], mult);
/* NOTE(review): collapsed span holding DECL_COMP_IMDCT and DECL_COMP_MDCT
 * (compound N*M inverse/forward MDCTs: pre-rotate with s->exptab, PFA
 * sub-FFTs, post-rotate writing mirrored output pairs i0/i1), running
 * straight into the body of monolithic_imdct ("519 const int m = ...").
 * Many interior lines are missing; tokens are kept byte-identical. */
425 #define DECL_COMP_IMDCT(N) \ 426 static void compound_imdct_##N##xM(AVTXContext *s, void *_dst, void *_src, \ 429 FFTComplex fft##N##in[N]; \ 430 FFTComplex *z = _dst, *exp = s->exptab; \ 431 const int m = s->m, len8 = N*m >> 1; \ 432 const int *in_map = s->pfatab, *out_map = in_map + N*m; \ 433 const FFTSample *src = _src, *in1, *in2; \ 434 void (*fftp)(FFTComplex *) = fft_dispatch[av_log2(m)]; \ 436 stride /= sizeof(*src); \ 438 in2 = src + ((N*m*2) - 1) * stride; \ 440 for (int i = 0; i < m; i++) { \ 441 for (int j = 0; j < N; j++) { \ 442 const int k = in_map[i*N + j]; \ 443 FFTComplex tmp = { in2[-k*stride], in1[k*stride] }; \ 444 CMUL3(fft##N##in[j], tmp, exp[k >> 1]); \ 446 fft##N(s->tmp + s->revtab[i], fft##N##in, m); \ 449 for (int i = 0; i < N; i++) \ 450 fftp(s->tmp + m*i); \ 452 for (int i = 0; i < len8; i++) { \ 453 const int i0 = len8 + i, i1 = len8 - i - 1; \ 454 const int s0 = out_map[i0], s1 = out_map[i1]; \ 455 FFTComplex src1 = { s->tmp[s1].im, s->tmp[s1].re }; \ 456 FFTComplex src0 = { s->tmp[s0].im, s->tmp[s0].re }; \ 458 CMUL(z[i1].re, z[i0].im, src1.re, src1.im, exp[i1].im, exp[i1].re); \ 459 CMUL(z[i0].re, z[i1].im, src0.re, src0.im, exp[i0].im, exp[i0].re); \ 467 #define DECL_COMP_MDCT(N) \ 468 static void compound_mdct_##N##xM(AVTXContext *s, void *_dst, void *_src, \ 471 FFTSample *src = _src, *dst = _dst; \ 472 FFTComplex *exp = s->exptab, tmp, fft##N##in[N]; \ 473 const int m = s->m, len4 = N*m, len3 = len4 * 3, len8 = len4 >> 1; \ 474 const int *in_map = s->pfatab, *out_map = in_map + N*m; \ 475 void (*fftp)(FFTComplex *) = fft_dispatch[av_log2(m)]; \ 477 stride /= sizeof(*dst); \ 479 for (int i = 0; i < m; i++) { \ 480 for (int j = 0; j < N; j++) { \ 481 const int k = in_map[i*N + j]; \ 483 tmp.re = FOLD(-src[ len4 + k], src[1*len4 - 1 - k]); \ 484 tmp.im = FOLD(-src[ len3 + k], -src[1*len3 - 1 - k]); \ 486 tmp.re = FOLD(-src[ len4 + k], -src[5*len4 - 1 - k]); \ 487 tmp.im = FOLD( src[-len4 + k], -src[1*len3 - 1 - k]); \ 489 
CMUL(fft##N##in[j].im, fft##N##in[j].re, tmp.re, tmp.im, \ 490 exp[k >> 1].re, exp[k >> 1].im); \ 492 fft##N(s->tmp + s->revtab[i], fft##N##in, m); \ 495 for (int i = 0; i < N; i++) \ 496 fftp(s->tmp + m*i); \ 498 for (int i = 0; i < len8; i++) { \ 499 const int i0 = len8 + i, i1 = len8 - i - 1; \ 500 const int s0 = out_map[i0], s1 = out_map[i1]; \ 501 FFTComplex src1 = { s->tmp[s1].re, s->tmp[s1].im }; \ 502 FFTComplex src0 = { s->tmp[s0].re, s->tmp[s0].im }; \ 504 CMUL(dst[2*i1*stride + stride], dst[2*i0*stride], src0.re, src0.im, \ 505 exp[i0].im, exp[i0].re); \ 506 CMUL(dst[2*i0*stride + stride], dst[2*i1*stride], src1.re, src1.im, \ 507 exp[i1].im, exp[i1].re); \ 519 const int m = s->m, len8 = m >> 1;
/* monolithic_imdct fragment: single power-of-two inverse MDCT.
 * stride is converted from bytes to elements; in2 walks the input
 * backwards from the last sample. */
523 stride /=
sizeof(*src);
525 in2 = src + ((m*2) - 1) *
stride;
/* Pre-rotation + bit-reversed scatter into z ahead of the in-place FFT. */
527 for (
int i = 0;
i < m;
i++) {
529 CMUL3(z[s->revtab[
i]], tmp, exp[i]);
/* Post-rotation: each iteration produces the mirrored pair (i0, i1). */
534 for (
int i = 0;
i < len8;
i++) {
535 const int i0 = len8 +
i, i1 = len8 - i - 1;
539 CMUL(z[i1].
re, z[i0].
im, src1.
re, src1.
im, exp[i1].im, exp[i1].re);
540 CMUL(z[i0].re, z[i1].im, src0.
re, src0.
im, exp[i0].im, exp[i0].re);
/* NOTE(review): fragment of monolithic_mdct (single power-of-two forward
 * MDCT).  The two FOLD branches fold the 2*len4-sample input window into
 * len4/2 complex values; which branch runs for which i is decided by a
 * condition on a line missing from this view — TODO confirm (presumably
 * i < len8 vs. i >= len8 as in the compound variant above). */
549 const int m = s->
m, len4 = m, len3 = len4 * 3, len8 = len4 >> 1;
552 stride /=
sizeof(*dst);
554 for (
int i = 0;
i < m;
i++) {
557 tmp.re = FOLD(-src[ len4 + k], src[1*len4 - 1 - k]);
558 tmp.im = FOLD(-src[ len3 + k], -src[1*len3 - 1 - k]);
560 tmp.re = FOLD(-src[ len4 + k], -src[5*len4 - 1 - k]);
561 tmp.im = FOLD( src[-len4 + k], -src[1*len3 - 1 - k]);
/* Pre-rotation by exp[i] (the CMUL's opening tokens are missing here). */
564 exp[i].
re, exp[i].
im);
/* Post-rotation: mirrored output pairs, interleaved at 2*stride with the
 * imaginary part at +stride. */
569 for (
int i = 0;
i < len8;
i++) {
570 const int i0 = len8 +
i, i1 = len8 - i - 1;
574 CMUL(dst[2*i1*stride + stride], dst[2*i0*stride], src0.
re, src0.
im,
575 exp[i0].
im, exp[i0].
re);
576 CMUL(dst[2*i0*stride + stride], dst[2*i1*stride], src1.
re, src1.
im,
577 exp[i1].
im, exp[i1].
re);
/* NOTE(review): fragment of naive_imdct — O(n^2) reference inverse MDCT
 * used when no fast factorization applies.  `len`, `len2`, `sum_d`,
 * `sum_u`, `dst` and `src` are declared on lines missing from this view. */
588 double scale = s->
scale;
/* Base angle of the IMDCT kernel: pi/(4*len2). */
589 const double phase =
M_PI/(4.0*len2);
591 stride /=
sizeof(*src);
593 for (
int i = 0;
i <
len;
i++) {
/* Two output angles per i: i_d feeds the first half of the window,
 * i_u the (negated) second half. */
596 double i_d = phase * (4*len - 2*
i - 1);
597 double i_u = phase * (3*len2 + 2*
i + 1);
598 for (
int j = 0; j < len2; j++) {
599 double a = (2 * j + 1);
600 double a_d = cos(a * i_d);
601 double a_u = cos(a * i_u);
602 double val = UNSCALE(src[j*stride]);
/* Accumulated sums (sum_d/sum_u, accumulation lines not visible) are
 * scaled and written as the two mirrored output halves. */
606 dst[
i + 0] = RESCALE( sum_d*scale);
607 dst[
i +
len] = RESCALE(-sum_u*scale);
/* NOTE(review): fragment of naive_mdct — O(n^2) reference forward MDCT:
 * dst[i] = scale * sum_j src[j] * cos(pi/(4*len) * (2j+1+len) * (2i+1)).
 * The declarations of `len`, `sum`, `src`, `dst` are on missing lines. */
617 double scale = s->
scale;
618 const double phase =
M_PI/(4.0*
len);
620 stride /=
sizeof(*dst);
622 for (
int i = 0;
i <
len;
i++) {
/* Direct evaluation of the MDCT kernel; `a` is the integer angle index
 * so only one cos() per (i, j) is needed. */
624 for (
int j = 0; j < len*2; j++) {
625 int a = (2*j + 1 +
len) * (2*
i + 1);
626 sum += UNSCALE(src[j]) * cos(a * phase);
628 dst[
i*
stride] = RESCALE(sum*scale);
/* NOTE(review): fragment of gen_mdct_exptab — builds the MDCT pre/post
 * rotation table s->exptab.  theta's extra 1/8 is the standard MDCT
 * phase offset; a negative caller scale flips the direction (the len4
 * bias).  `alpha` is computed from theta on a line missing from this
 * view — TODO confirm (presumably alpha = 2*pi*(i+theta)/(4*len4)). */
634 const double theta = (scale < 0 ? len4 : 0) + 1.0/8.0;
/* The transform applies the table twice (pre- and post-rotation), so the
 * amplitude correction is sqrt(|scale|) per application. */
639 scale = sqrt(
fabs(scale));
640 for (
int i = 0;
i < len4;
i++) {
642 s->
exptab[
i].
re = RESCALE(cos(alpha) * scale);
643 s->
exptab[
i].
im = RESCALE(sin(alpha) * scale);
/* NOTE(review): fragment of ff_tx_init_mdct_fft — the context/function
 * selector.  CHECK_FACTOR peels a prime factor N out of the length; a
 * pure power of two takes the monolithic path, n*m composites allocate
 * s->tmp and pick the matching compound_{fft,mdct,imdct}_{3,5,15}xM.
 * Many interior lines (error paths, the naive fallback) are missing. */
651 const void *scale, uint64_t
flags)
661 #define CHECK_FACTOR(DST, FACTOR, SRC) \ 662 if (DST == 1 && !(SRC % FACTOR)) { \ 672 if (!(len & (len - 1)) && len >= 2 && len <= max_ptwo) {
/* Leftover length after factor extraction means no supported
 * factorization; odd lengths are rejected for MDCTs. */
685 if (len > 1 || m == 1) {
686 if (is_mdct && (l & 1))
692 s->scale = *((SCALE_TYPE *)scale);
698 if (n > 1 && m > 1) {
/* Scratch buffer for the compound transforms' PFA reordering. */
701 if (!(s->tmp =
av_malloc(n*m*
sizeof(*s->tmp))))
703 *tx = n == 3 ? compound_fft_3xM :
704 n == 5 ? compound_fft_5xM :
707 *tx = n == 3 ? inv ? compound_imdct_3xM : compound_mdct_3xM :
708 n == 5 ? inv ? compound_imdct_5xM : compound_mdct_5xM :
709 inv ? compound_imdct_15xM : compound_mdct_15xM;
#define DECL_FFT(n, n2, n4)
int ff_tx_gen_compound_mapping(AVTXContext *s)
static av_cold void ff_init_53_tabs(void)
static void monolithic_mdct(AVTXContext *s, void *_dst, void *_src, ptrdiff_t stride)
static av_cold void init_cos_tabs(int index)
static FFTSample *const cos_tabs[18]
static av_always_inline void fft15(FFTComplex *out, FFTComplex *in, ptrdiff_t stride)
#define BUTTERFLIES(a0, a1, a2, a3)
The reader does not expect `b` to be semantically unsigned here, and if the code is later changed — say by adding a division or some other operation — the signedness will almost certainly be mistaken. To avoid this confusion a new type was introduced: SUINT is the C unsigned type, but it holds a signed int. To use the same example: SUINT a
static void sum_d(const int *input, int *output, int len)
static void monolithic_fft(AVTXContext *s, void *_out, void *_in, ptrdiff_t stride)
#define INIT_FF_COS_TABS_FUNC(index, size)
static void fft5(FFTComplex *out, FFTComplex *in, FFTComplex exptab[2])
static av_always_inline void init_cos_tabs_idx(int index)
int ff_tx_type_is_mdct(enum AVTXType type)
#define DECL_COMP_MDCT(N)
FFTComplex TX_NAME(ff_cos_53)[4]
#define DECLARE_ALIGNED(n, t, v)
Declare a variable that is aligned in memory.
#define CHECK_FACTOR(DST, FACTOR, SRC)
void(* av_tx_fn)(AVTXContext *s, void *out, void *in, ptrdiff_t stride)
Function pointer to a function to perform the transform.
static __device__ float fabs(float a)
static void monolithic_imdct(AVTXContext *s, void *_dst, void *_src, ptrdiff_t stride)
static int16_t mult(Float11 *f1, Float11 *f2)
static void(*const fft_dispatch[])(FFTComplex *)
typedef void(APIENTRY *FF_PFNGLACTIVETEXTUREPROC)(GLenum texture)
static void fft8(FFTComplex *z)
#define DECL_FFT5(NAME, D0, D1, D2, D3, D4)
#define FF_ARRAY_ELEMS(a)
static void naive_imdct(AVTXContext *s, void *_dst, void *_src, ptrdiff_t stride)
static void naive_fft(AVTXContext *s, void *_out, void *_in, ptrdiff_t stride)
static void naive_mdct(AVTXContext *s, void *_dst, void *_src, ptrdiff_t stride)
static const int16_t alpha[]
static void fft16(FFTComplex *z)
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(const int16_t *) pi >> 8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(const int32_t *) pi >> 24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31))))#define SET_CONV_FUNC_GROUP(ofmt, ifmt) static void set_generic_function(AudioConvert *ac){}void ff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, int sample_rate, 
int apply_map){AudioConvert *ac;int in_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) return NULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt) > 2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);return NULL;}return ac;}in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}else if(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;else ac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);return ac;}int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){int use_generic=1;int len=in->nb_samples;int p;if(ac->dc){av_log(ac->avr, AV_LOG_TRACE,"%d samples - audio_convert: %s to %s (dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));return ff_convert_dither(ac-> in
int(* func)(AVBPrint *dst, const char *in, const char *arg)
static av_always_inline void fft3(FFTComplex *out, FFTComplex *in, ptrdiff_t stride)
static const int factor[16]
#define flags(name, subs,...)
static int gen_mdct_exptab(AVTXContext *s, int len4, double scale)
GLint GLenum GLboolean GLsizei stride
#define TRANSFORM(a0, a1, a2, a3, wre, wim)
static void fft2(FFTComplex *z)
#define TRANSFORM_ZERO(a0, a1, a2, a3)
static void fft4(FFTComplex *z)
static int ff_thread_once(char *control, void(*routine)(void))
int TX_NAME() ff_tx_init_mdct_fft(AVTXContext *s, av_tx_fn *tx, enum AVTXType type, int inv, int len, const void *scale, uint64_t flags)
static const struct twinvq_data tab
#define av_malloc_array(a, b)
Filters: the word "frame" indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format negotiation: for each input and each output, a filter lists its supported formats. For video that means the pixel format; for audio that means the channel layout and sample format. These lists are references to shared objects: when the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection. And when a single format is eventually chosen for a link amongst the remaining ones, all references to the list are updated accordingly. That means that if a filter requires its input and output to have the same format amongst a supported set, all it has to do is use a reference to the same list of formats in query_formats. A filter's query_formats can also leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later; that can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references: ownership and permissions.
int ff_tx_gen_ptwo_revtab(AVTXContext *s)
static double val(void *priv, double ch)
static CosTabsInitOnce cos_tabs_init_once[]
#define DECL_COMP_IMDCT(N)