/* init_cos_tabs_idx(): fill one quarter-wave cosine table for a transform
 * of length m */
double freq = 2*M_PI/m;

for (int i = 0; i < m/4; i++)
    *tab++ = RESCALE(cos(i*freq));
#define INIT_FF_COS_TABS_FUNC(index, size)       \
static av_cold void init_cos_tabs_ ## size(void) \
{                                                \
    init_cos_tabs_idx(index);                    \
}
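/*
 * Illustrative expansion (hypothetical invocation, not a line from this
 * file): INIT_FF_COS_TABS_FUNC(4, 16) would produce
 *
 *     static av_cold void init_cos_tabs_16(void)
 *     {
 *         init_cos_tabs_idx(4);
 *     }
 *
 * one trivial init function per table size, the shape expected by
 * ff_thread_once() so each table is generated exactly once.
 */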
/* fft3(), fixed-point path: twiddle products widened to 64 bits, then
 * rounded back down to Q31 */
mtmp[0] = (int64_t)TX_NAME(ff_cos_53)[0].re * tmp[0].re;
mtmp[1] = (int64_t)TX_NAME(ff_cos_53)[0].im * tmp[0].im;
mtmp[2] = (int64_t)TX_NAME(ff_cos_53)[1].re * tmp[1].re;
mtmp[3] = (int64_t)TX_NAME(ff_cos_53)[1].re * tmp[1].im;
out[1*stride].re = in[0].re - (mtmp[2] + mtmp[0] + 0x40000000 >> 31);
out[1*stride].im = in[0].im - (mtmp[3] - mtmp[1] + 0x40000000 >> 31);
out[2*stride].re = in[0].re - (mtmp[2] - mtmp[0] + 0x40000000 >> 31);
out[2*stride].im = in[0].im - (mtmp[3] + mtmp[1] + 0x40000000 >> 31);
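/*
 * The "+ 0x40000000 >> 31" idiom relies on + binding tighter than >>: the
 * whole 64-bit sum is biased by 2^30 (half an output LSB) and then shifted
 * down by 31, i.e. a round-to-nearest Q31 x Q31 -> Q31 multiply. A minimal
 * stand-alone sketch of the same operation (helper name is hypothetical,
 * not part of this file):
 */
#include <stdint.h>

static inline int32_t mul_q31(int32_t a, int32_t b)
{
    /* Q31 * Q31 widens to Q62; add 2^30 to round, shift 31 back to Q31 */
    return (int32_t)(((int64_t)a * b + 0x40000000) >> 31);
}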
#define DECL_FFT5(NAME, D0, D1, D2, D3, D4)                                                       \
static av_always_inline void NAME(FFTComplex *out, FFTComplex *in,                               \
                                  ptrdiff_t stride)                                              \
{                                                                                                \
    FFTComplex z0[4], t[6];                                                                      \
                                                                                                 \
    BF(t[1].im, t[0].re, in[1].re, in[4].re);                                                    \
    BF(t[1].re, t[0].im, in[1].im, in[4].im);                                                    \
    BF(t[3].im, t[2].re, in[2].re, in[3].re);                                                    \
    BF(t[3].re, t[2].im, in[2].im, in[3].im);                                                    \
                                                                                                 \
    out[D0*stride].re = in[0].re + t[0].re + t[2].re;                                            \
    out[D0*stride].im = in[0].im + t[0].im + t[2].im;                                            \
                                                                                                 \
    SMUL(t[4].re, t[0].re, TX_NAME(ff_cos_53)[2].re, TX_NAME(ff_cos_53)[3].re, t[2].re, t[0].re); \
    SMUL(t[4].im, t[0].im, TX_NAME(ff_cos_53)[2].re, TX_NAME(ff_cos_53)[3].re, t[2].im, t[0].im); \
    CMUL(t[5].re, t[1].re, TX_NAME(ff_cos_53)[2].im, TX_NAME(ff_cos_53)[3].im, t[3].re, t[1].re); \
    CMUL(t[5].im, t[1].im, TX_NAME(ff_cos_53)[2].im, TX_NAME(ff_cos_53)[3].im, t[3].im, t[1].im); \
                                                                                                 \
    BF(z0[0].re, z0[3].re, t[0].re, t[1].re);                                                    \
    BF(z0[0].im, z0[3].im, t[0].im, t[1].im);                                                    \
    BF(z0[2].re, z0[1].re, t[4].re, t[5].re);                                                    \
    BF(z0[2].im, z0[1].im, t[4].im, t[5].im);                                                    \
                                                                                                 \
    out[D1*stride].re = in[0].re + z0[3].re;                                                     \
    out[D1*stride].im = in[0].im + z0[0].im;                                                     \
    out[D2*stride].re = in[0].re + z0[2].re;                                                     \
    out[D2*stride].im = in[0].im + z0[1].im;                                                     \
    out[D3*stride].re = in[0].re + z0[1].re;                                                     \
    out[D3*stride].im = in[0].im + z0[2].im;                                                     \
    out[D4*stride].re = in[0].re + z0[0].re;                                                     \
    out[D4*stride].im = in[0].im + z0[3].im;                                                     \
}
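/*
 * The base 5-point transform is declared as DECL_FFT5(fft5, 0, 1, 2, 3, 4);
 * the file also declares permuted variants (fft5_m1/fft5_m2/fft5_m3, whose
 * exact D0..D4 index sets are not shown in this excerpt) so that fft15()
 * can scatter its outputs directly in prime-factor order without a
 * separate reordering pass.
 */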
/* fft7(), fixed-point path: widened twiddle products, rounded back to Q31 */
mtmp[ 0] = ((int64_t)tab[0].re)*t[0].re - ((int64_t)tab[2].re)*t[4].re;
mtmp[ 1] = ((int64_t)tab[0].re)*t[4].re - ((int64_t)tab[1].re)*t[0].re;
mtmp[ 2] = ((int64_t)tab[0].re)*t[2].re - ((int64_t)tab[2].re)*t[0].re;
mtmp[ 3] = ((int64_t)tab[0].re)*t[0].im - ((int64_t)tab[1].re)*t[2].im;
mtmp[ 4] = ((int64_t)tab[0].re)*t[4].im - ((int64_t)tab[1].re)*t[0].im;
mtmp[ 5] = ((int64_t)tab[0].re)*t[2].im - ((int64_t)tab[2].re)*t[0].im;

mtmp[ 6] = ((int64_t)tab[2].im)*t[1].im + ((int64_t)tab[1].im)*t[5].im;
mtmp[ 7] = ((int64_t)tab[0].im)*t[5].im + ((int64_t)tab[2].im)*t[3].im;
mtmp[ 8] = ((int64_t)tab[2].im)*t[5].im + ((int64_t)tab[1].im)*t[3].im;
mtmp[ 9] = ((int64_t)tab[0].im)*t[1].re + ((int64_t)tab[1].im)*t[3].re;
mtmp[10] = ((int64_t)tab[2].im)*t[3].re + ((int64_t)tab[0].im)*t[5].re;
mtmp[11] = ((int64_t)tab[2].im)*t[1].re + ((int64_t)tab[1].im)*t[5].re;

z[0].re = (int32_t)(mtmp[ 0] - ((int64_t)tab[1].re)*t[2].re + 0x40000000 >> 31);
z[1].re = (int32_t)(mtmp[ 1] - ((int64_t)tab[2].re)*t[2].re + 0x40000000 >> 31);
z[2].re = (int32_t)(mtmp[ 2] - ((int64_t)tab[1].re)*t[4].re + 0x40000000 >> 31);
z[0].im = (int32_t)(mtmp[ 3] - ((int64_t)tab[2].re)*t[4].im + 0x40000000 >> 31);
z[1].im = (int32_t)(mtmp[ 4] - ((int64_t)tab[2].re)*t[2].im + 0x40000000 >> 31);
z[2].im = (int32_t)(mtmp[ 5] - ((int64_t)tab[1].re)*t[4].im + 0x40000000 >> 31);

t[0].re = (int32_t)(mtmp[ 6] - ((int64_t)tab[0].im)*t[3].im + 0x40000000 >> 31);
t[2].re = (int32_t)(mtmp[ 7] - ((int64_t)tab[1].im)*t[1].im + 0x40000000 >> 31);
t[4].re = (int32_t)(mtmp[ 8] + ((int64_t)tab[0].im)*t[1].im + 0x40000000 >> 31);
t[0].im = (int32_t)(mtmp[ 9] + ((int64_t)tab[2].im)*t[5].re + 0x40000000 >> 31);
t[2].im = (int32_t)(mtmp[10] - ((int64_t)tab[1].im)*t[1].re + 0x40000000 >> 31);
t[4].im = (int32_t)(mtmp[11] - ((int64_t)tab[0].im)*t[3].re + 0x40000000 >> 31);
/* fft9(): input butterflies shared by both precisions */
w[0].re = t[0].re - t[6].re;
w[0].im = t[0].im - t[6].im;
w[1].re = t[2].re - t[6].re;
w[1].im = t[2].im - t[6].im;
w[2].re = t[1].re - t[7].re;
w[2].im = t[1].im - t[7].im;
w[3].re = t[3].re + t[7].re;
w[3].im = t[3].im + t[7].im;

z[0].re = in[0].re + t[4].re;
z[0].im = in[0].im + t[4].im;

z[1].re = t[0].re + t[2].re + t[6].re;
z[1].im = t[0].im + t[2].im + t[6].im;
/* fft9(), fixed-point path */
mtmp[0] = t[1].re - t[3].re + t[7].re;
mtmp[1] = t[1].im - t[3].im + t[7].im;

y[3].re = (int32_t)(((int64_t)tab[0].im)*mtmp[0] + 0x40000000 >> 31);
y[3].im = (int32_t)(((int64_t)tab[0].im)*mtmp[1] + 0x40000000 >> 31);

mtmp[0] = (int32_t)(((int64_t)tab[0].re)*z[1].re + 0x40000000 >> 31);
mtmp[1] = (int32_t)(((int64_t)tab[0].re)*z[1].im + 0x40000000 >> 31);
mtmp[2] = (int32_t)(((int64_t)tab[0].re)*t[4].re + 0x40000000 >> 31);
mtmp[3] = (int32_t)(((int64_t)tab[0].re)*t[4].im + 0x40000000 >> 31);
mtmp[0] = ((int64_t)tab[1].re)*w[0].re;
mtmp[1] = ((int64_t)tab[1].re)*w[0].im;
mtmp[2] = ((int64_t)tab[2].im)*w[0].re;
mtmp[3] = ((int64_t)tab[2].im)*w[0].im;
mtmp[4] = ((int64_t)tab[1].im)*w[2].re;
mtmp[5] = ((int64_t)tab[1].im)*w[2].im;
mtmp[6] = ((int64_t)tab[2].re)*w[2].re;
mtmp[7] = ((int64_t)tab[2].re)*w[2].im;

x[1].re = (int32_t)(mtmp[0] + ((int64_t)tab[2].im)*w[1].re + 0x40000000 >> 31);
x[1].im = (int32_t)(mtmp[1] + ((int64_t)tab[2].im)*w[1].im + 0x40000000 >> 31);
x[2].re = (int32_t)(mtmp[2] - ((int64_t)tab[3].re)*w[1].re + 0x40000000 >> 31);
x[2].im = (int32_t)(mtmp[3] - ((int64_t)tab[3].re)*w[1].im + 0x40000000 >> 31);
y[1].re = (int32_t)(mtmp[4] + ((int64_t)tab[2].re)*w[3].re + 0x40000000 >> 31);
y[1].im = (int32_t)(mtmp[5] + ((int64_t)tab[2].re)*w[3].im + 0x40000000 >> 31);
y[2].re = (int32_t)(mtmp[6] - ((int64_t)tab[3].im)*w[3].re + 0x40000000 >> 31);
y[2].im = (int32_t)(mtmp[7] - ((int64_t)tab[3].im)*w[3].im + 0x40000000 >> 31);

y[0].re = (int32_t)(((int64_t)tab[0].im)*t[5].re + 0x40000000 >> 31);
y[0].im = (int32_t)(((int64_t)tab[0].im)*t[5].im + 0x40000000 >> 31);
/* fft9(), floating-point path for the same products */
x[1].re = tab[1].re*w[0].re + tab[2].im*w[1].re;
x[1].im = tab[1].re*w[0].im + tab[2].im*w[1].im;
x[2].re = tab[2].im*w[0].re - tab[3].re*w[1].re;
x[2].im = tab[2].im*w[0].im - tab[3].re*w[1].im;
y[1].re = tab[1].im*w[2].re + tab[2].re*w[3].re;
y[1].im = tab[1].im*w[2].im + tab[2].re*w[3].im;
y[2].re = tab[2].re*w[2].re - tab[3].im*w[3].re;
y[2].im = tab[2].re*w[2].im - tab[3].im*w[3].im;
/* fft9(): final recombination, common to both precisions */
x[4].re = x[1].re + x[2].re;
x[4].im = x[1].im + x[2].im;

y[4].re = y[1].re - y[2].re;
y[4].im = y[1].im - y[2].im;
x[1].re = z[0].re + x[1].re;
x[1].im = z[0].im + x[1].im;
y[1].re = y[0].re + y[1].re;
y[1].im = y[0].im + y[1].im;
x[2].re = z[0].re + x[2].re;
x[2].im = z[0].im + x[2].im;
y[2].re = y[2].re - y[0].re;
y[2].im = y[2].im - y[0].im;
x[4].re = z[0].re - x[4].re;
x[4].im = z[0].im - x[4].im;
y[4].re = y[0].re - y[4].re;
y[4].im = y[0].im - y[4].im;
/* fft15(): five 3-point sub-transforms over the PFA-interleaved input */
for (int i = 0; i < 5; i++)
    fft3(tmp + i, in + i*3, 5);
#define BUTTERFLIES(a0, a1, a2, a3)     \
    do {                                \
        r0 = a0.re;                     \
        i0 = a0.im;                     \
        r1 = a1.re;                     \
        i1 = a1.im;                     \
        BF(t3, t5, t5, t1);             \
        BF(a2.re, a0.re, r0, t5);       \
        BF(a3.im, a1.im, i1, t3);       \
        BF(t4, t6, t2, t6);             \
        BF(a3.re, a1.re, r1, t4);       \
        BF(a2.im, a0.im, i0, t6);       \
    } while (0)
#define TRANSFORM(a0, a1, a2, a3, wre, wim)        \
    do {                                           \
        CMUL(t1, t2, a2.re, a2.im, wre, -wim);     \
        CMUL(t5, t6, a3.re, a3.im, wre,  wim);     \
        BUTTERFLIES(a0, a1, a2, a3);               \
    } while (0)
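/*
 * For orientation: in the float template, CMUL is the usual complex
 * multiply, roughly (a sketch of the float variant; the int32 template
 * instead widens to 64 bits and rounds, as in the blocks above):
 *
 *     #define CMUL(dre, dim, are, aim, bre, bim)   \
 *         do {                                     \
 *             (dre) = (are)*(bre) - (aim)*(bim);   \
 *             (dim) = (are)*(bim) + (aim)*(bre);   \
 *         } while (0)
 *
 * so TRANSFORM rotates the two upper quarters by w and conj(w) before the
 * butterfly stage.
 */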
/* split_radix_combine(): eight TRANSFORM() butterflies per iteration, with
 * the wim[] table walked in reverse */
for (int i = 0; i < n; i += 4) {
    TRANSFORM(z[0], z[o1 + 0], z[o2 + 0], z[o3 + 0], cos[0], wim[7]);
    TRANSFORM(z[2], z[o1 + 2], z[o2 + 2], z[o3 + 2], cos[2], wim[5]);
    TRANSFORM(z[4], z[o1 + 4], z[o2 + 4], z[o3 + 4], cos[4], wim[3]);
    TRANSFORM(z[6], z[o1 + 6], z[o2 + 6], z[o3 + 6], cos[6], wim[1]);

    TRANSFORM(z[1], z[o1 + 1], z[o2 + 1], z[o3 + 1], cos[1], wim[6]);
    TRANSFORM(z[3], z[o1 + 3], z[o2 + 3], z[o3 + 3], cos[3], wim[4]);
    TRANSFORM(z[5], z[o1 + 5], z[o2 + 5], z[o3 + 5], cos[5], wim[2]);
    TRANSFORM(z[7], z[o1 + 7], z[o2 + 7], z[o3 + 7], cos[7], wim[0]);

    /* ... pointer advances elided in this excerpt ... */
}
#define DECL_FFT(n, n2, n4)                                 \
static void fft##n(FFTComplex *z)                           \
{                                                           \
    fft##n2(z);                                             \
    fft##n4(z + n4*2);                                      \
    fft##n4(z + n4*3);                                      \
    split_radix_combine(z, TX_NAME(ff_cos_##n), n4/2);      \
}
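/*
 * Expansion example (an illustration, not a line from this excerpt):
 * DECL_FFT(32, 16, 8) defines fft32() as one fft16() over the lower half
 * plus two fft8() over the upper quarters, followed by
 * split_radix_combine() with the ff_cos_32 table. Chaining such
 * declarations builds every power-of-two size in fft_dispatch[] below.
 */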
/* fft16(): the three non-trivial twiddle columns */
TRANSFORM(z[ 2], z[ 6], z[10], z[14], cos_16_2, cos_16_2);
TRANSFORM(z[ 1], z[ 5], z[ 9], z[13], cos_16_1, cos_16_3);
TRANSFORM(z[ 3], z[ 7], z[11], z[15], cos_16_3, cos_16_1);
static void (*const fft_dispatch[])(FFTComplex *) = {
    NULL,     fft2,     fft4,     fft8,     fft16,    fft32,
    fft64,    fft128,   fft256,   fft512,   fft1024,  fft2048,
    fft4096,  fft8192,  fft16384, fft32768, fft65536, fft131072,
};
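/*
 * The table is indexed by log2 of the transform length, so a power-of-two
 * FFT over m points is dispatched as (mirroring the calls made by the
 * compound transforms below):
 *
 *     fft_dispatch[av_log2(m)](z);
 */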
#define DECL_COMP_FFT(N)                                                \
static void compound_fft_##N##xM(AVTXContext *s, void *_out,            \
                                 void *_in, ptrdiff_t stride)           \
{                                                                       \
    const int m = s->m, *in_map = s->pfatab, *out_map = in_map + N*m;   \
    FFTComplex *in = _in;                                               \
    FFTComplex *out = _out;                                             \
    FFTComplex fft##N##in[N];                                           \
    void (*fftp)(FFTComplex *z) = fft_dispatch[av_log2(m)];             \
                                                                        \
    /* Gather through the PFA input map and run the small prime FFTs */ \
    for (int i = 0; i < m; i++) {                                       \
        for (int j = 0; j < N; j++)                                     \
            fft##N##in[j] = in[in_map[i*N + j]];                        \
        fft##N(s->tmp + s->revtab_c[i], fft##N##in, m);                 \
    }                                                                   \
                                                                        \
    /* N power-of-two FFTs across the columns */                        \
    for (int i = 0; i < N; i++)                                         \
        fftp(s->tmp + m*i);                                             \
                                                                        \
    /* Scatter back out through the PFA output map */                   \
    for (int i = 0; i < N*m; i++)                                       \
        out[i] = s->tmp[out_map[i]];                                    \
}
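/*
 * Presumably instantiated once per supported odd factor, i.e.
 * DECL_COMP_FFT(3), (5), (7), (9) and (15), yielding the
 * compound_fft_3xM() ... compound_fft_15xM() functions selected in
 * ff_tx_init_mdct_fft() below.
 */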
/* split_radix_fft(): apply the bit-reversal permutation, either in place
 * by following precomputed swap cycles, or out of place by a gather */
int src, dst, *inplace_idx = s->inplace_idx;

src = *inplace_idx++;

do {
    tmp = out[src];
    dst = s->revtab_c[src];
    do {
        FFSWAP(FFTComplex, tmp, out[dst]);
        dst = s->revtab_c[dst];
    } while (dst != src);
    out[dst] = tmp;
} while ((src = *inplace_idx++));

/* Out-of-place variant: gather via the reversal table */
for (int i = 0; i < m; i++)
    out[i] = in[s->revtab_c[i]];
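/*
 * A minimal, self-contained sketch of the same cycle-following idea on
 * plain ints (names here are hypothetical, not from this file): each cycle
 * of the permutation is rotated with a single saved element instead of a
 * scratch buffer, which is what AV_TX_INPLACE buys.
 */
static void permute_cycle(int *buf, const int *perm, int start)
{
    int tmp = buf[start];       /* save the first element of the cycle */
    int dst = perm[start];
    while (dst != start) {      /* walk the cycle, shifting as we go */
        int t    = buf[dst];
        buf[dst] = tmp;
        tmp      = t;
        dst      = perm[dst];
    }
    buf[start] = tmp;           /* close the cycle */
}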
/* naive_fft(): O(n^2) reference DFT, used for lengths with no fast path */
double phase = s->inv ? 2.0*M_PI/n : -2.0*M_PI/n;

for (int i = 0; i < n; i++) {
    /* ... */
    for (int j = 0; j < n; j++) {
        const double factor = phase*i*j;
        /* ... rotate in[j] by factor and accumulate into out[i] ... */
    }
}
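/*
 * The direct DFT being computed is X[i] = sum_j x[j] * e^(I*phase*i*j),
 * with phase = +-2*pi/n depending on direction. A standalone
 * double-precision sketch of the same loop (hypothetical helper, not part
 * of this file):
 */
#include <math.h>

typedef struct { double re, im; } DComplex;

static void naive_dft(DComplex *out, const DComplex *in, int n, int inverse)
{
    double phase = inverse ? 2.0*M_PI/n : -2.0*M_PI/n;
    for (int i = 0; i < n; i++) {
        DComplex acc = { 0.0, 0.0 };
        for (int j = 0; j < n; j++) {
            double f = phase * i * j;    /* twiddle angle */
            double c = cos(f), s = sin(f);
            acc.re += in[j].re * c - in[j].im * s;
            acc.im += in[j].re * s + in[j].im * c;
        }
        out[i] = acc;
    }
}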
#define DECL_COMP_IMDCT(N)                                                  \
static void compound_imdct_##N##xM(AVTXContext *s, void *_dst, void *_src, \
                                   ptrdiff_t stride)                       \
{                                                                          \
    FFTComplex fft##N##in[N];                                              \
    FFTComplex *z = _dst, *exp = s->exptab;                                \
    const int m = s->m, len8 = N*m >> 1;                                   \
    const int *in_map = s->pfatab, *out_map = in_map + N*m;                \
    const FFTSample *src = _src, *in1, *in2;                               \
    void (*fftp)(FFTComplex *) = fft_dispatch[av_log2(m)];                 \
                                                                           \
    stride /= sizeof(*src);                                                \
    in1 = src;                                                             \
    in2 = src + ((N*m*2) - 1) * stride;                                    \
                                                                           \
    /* Pre-twiddle: pair samples from both ends of the input */            \
    for (int i = 0; i < m; i++) {                                          \
        for (int j = 0; j < N; j++) {                                      \
            const int k = in_map[i*N + j];                                 \
            FFTComplex tmp = { in2[-k*stride], in1[k*stride] };            \
            CMUL3(fft##N##in[j], tmp, exp[k >> 1]);                        \
        }                                                                  \
        fft##N(s->tmp + s->revtab_c[i], fft##N##in, m);                    \
    }                                                                      \
                                                                           \
    for (int i = 0; i < N; i++)                                            \
        fftp(s->tmp + m*i);                                                \
                                                                           \
    /* Post-twiddle: write both output quarters symmetrically */           \
    for (int i = 0; i < len8; i++) {                                       \
        const int i0 = len8 + i, i1 = len8 - i - 1;                        \
        const int s0 = out_map[i0], s1 = out_map[i1];                      \
        FFTComplex src1 = { s->tmp[s1].im, s->tmp[s1].re };                \
        FFTComplex src0 = { s->tmp[s0].im, s->tmp[s0].re };                \
                                                                           \
        CMUL(z[i1].re, z[i0].im, src1.re, src1.im, exp[i1].im, exp[i1].re);\
        CMUL(z[i0].re, z[i1].im, src0.re, src0.im, exp[i0].im, exp[i0].re);\
    }                                                                      \
}
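/*
 * Presumably instantiated as DECL_COMP_IMDCT(3) through (15), matching the
 * compound_imdct_*xM functions selected below. Note how the .im/.re swap
 * in src0/src1, together with the reversed exp[] components in the final
 * CMULs, folds the IMDCT's conjugation into the post-twiddle pass.
 */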
#define DECL_COMP_MDCT(N)                                                  \
static void compound_mdct_##N##xM(AVTXContext *s, void *_dst, void *_src,  \
                                  ptrdiff_t stride)                        \
{                                                                          \
    FFTSample *src = _src, *dst = _dst;                                    \
    FFTComplex *exp = s->exptab, tmp, fft##N##in[N];                       \
    const int m = s->m, len4 = N*m, len3 = len4 * 3, len8 = len4 >> 1;     \
    const int *in_map = s->pfatab, *out_map = in_map + N*m;                \
    void (*fftp)(FFTComplex *) = fft_dispatch[av_log2(m)];                 \
                                                                           \
    stride /= sizeof(*dst);                                                \
                                                                           \
    /* Fold the 2x-length real input into complex FFT inputs */            \
    for (int i = 0; i < m; i++) {                                          \
        for (int j = 0; j < N; j++) {                                      \
            const int k = in_map[i*N + j];                                 \
            if (k < len4) {                                                \
                tmp.re = FOLD(-src[ len4 + k],  src[1*len4 - 1 - k]);      \
                tmp.im = FOLD(-src[ len3 + k], -src[1*len3 - 1 - k]);      \
            } else {                                                       \
                tmp.re = FOLD(-src[ len4 + k], -src[5*len4 - 1 - k]);      \
                tmp.im = FOLD( src[-len4 + k], -src[1*len3 - 1 - k]);      \
            }                                                              \
            CMUL(fft##N##in[j].im, fft##N##in[j].re, tmp.re, tmp.im,       \
                 exp[k >> 1].re, exp[k >> 1].im);                          \
        }                                                                  \
        fft##N(s->tmp + s->revtab_c[i], fft##N##in, m);                    \
    }                                                                      \
                                                                           \
    for (int i = 0; i < N; i++)                                            \
        fftp(s->tmp + m*i);                                                \
                                                                           \
    /* Post-twiddle, interleaving the two output quarters */               \
    for (int i = 0; i < len8; i++) {                                       \
        const int i0 = len8 + i, i1 = len8 - i - 1;                        \
        const int s0 = out_map[i0], s1 = out_map[i1];                      \
        FFTComplex src1 = { s->tmp[s1].re, s->tmp[s1].im };                \
        FFTComplex src0 = { s->tmp[s0].re, s->tmp[s0].im };                \
                                                                           \
        CMUL(dst[2*i1*stride + stride], dst[2*i0*stride], src0.re, src0.im,\
             exp[i0].im, exp[i0].re);                                      \
        CMUL(dst[2*i0*stride + stride], dst[2*i1*stride], src1.re, src1.im,\
             exp[i1].im, exp[i1].re);                                      \
    }                                                                      \
}
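/*
 * Sketch of the idea behind FOLD(): an MDCT's double-length real input is
 * folded into half as many complex samples before the FFT, and the two
 * branches handle indices below and above the quarter boundary, where the
 * fold wraps around opposite ends of the input. FOLD() itself is defined
 * per template (the float template can simply add its two operands); treat
 * the specifics as an assumption here.
 */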
/* monolithic_imdct(): same structure as the compound version, minus the
 * PFA mapping */
const int m = s->m, len8 = m >> 1;
/* ... */

for (int i = 0; i < m; i++) {
    /* ... pre-twiddle into s->tmp ... */
}

for (int i = 0; i < len8; i++) {
    const int i0 = len8 + i, i1 = len8 - i - 1;
    /* ... post-twiddle, as in DECL_COMP_IMDCT() ... */
}
/* monolithic_mdct(): the same input fold as DECL_COMP_MDCT(), with N = 1 */
const int m = s->m, len4 = m, len3 = len4 * 3, len8 = len4 >> 1;
/* ... */

for (int i = 0; i < m; i++) {
    /* ... branch for k below the quarter boundary ... */
    tmp.re = FOLD(-src[ len4 + k],  src[1*len4 - 1 - k]);
    tmp.im = FOLD(-src[ len3 + k], -src[1*len3 - 1 - k]);
    /* ... branch for k above it ... */
    tmp.re = FOLD(-src[ len4 + k], -src[5*len4 - 1 - k]);
    tmp.im = FOLD( src[-len4 + k], -src[1*len3 - 1 - k]);
    /* ... */
}

for (int i = 0; i < len8; i++) {
    const int i0 = len8 + i, i1 = len8 - i - 1;
    /* ... post-twiddle into dst ... */
}
/* naive_imdct(): direct evaluation over both halves of the output window */
const double phase = M_PI/(4.0*len2);

for (int i = 0; i < len; i++) {
    /* ... */
    double i_d = phase * (4*len - 2*i - 1);
    double i_u = phase * (3*len2 + 2*i + 1);
    for (int j = 0; j < len2; j++) {
        double a   = (2 * j + 1);
        double a_d = cos(a * i_d);
        double a_u = cos(a * i_u);
        /* ... accumulate src[j] against both basis angles ... */
    }
}
/* naive_mdct(): direct evaluation of the MDCT definition */
const double phase = M_PI/(4.0*len);

for (int i = 0; i < len; i++) {
    double sum = 0.0;
    for (int j = 0; j < len*2; j++) {
        int a = (2*j + 1 + len) * (2*i + 1);
        sum += UNSCALE(src[j]) * cos(a * phase);
    }
    /* ... scale and store dst[i] ... */
}
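/*
 * In formula form, the loop above computes
 *
 *     X[i] = sum_{j=0}^{2*len-1} x[j] * cos( pi/(4*len) * (2j+1+len)(2i+1) )
 *
 * which, since pi/(4*len) * (2j+1+len)(2i+1) = pi/len * (j + 1/2 + len/2) *
 * (i + 1/2), is the standard MDCT up to the template's scaling. It serves
 * as the reference the fast fold+FFT paths above must match numerically.
 */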
/* full_imdct_wrapper_fn() (AV_TX_FULL_IMDCT): run the half-length IMDCT
 * into the middle of dst, then mirror it out to the full window */
int len = s->m*s->n*4;
/* ... */

s->top_tx(s, dst + len4, _src, stride);
/* ... */

for (int i = 0; i < len4; i++) {
    /* ... mirror samples, with sign flips, into the outer quarters ... */
}

/* gen_mdct_exptab(): a negative scale selects the inverse transform's
 * extra phase offset */
const double theta = (scale < 0 ? len4 : 0) + 1.0/8.0;
/* ... */

for (int i = 0; i < len4; i++) {
    /* ... fill exptab[i] from cos/sin of M_PI_2*(i + theta)/len4, scaled ... */
}
#define CHECK_FACTOR(DST, FACTOR, SRC) \
    if (DST == 1 && !(SRC % FACTOR)) { \
        DST = FACTOR;                  \
        SRC /= FACTOR;                 \
    }
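/*
 * Usage sketch (the call sites in this file try the large odd factors
 * first, something along the lines of):
 *
 *     int n = 1;
 *     CHECK_FACTOR(n, 15, len)
 *     CHECK_FACTOR(n,  9, len)
 *     CHECK_FACTOR(n,  7, len)
 *     CHECK_FACTOR(n,  5, len)
 *     CHECK_FACTOR(n,  3, len)
 *
 * leaving len reduced to its (ideally power-of-two) cofactor.
 */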
/* ff_tx_init_mdct_fft(): factor the length, then pick a transform */
if (!(len & (len - 1)) && len >= 2 && len <= max_ptwo) {
    /* ... the remainder is a supported power of two ... */
}
/* ... */

/* Fall back to the naive transforms if factorization failed */
if (len > 1 || m == 1) {
    if (is_mdct && (l & 1))
        /* ... odd MDCT lengths are rejected ... */;
    /* ... select naive_fft / naive_mdct / naive_imdct ... */
    s->scale = *((SCALE_TYPE *)scale);
    /* ... */
}

if (n > 1 && m > 1) { /* compound transform */
    /* ... */
    if (!(m & (m - 1))) {
        *tx = n == 3 ? compound_fft_3xM :
              n == 5 ? compound_fft_5xM :
              n == 7 ? compound_fft_7xM :
              n == 9 ? compound_fft_9xM :
                       compound_fft_15xM;
        if (is_mdct)
            *tx = n == 3 ? inv ? compound_imdct_3xM : compound_mdct_3xM :
                  n == 5 ? inv ? compound_imdct_5xM : compound_mdct_5xM :
                  n == 7 ? inv ? compound_imdct_7xM : compound_mdct_7xM :
                  n == 9 ? inv ? compound_imdct_9xM : compound_mdct_9xM :
                           inv ? compound_imdct_15xM : compound_mdct_15xM;
    }
    /* ... */
    if (n == 3 || n == 5 || n == 15)
        /* ... initialize the 3/5-point twiddle tables ... */;
}
/* ... */

if (m != 1 && !(m & (m - 1))) { /* single power-of-two transform */
    /* ... */
}