#define CMUL3(c, a, b) CMUL((c).re, (c).im, (a).re, (a).im, (b).re, (b).im)
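/*
 * Aside (not part of mdct15.c): CMUL itself is defined in libavcodec's FFT
 * internals; for reading this listing it is enough to know it is a complex
 * multiply. A rough equivalent, under a different name to avoid implying it
 * is the real macro:
 */
#define CMUL_SKETCH(dre, dim, are, aim, bre, bim) do {                \
        (dre) = (are) * (bre) - (aim) * (bim);                        \
        (dim) = (are) * (bim) + (aim) * (bre);                        \
    } while (0)
/* so CMUL3(c, a, b) is simply c = a * b on FFTComplex values */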
static int init_pfa_reindex_tabs(MDCT15Context *s)
{
    int i, j;
    const int b_ptwo = s->ptwo_fft.nbits;                   /* bits in the power-of-two FFT length */
    const int l_ptwo = 1 << b_ptwo;                         /* power-of-two FFT length */
    const int inv_1  = l_ptwo << ((4 - b_ptwo) & 3);        /* l_ptwo * ((2^b_ptwo)^-1 mod 15) */
    const int inv_2  = 0xeeeeeeef & ((1U << b_ptwo) - 1);   /* 15^-1 mod 2^b_ptwo */

    s->pfa_prereindex = av_malloc_array(15 * l_ptwo, sizeof(*s->pfa_prereindex));
    if (!s->pfa_prereindex)
        return 1;

    s->pfa_postreindex = av_malloc_array(15 * l_ptwo, sizeof(*s->pfa_postreindex));
    if (!s->pfa_postreindex)
        return 1;

    /* Build the prime-factor (Good-Thomas) pre- and post-reindexing tables */
    for (i = 0; i < l_ptwo; i++) {
        for (j = 0; j < 15; j++) {
            const int q_pre  = ((l_ptwo * j)/15 + i) >> b_ptwo;
            const int q_post = (((j*inv_1)/15) + (i*inv_2)) >> b_ptwo;
            const int k_pre  = 15*i + (j - q_pre*15)*(1 << b_ptwo);
            const int k_post = i*inv_2*15 + j*inv_1 - 15*q_post*l_ptwo;
            s->pfa_prereindex[i*15 + j] = k_pre << 1;
            s->pfa_postreindex[k_post]  = l_ptwo*j + i;
        }
    }

    return 0;
}
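/*
 * Aside (not part of mdct15.c): the two constants above are the modular
 * inverses the Good-Thomas reindexing relies on. A quick standalone sketch
 * to sanity-check them: 15*inv_2 == 1 (mod 2^b_ptwo), and inv_1 is the
 * multiple of l_ptwo that is congruent to 1 (mod 15). The loop bound of 12
 * is just an illustrative range.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    for (int b = 1; b <= 12; b++) {                     /* b plays the role of ptwo_fft.nbits */
        const uint32_t l_ptwo = 1u << b;
        const uint32_t inv_1  = l_ptwo << ((4 - b) & 3);
        const uint32_t inv_2  = 0xeeeeeeefu & (l_ptwo - 1);

        assert((15u * inv_2) % l_ptwo == 1);            /* inv_2 = 15^-1 mod 2^b          */
        assert(inv_1 % 15 == 1 && inv_1 % l_ptwo == 0); /* inv_1 = l_ptwo * (2^-b mod 15) */
        printf("b=%2d  inv_1=%u  inv_2=%u\n", b, inv_1, inv_2);
    }
    return 0;
}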
/* 5-point FFT over inputs spaced 3 apart (the 15-point transform splits into
 * three interleaved 5-point transforms, hence the in[3], in[6], ... indices) */
static void fft5(FFTComplex *out, FFTComplex *in, FFTComplex exptab[2])
{
    FFTComplex z0[4], t[6];

    t[0].re = in[3].re + in[12].re;
    t[0].im = in[3].im + in[12].im;
    t[1].im = in[3].re - in[12].re;
    t[1].re = in[3].im - in[12].im;
    t[2].re = in[6].re + in[ 9].re;
    t[2].im = in[6].im + in[ 9].im;
    t[3].im = in[6].re - in[ 9].re;
    t[3].re = in[6].im - in[ 9].im;

    out[0].re = in[0].re + in[3].re + in[6].re + in[9].re + in[12].re;
    out[0].im = in[0].im + in[3].im + in[6].im + in[9].im + in[12].im;

    /* ... (the listing omits the lines that combine t[0..3] with the two
     *      5-point exponents in exptab[] to produce t[4] and t[5] and to
     *      update t[0] and t[1]) ... */

    z0[0].re = t[0].re - t[1].re;
    z0[0].im = t[0].im - t[1].im;
    z0[1].re = t[4].re + t[5].re;
    z0[1].im = t[4].im + t[5].im;

    z0[2].re = t[4].re - t[5].re;
    z0[2].im = t[4].im - t[5].im;
    z0[3].re = t[0].re + t[1].re;
    z0[3].im = t[0].im + t[1].im;

    out[1].re = in[0].re + z0[3].re;
    out[1].im = in[0].im + z0[0].im;
    out[2].re = in[0].re + z0[2].re;
    out[2].im = in[0].im + z0[1].im;
    out[3].re = in[0].re + z0[1].re;
    out[3].im = in[0].im + z0[2].im;
    out[4].re = in[0].re + z0[0].re;
    out[4].im = in[0].im + z0[3].im;
}

/* 15-point FFT, decomposed into three stride-3 fft5() passes; only the
 * recombination loop header survives in this listing. */
static void fft15_c(FFTComplex *out, FFTComplex *in, FFTComplex *exptab, ptrdiff_t stride)
{
    int k;

    /* ... (three fft5() passes over the interleaved inputs) ... */

    for (k = 0; k < 5; k++) {
        /* ... (twiddle and recombine the three 5-point outputs) ... */
    }
}
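/*
 * Aside (not part of mdct15.c): the hand-unrolled 5- and 15-point kernels
 * above are easiest to validate against a naive O(N^2) DFT. This sketch
 * assumes the usual {float re, im} layout for FFTComplex; the sign
 * convention of the real tables is set up in ff_mdct15_init() below and may
 * differ from the textbook forward sign used here.
 */
#include <math.h>

typedef struct { float re, im; } NaiveComplex;   /* stand-in for FFTComplex */

/* out[k] = sum_n in[n] * exp(sign * 2*pi*i*n*k/N), sign = -1 forward, +1 inverse */
static void naive_dft(NaiveComplex *out, const NaiveComplex *in, int N, int inverse)
{
    const double sign = inverse ? 1.0 : -1.0;
    for (int k = 0; k < N; k++) {
        double re = 0.0, im = 0.0;
        for (int n = 0; n < N; n++) {
            const double phi = sign * 2.0 * M_PI * (double)n * k / N;
            re += in[n].re * cos(phi) - in[n].im * sin(phi);
            im += in[n].re * sin(phi) + in[n].im * cos(phi);
        }
        out[k].re = (float)re;
        out[k].im = (float)im;
    }
}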
/* Forward MDCT: fold the input, pre-reindex, run the 15-point FFTs into
 * s->tmp, then the power-of-two FFTs, and finally post-reindex with the
 * output twiddles. */
static void mdct15(MDCT15Context *s, float *dst, const float *src, ptrdiff_t stride)
{
    int i, j;
    FFTComplex fft15in[15];
    const int len4 = s->len4, len3 = len4 * 3, len8 = len4 >> 1;
    const int l_ptwo = 1 << s->ptwo_fft.nbits;

    /* Fold the input and pre-reindex */
    for (i = 0; i < l_ptwo; i++) {
        for (j = 0; j < 15; j++) {
            FFTComplex tmp;
            const int k = s->pfa_prereindex[i*15 + j];

            if (k < len4) {
                tmp.re = -src[ len4 + k] + src[1*len4 - 1 - k];
                tmp.im = -src[ len3 + k] - src[1*len3 - 1 - k];
            } else {
                tmp.re = -src[ len4 + k] - src[5*len4 - 1 - k];
                tmp.im =  src[-len4 + k] - src[1*len3 - 1 - k];
            }

            /* ... (pre-twiddle of tmp into fft15in[j]) ... */
        }

        s->fft15(s->tmp + s->ptwo_fft.revtab[i], fft15in, s->exptab, l_ptwo);
    }

    /* Then a 15xN FFT (N a power of two) across the rows of s->tmp */
    for (i = 0; i < 15; i++)
        s->ptwo_fft.fft_calc(&s->ptwo_fft, s->tmp + l_ptwo*i);

    /* Reindex again, apply the output twiddles and write dst */
    for (i = 0; i < len8; i++) {
        const int i0 = len8 + i, i1 = len8 - i - 1;
        const int s0 = s->pfa_postreindex[i0], s1 = s->pfa_postreindex[i1];

        /* ... (two output CMULs; their twiddle arguments are
         *      s->twiddle_exptab[i0].im, s->twiddle_exptab[i0].re and
         *      s->twiddle_exptab[i1].im, s->twiddle_exptab[i1].re) ... */
    }
}
/* Half-length inverse MDCT: same structure as the forward transform, with
 * the folding replaced by reindexed reads of the input. */
static void imdct15_half(MDCT15Context *s, float *dst, const float *src, ptrdiff_t stride)
{
    FFTComplex fft15in[15];
    FFTComplex *z = (FFTComplex *)dst;
    int i, j, len8 = s->len4 >> 1, l_ptwo = 1 << s->ptwo_fft.nbits;

    /* Reindex the input into fft15in and do the 15-point FFTs into s->tmp */
    for (i = 0; i < l_ptwo; i++) {
        for (j = 0; j < 15; j++) {
            FFTComplex tmp;
            const int k = s->pfa_prereindex[i*15 + j];

            /* ... (tmp is loaded from the input here) ... */
            CMUL3(fft15in[j], tmp, s->twiddle_exptab[k >> 1]);
        }

        s->fft15(s->tmp + s->ptwo_fft.revtab[i], fft15in, s->exptab, l_ptwo);
    }

    /* Then a 15xN FFT (N a power of two) across the rows of s->tmp */
    for (i = 0; i < 15; i++)
        s->ptwo_fft.fft_calc(&s->ptwo_fft, s->tmp + l_ptwo*i);

    /* Reindex again, apply twiddles and output */
    s->postreindex(z, s->tmp, s->twiddle_exptab, s->pfa_postreindex, len8);
}
/* Post-rotation used by the inverse transform (C version) */
static void postrotate_c(FFTComplex *out, FFTComplex *in, FFTComplex *exp,
                         int *lut, ptrdiff_t len8)
{
    int i;

    /* Reindex, apply the output twiddles and write the mirrored pairs */
    for (i = 0; i < len8; i++) {
        const int i0 = len8 + i, i1 = len8 - i - 1;
        const int s0 = lut[i0], s1 = lut[i1];

        /* ... (two CMULs writing out[i0] and out[i1]) ... */
    }
}
av_cold int ff_mdct15_init(MDCT15Context **ps, int inverse, int N, double scale)
{
    MDCT15Context *s;
    double theta;
    int i;
    int len2 = 15 * (1 << N);

    /* Only composite lengths 15 * 2^N with N in [2, 13] are supported */
    if ((N < 2) || (N > 13))
        return AVERROR(EINVAL);

    s = av_mallocz(sizeof(*s));
    if (!s)
        return AVERROR(ENOMEM);

    /* ... (the listing omits the context setup: length fields, the
     *      power-of-two FFT init, init_pfa_reindex_tabs(), and the tmp and
     *      twiddle_exptab allocations) ... */

    if (!s->twiddle_exptab)
        goto fail;

    /* Twiddle factors for the MDCT pre/post rotation */
    theta = 0.125f + (scale < 0 ? s->len4 : 0);
    for (i = 0; i < s->len4; i++) {
        /* ... (twiddle_exptab[i] is filled from theta and scale here) ... */
    }

    /* Exponents for the 15-point FFT; entries 15..18 duplicate 0..3 so the
     * kernel can index past the end without wrapping manually. */
    for (i = 0; i < 19; i++) {
        if (i < 15) {
            double theta = (2.0f * M_PI * i) / 15.0f;
            s->exptab[i].re = cosf(theta);
            s->exptab[i].im = sinf(theta);
        } else {
            s->exptab[i] = s->exptab[i - 15];
        }
    }

    /* ... (entries 19 and 20 hold the two 5-point exponents used by fft5()) ... */

    /* Invert the phase of the 5-point exponents for an inverse transform */
    if (inverse) {
        s->exptab[19].im *= -1;
        s->exptab[20].im *= -1;
    }

    *ps = s;
    return 0;

fail:
    ff_mdct15_uninit(&s);
    return AVERROR(ENOMEM);
}
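/*
 * Aside (not part of mdct15.c): putting the pieces together. The context is
 * created with ff_mdct15_init() and released with ff_mdct15_uninit(); the
 * transforms themselves are reached through function pointers on the context
 * (named mdct / imdct_half here on the assumption that they are bound to the
 * mdct15() and imdct15_half() functions above). Treat this as an illustrative
 * sketch, not exact API documentation.
 */
#include "libavcodec/mdct15.h"   /* assumed header for MDCT15Context */

static int example_imdct_half(float *dst, const float *coeffs, int N)
{
    MDCT15Context *mdct = NULL;
    int ret = ff_mdct15_init(&mdct, 1 /* inverse */, N, 1.0 /* scale */);
    if (ret < 0)
        return ret;

    /* dst and coeffs are assumed to hold 15 << N floats each */
    mdct->imdct_half(mdct, dst, coeffs, 1 /* stride */);

    ff_mdct15_uninit(&mdct);
    return 0;
}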