/* sbr_sum_square_c(): sum of squared magnitudes of n complex samples,
 * kept in four 64-bit partial sums and then normalized to a SoftFloat. */
    uint64_t accu0 = 0, accu1 = 0, accu2 = 0, accu3 = 0;
    ...
    for (i = 0; i < n; i += 2) {
        accu0 += (int64_t)x[i + 0][0] * x[i + 0][0];
        accu1 += (int64_t)x[i + 0][1] * x[i + 0][1];
        accu2 += (int64_t)x[i + 1][0] * x[i + 1][0];
        accu3 += (int64_t)x[i + 1][1] * x[i + 1][1];
    }
    ...
    /* rescale the partial sums while any of them approaches bit 62 */
    while ((accu0|accu1|accu2|accu3) >> 62) {
        ...
    accu = accu0 + accu1 + accu2 + accu3;
    ...
    /* normalize: find the shift nz that leaves the top bit of the 32-bit
     * result set, then apply it with round-to-nearest */
    while (u < 0x80000000U) {
        ...
    round = 1ULL << (nz-1);
    u = ((accu + round) >> nz);
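The while loops above prepare the normalized mantissa for the SoftFloat that sbr_sum_square_c returns: the 64-bit total is shifted until its top bit is set, with round-to-nearest on the discarded bits. A minimal self-contained sketch of that normalization, assuming only an unsigned 64-bit energy sum; norm_energy and its (mant, expo) convention are illustrative and do not match FFmpeg's SoftFloat layout exactly:

#include <stdint.h>

/* Illustrative sketch: reduce a 64-bit energy sum to a 32-bit mantissa and
 * an exponent so that accu is roughly mant * 2^(expo - 32), using the same
 * round-to-nearest shift pattern ((accu + round) >> nz) as the code above. */
static void norm_energy(uint64_t accu, uint32_t *mant, int *expo)
{
    int nz = 64;

    if (!accu) {
        *mant = 0;
        *expo = 0;
        return;
    }
    while (!(accu >> (nz - 1)))           /* nz = position of the highest set bit */
        nz--;
    if (nz > 32) {
        uint64_t m = accu >> (nz - 33);   /* keep 33 bits, then round the last one */
        m = (m + 1) >> 1;
        if (m >> 32) {                    /* rounding carried into a new bit */
            m >>= 1;
            nz++;
        }
        *mant = (uint32_t)m;
    } else {
        *mant = (uint32_t)(accu << (32 - nz));
    }
    *expo = nz;
}

The real function packs the equivalent pair into a SoftFloat; the exact mantissa width and exponent bias differ from this sketch, but the shape of the loop is the same.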
/* sbr_neg_odd_64_c(): negate the odd-indexed entries of a 64-sample buffer */
    for (i = 1; i < 64; i += 2)
        x[i] = -x[i];

/* sbr_qmf_pre_shuffle_c(): build the upper half of z by interleaving a
 * negated, reversed copy of the input with the input samples themselves */
    for (k = 1; k < 32; k++) {
        z[64+2*k  ] = -z[64 - k];
        z[64+2*k+1] =  z[ k + 1];
    }

/* sbr_qmf_post_shuffle_c(): repack z into the complex output W[32][2]
 * (loop body elided in this excerpt) */
    for (k = 0; k < 32; k++) {
    ...

/* sbr_qmf_deint_neg_c(): deinterleave src into v, negating every other
 * source value, with a round-to-nearest shift by 5 */
    for (i = 0; i < 32; i++) {
        v[     i] = ( src[63 - 2*i    ] + 0x10) >> 5;
        v[63 - i] = (-src[63 - 2*i - 1] + 0x10) >> 5;
    }
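The `(... + 0x10) >> 5` in the deinterleave above is this file's recurring round-to-nearest right shift; the same idiom reappears below as `(accu + 0x40000000) >> 31` and `(... + round) >> shift`. A generic form of the pattern for reference (rshift_round is an illustrative name, not an FFmpeg helper):

#include <stdint.h>

/* Right shift by n with round-to-nearest (ties rounded toward +infinity):
 * equivalent to dividing by 2^n and rounding instead of truncating.
 * n = 5 gives the "+ 0x10 >> 5" used by the QMF deinterleave above;
 * n = 31 gives the "+ 0x40000000 >> 31" used on the 64-bit products below. */
static inline int64_t rshift_round(int64_t v, unsigned n)
{
    return (v + ((int64_t)1 << (n - 1))) >> n;
}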
/* autocorr_calc(): convert a 64-bit correlation accumulator into a SoftFloat
 * by normalizing it to a 32-bit mantissa and an exponent (via av_int2sf()) */
    int i = (int)(accu >> 32);
    ...
    /* determine the normalization shift nz from the top 32 bits */
    while (FFABS(i) < 0x40000000) {
    ...
    /* apply the shift with round-to-nearest, then drop 7 more bits */
    round = 1U << (nz-1);
    mant  = (int)((accu + round) >> nz);
    mant  = (mant + 0x40LL) >> 7;
/* autocorrelate(): accumulate the complex lag-autocorrelation of x[0..39]
 * that feeds the SBR linear-prediction coefficients; each partial sum is
 * turned into a SoftFloat entry of phi by autocorr_calc() */
    int64_t real_sum, imag_sum;
    int64_t accu_re = 0, accu_im = 0;
    ...
    /* lag != 0: sum of conj(x[i]) * x[i+lag] over the main range */
    for (i = 1; i < 38; i++) {
        accu_re += (uint64_t)x[i][0] * x[i+lag][0];
        accu_re += (uint64_t)x[i][1] * x[i+lag][1];
        accu_im += (uint64_t)x[i][0] * x[i+lag][1];
        accu_im -= (uint64_t)x[i][1] * x[i+lag][0];
    }
    ...
    /* extend the sum by the i = 0 term */
    accu_re += (uint64_t)x[ 0][0] * x[lag][0];
    accu_re += (uint64_t)x[ 0][1] * x[lag][1];
    accu_im += (uint64_t)x[ 0][0] * x[lag][1];
    accu_im -= (uint64_t)x[ 0][1] * x[lag][0];
    ...
    /* for lag == 1, the saved sums are instead extended by the i = 38 term */
    accu_re += (uint64_t)x[38][0] * x[39][0];
    accu_re += (uint64_t)x[38][1] * x[39][1];
    accu_im += (uint64_t)x[38][0] * x[39][1];
    accu_im -= (uint64_t)x[38][1] * x[39][0];
    ...
    /* lag == 0: plain energy, the imaginary part is identically zero */
    for (i = 1; i < 38; i++) {
        accu_re += (uint64_t)x[i][0] * x[i][0];
        accu_re += (uint64_t)x[i][1] * x[i][1];
    }
    ...
    accu_re += (uint64_t)x[ 0][0] * x[ 0][0];
    accu_re += (uint64_t)x[ 0][1] * x[ 0][1];
    ...
    accu_re += (uint64_t)x[38][0] * x[38][0];
    accu_re += (uint64_t)x[38][1] * x[38][1];
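To make the sign convention explicit: the accu_re/accu_im pairs above are the real and imaginary parts of a conjugate lag product sum. A floating-point reference of that accumulation (autocorr_ref is illustrative, not part of FFmpeg):

#include <complex.h>

/* Sum of conj(x[i]) * x[i+lag] over [lo, hi): its real part matches accu_re
 * (x_re*y_re + x_im*y_im) and its imaginary part matches accu_im
 * (x_re*y_im - x_im*y_re) in the integer loops above. */
static double complex autocorr_ref(const int x[40][2], int lag, int lo, int hi)
{
    double complex sum = 0;
    for (int i = lo; i < hi; i++) {
        double complex a = x[i][0]       + I * x[i][1];
        double complex b = x[i + lag][0] + I * x[i + lag][1];
        sum += conj(a) * b;
    }
    return sum;
}

For lag == 0 the imaginary part vanishes, which is why the later loops only accumulate accu_re.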
/* sbr_hf_gen_c(): generate the high-band signal X_high from the low band
 * X_low with a two-tap complex predictor; alpha0/alpha1 are the prediction
 * coefficients and bw the bandwidth-expansion factor */
static void sbr_hf_gen_c(int (*X_high)[2], const int (*X_low)[2],
                         const int alpha0[2], const int alpha1[2],
                         int bw, int start, int end)
    ...
    /* alpha[2],alpha[3] = alpha0 * bw and alpha[0],alpha[1] = alpha1 * bw^2,
     * as rounded 64-bit products shifted back by 31 */
    accu = (int64_t)alpha0[0] * bw;
    alpha[2] = (int)((accu + 0x40000000) >> 31);
    accu = (int64_t)alpha0[1] * bw;
    alpha[3] = (int)((accu + 0x40000000) >> 31);
    accu = (int64_t)bw * bw;
    bw = (int)((accu + 0x40000000) >> 31);
    accu = (int64_t)alpha1[0] * bw;
    alpha[0] = (int)((accu + 0x40000000) >> 31);
    accu = (int64_t)alpha1[1] * bw;
    alpha[1] = (int)((accu + 0x40000000) >> 31);
    ...
    /* X_high[i] = X_low[i] + (alpha0*bw)*X_low[i-1] + (alpha1*bw^2)*X_low[i-2],
     * evaluated as complex fixed-point multiply-accumulates */
    for (i = start; i < end; i++) {
        /* real part */
        accu  = (int64_t)X_low[i][0] * 0x20000000;
        accu += (int64_t)X_low[i - 2][0] * alpha[0];
        accu -= (int64_t)X_low[i - 2][1] * alpha[1];
        accu += (int64_t)X_low[i - 1][0] * alpha[2];
        accu -= (int64_t)X_low[i - 1][1] * alpha[3];
        X_high[i][0] = (int)((accu + 0x10000000) >> 29);
        /* imaginary part */
        accu  = (int64_t)X_low[i][1] * 0x20000000;
        accu += (int64_t)X_low[i - 2][1] * alpha[0];
        accu += (int64_t)X_low[i - 2][0] * alpha[1];
        accu += (int64_t)X_low[i - 1][1] * alpha[2];
        accu += (int64_t)X_low[i - 1][0] * alpha[3];
        X_high[i][1] = (int)((accu + 0x10000000) >> 29);
    }
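Ignoring the fixed-point scaling, the coefficient setup and the main loop above implement the usual SBR patch predictor. A floating-point sketch of the same relationship (hf_gen_ref is illustrative; the Q-format and rounding details of the integer version are dropped):

#include <complex.h>

/* X_high[i] = X_low[i] + (alpha0*bw) * X_low[i-1] + (alpha1*bw^2) * X_low[i-2],
 * the complex multiply-accumulate spelled out in integers above (scaling aside). */
static void hf_gen_ref(double complex *X_high, const double complex *X_low,
                       double complex alpha0, double complex alpha1,
                       double bw, int start, int end)
{
    const double complex a1 = alpha0 * bw;        /* applied to X_low[i-1] */
    const double complex a0 = alpha1 * bw * bw;   /* applied to X_low[i-2] */

    for (int i = start; i < end; i++)
        X_high[i] = X_low[i] + a1 * X_low[i - 1] + a0 * X_low[i - 2];
}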
/* sbr_hf_g_filt_c(): scale each high-band sample by its SoftFloat gain
 * g_filt[m], rounding the result back into the fixed-point Y buffer */
static void sbr_hf_g_filt_c(int (*Y)[2], const int (*X_high)[40][2],
                            const SoftFloat *g_filt, int m_max, intptr_t ixh)
    ...
    for (m = 0; m < m_max; m++) {
        if (22 - g_filt[m].exp < 61) {
            int64_t r = 1LL << (22-g_filt[m].exp);
            accu = (int64_t)X_high[m][ixh][0] * ((g_filt[m].mant + 0x40)>>7);
            Y[m][0] = (int)((accu + r) >> (23-g_filt[m].exp));
            accu = (int64_t)X_high[m][ixh][1] * ((g_filt[m].mant + 0x40)>>7);
            Y[m][1] = (int)((accu + r) >> (23-g_filt[m].exp));
/* sbr_hf_apply_noise(): per band m, add either a sinusoidal component s_m
 * (with alternating phase signs) or pseudo-noise scaled by q_filt to Y */
    for (m = 0; m < m_max; m++) {
        unsigned y0 = Y[m][0];
        unsigned y1 = Y[m][1];
        noise = (noise + 1) & 0x1ff;   /* step through the 512-entry noise table */
        ...
        /* sinusoid branch: shift s_m down to the Y scale with rounding */
        shift = 22 - s_m[m].exp;
        ...
        } else if (shift < 30) {
            round = 1 << (shift-1);
            y0 += (s_m[m].mant * phi_sign0 + round) >> shift;
            y1 += (s_m[m].mant * phi_sign1 + round) >> shift;
        ...
        /* noise branch: multiply the noise-table entry by q_filt as a rounded
         * 64-bit product shifted back by 31, then shift down to the Y scale */
        shift = 22 - q_filt[m].exp;
        ...
        } else if (shift < 30) {
            round = 1 << (shift-1);
            accu = (int64_t)q_filt[m].mant * ff_sbr_noise_table_fixed[noise][0];
            tmp  = (int)((accu + 0x40000000) >> 31);
            y0  += (tmp + round) >> shift;
            accu = (int64_t)q_filt[m].mant * ff_sbr_noise_table_fixed[noise][1];
            tmp  = (int)((accu + 0x40000000) >> 31);
            y1  += (tmp + round) >> shift;
        ...
        phi_sign1 = -phi_sign1;   /* alternate the phase of the imaginary part */
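The rounding and shift bookkeeping aside, the loop above mixes one of two components into each band, selected (in surrounding code not shown here) on whether the sinusoid s_m[m] is present. A floating-point sketch of that mixing; apply_noise_ref and noise_table are illustrative stand-ins, while ff_sbr_noise_table_fixed is the table actually used above:

/* Per band m: add either a sinusoid s_m[m] with alternating phase signs, or
 * a pseudo-noise term q_filt[m] * noise_table[noise], stepping the noise
 * index through a 512-entry table. */
static void apply_noise_ref(double (*Y)[2], const double *s_m,
                            const double *q_filt,
                            const double (*noise_table)[2],
                            int noise, int phi_sign0, int phi_sign1, int m_max)
{
    for (int m = 0; m < m_max; m++) {
        noise = (noise + 1) & 0x1ff;
        if (s_m[m] != 0.0) {
            Y[m][0] += s_m[m] * phi_sign0;
            Y[m][1] += s_m[m] * phi_sign1;
        } else {
            Y[m][0] += q_filt[m] * noise_table[noise][0];
            Y[m][1] += q_filt[m] * noise_table[noise][1];
        }
        phi_sign1 = -phi_sign1;
    }
}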
Definitions referenced by the code above:

static SoftFloat sbr_sum_square_c(int (*x)[2], int n)
static void sbr_neg_odd_64_c(int *x)
static void sbr_qmf_pre_shuffle_c(int *z)
static void sbr_qmf_post_shuffle_c(int W[32][2], const int *z)
static void sbr_qmf_deint_neg_c(int *v, const int *src)
static av_always_inline SoftFloat autocorr_calc(int64_t accu)
static av_always_inline void autocorrelate(const int x[40][2], SoftFloat phi[3][2][2], int lag)
static void sbr_autocorrelate_c(const int x[40][2], SoftFloat phi[3][2][2])
static void sbr_hf_gen_c(int (*X_high)[2], const int (*X_low)[2], const int alpha0[2], const int alpha1[2], int bw, int start, int end)
static void sbr_hf_g_filt_c(int (*Y)[2], const int (*X_high)[40][2], const SoftFloat *g_filt, int m_max, intptr_t ixh)
static av_always_inline int sbr_hf_apply_noise(int (*Y)[2], const SoftFloat *s_m, const SoftFloat *q_filt, int noise, int phi_sign0, int phi_sign1, int m_max)

#define av_assert2(cond)
assert() equivalent that does lie in speed-critical code.
#define FFABS(a)
Absolute value. Note: INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
static av_const SoftFloat av_int2sf(int v, int frac_bits)
Converts a mantissa and exponent to a SoftFloat.