ac3dsp_init.c
/*
 * x86-optimized AC-3 DSP utils
 * Copyright (c) 2011 Justin Ruggles
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/attributes.h"
#include "libavutil/mem.h"
#include "libavutil/x86/asm.h"
#include "libavutil/x86/cpu.h"
#include "dsputil_x86.h"
#include "libavcodec/ac3.h"
#include "libavcodec/ac3dsp.h"

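/*
 * Prototypes for the hand-written assembly versions selected below. These
 * are assumed to be defined in the x86 assembly counterpart of this file;
 * the portable C implementations in libavcodec/ac3dsp.c remain the
 * fallbacks when none of the CPU features checked below are available.
 */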
void ff_ac3_exponent_min_mmx   (uint8_t *exp, int num_reuse_blocks, int nb_coefs);
void ff_ac3_exponent_min_mmxext(uint8_t *exp, int num_reuse_blocks, int nb_coefs);
void ff_ac3_exponent_min_sse2  (uint8_t *exp, int num_reuse_blocks, int nb_coefs);

int ff_ac3_max_msb_abs_int16_mmx   (const int16_t *src, int len);
int ff_ac3_max_msb_abs_int16_mmxext(const int16_t *src, int len);
int ff_ac3_max_msb_abs_int16_sse2  (const int16_t *src, int len);
int ff_ac3_max_msb_abs_int16_ssse3 (const int16_t *src, int len);

void ff_ac3_lshift_int16_mmx (int16_t *src, unsigned int len, unsigned int shift);
void ff_ac3_lshift_int16_sse2(int16_t *src, unsigned int len, unsigned int shift);

void ff_ac3_rshift_int32_mmx (int32_t *src, unsigned int len, unsigned int shift);
void ff_ac3_rshift_int32_sse2(int32_t *src, unsigned int len, unsigned int shift);

void ff_float_to_fixed24_3dnow(int32_t *dst, const float *src, unsigned int len);
void ff_float_to_fixed24_sse  (int32_t *dst, const float *src, unsigned int len);
void ff_float_to_fixed24_sse2 (int32_t *dst, const float *src, unsigned int len);

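/*
 * float_to_fixed24() converts float samples to fixed-point values scaled by
 * 2^24. As a hedged reference sketch (the helper name below is made up for
 * illustration and is not part of FFmpeg), the scalar behaviour these SIMD
 * versions are expected to match is roughly:
 *
 *     static void float_to_fixed24_ref(int32_t *dst, const float *src,
 *                                      unsigned int len)
 *     {
 *         while (len--)
 *             *dst++ = lrintf(*src++ * (1 << 24));
 *     }
 */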
int ff_ac3_compute_mantissa_size_sse2(uint16_t mant_cnt[6][16]);

void ff_ac3_extract_exponents_3dnow(uint8_t *exp, int32_t *coef, int nb_coefs);
void ff_ac3_extract_exponents_sse2 (uint8_t *exp, int32_t *coef, int nb_coefs);
void ff_ac3_extract_exponents_ssse3(uint8_t *exp, int32_t *coef, int nb_coefs);

void ff_apply_window_int16_round_mmxext(int16_t *output, const int16_t *input,
                                        const int16_t *window, unsigned int len);
void ff_apply_window_int16_round_sse2(int16_t *output, const int16_t *input,
                                      const int16_t *window, unsigned int len);
void ff_apply_window_int16_mmxext(int16_t *output, const int16_t *input,
                                  const int16_t *window, unsigned int len);
void ff_apply_window_int16_sse2(int16_t *output, const int16_t *input,
                                const int16_t *window, unsigned int len);
void ff_apply_window_int16_ssse3(int16_t *output, const int16_t *input,
                                 const int16_t *window, unsigned int len);
void ff_apply_window_int16_ssse3_atom(int16_t *output, const int16_t *input,
                                      const int16_t *window, unsigned int len);

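/*
 * Work around a limitation that is assumed to affect the Intel compiler on
 * 32-bit x86: the inline asm below needs seven general-purpose registers,
 * which ICC reportedly cannot provide there, so HAVE_7REGS is forced off.
 */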
#if ARCH_X86_32 && defined(__INTEL_COMPILER)
#   undef  HAVE_7REGS
#   define HAVE_7REGS 0
#endif

#if HAVE_SSE_INLINE && HAVE_7REGS

#define IF1(x) x
#define IF0(x)

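/*
 * MIX5 handles the two special-cased 5-channel downmixes. The mono/stereo
 * parameters are IF0/IF1, so instructions needed only for one variant are
 * compiled in or out. Three matrix coefficients are broadcast into
 * xmm5-xmm7, four samples per channel are processed per iteration, and the
 * result is written back in place over the first one or two channel planes.
 */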
#define MIX5(mono, stereo)                                  \
    __asm__ volatile (                                      \
        "movss      0(%1), %%xmm5       \n"                 \
        "movss      8(%1), %%xmm6       \n"                 \
        "movss     24(%1), %%xmm7       \n"                 \
        "shufps $0, %%xmm5, %%xmm5      \n"                 \
        "shufps $0, %%xmm6, %%xmm6      \n"                 \
        "shufps $0, %%xmm7, %%xmm7      \n"                 \
        "1:                             \n"                 \
        "movaps (%0, %2), %%xmm0        \n"                 \
        "movaps (%0, %3), %%xmm1        \n"                 \
        "movaps (%0, %4), %%xmm2        \n"                 \
        "movaps (%0, %5), %%xmm3        \n"                 \
        "movaps (%0, %6), %%xmm4        \n"                 \
        "mulps  %%xmm5, %%xmm0          \n"                 \
        "mulps  %%xmm6, %%xmm1          \n"                 \
        "mulps  %%xmm5, %%xmm2          \n"                 \
        "mulps  %%xmm7, %%xmm3          \n"                 \
        "mulps  %%xmm7, %%xmm4          \n"                 \
 stereo("addps  %%xmm1, %%xmm0          \n")                \
        "addps  %%xmm1, %%xmm2          \n"                 \
        "addps  %%xmm3, %%xmm0          \n"                 \
        "addps  %%xmm4, %%xmm2          \n"                 \
   mono("addps  %%xmm2, %%xmm0          \n")                \
        "movaps %%xmm0, (%0, %2)        \n"                 \
 stereo("movaps %%xmm2, (%0, %3)        \n")                \
        "add    $16, %0                 \n"                 \
        "jl     1b                      \n"                 \
        : "+&r"(i)                                          \
        : "r"(matrix),                                      \
          "r"(samples[0] + len),                            \
          "r"(samples[1] + len),                            \
          "r"(samples[2] + len),                            \
          "r"(samples[3] + len),                            \
          "r"(samples[4] + len)                             \
        : XMM_CLOBBERS("%xmm0", "%xmm1", "%xmm2", "%xmm3",  \
                       "%xmm4", "%xmm5", "%xmm6", "%xmm7",) \
          "memory"                                          \
    );

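/*
 * MIX_MISC is the generic path: it walks the matrix_simd table built in
 * ac3_downmix_sse() (each coefficient pre-broadcast to a 4-float vector)
 * and accumulates every input channel into one or, with the stereo argument
 * set to IF1, two output channels, again in place over the first planes.
 */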
#define MIX_MISC(stereo)                                    \
    __asm__ volatile (                                      \
        "mov    %5, %2                  \n"                 \
        "1:                             \n"                 \
        "mov   -%c7(%6, %2, %c8), %3    \n"                 \
        "movaps (%3, %0), %%xmm0        \n"                 \
 stereo("movaps %%xmm0, %%xmm1          \n")                \
        "mulps  %%xmm4, %%xmm0          \n"                 \
 stereo("mulps  %%xmm5, %%xmm1          \n")                \
        "2:                             \n"                 \
        "mov    (%6, %2, %c8), %1       \n"                 \
        "movaps (%1, %0), %%xmm2        \n"                 \
 stereo("movaps %%xmm2, %%xmm3          \n")                \
        "mulps  (%4, %2, 8), %%xmm2     \n"                 \
 stereo("mulps  16(%4, %2, 8), %%xmm3   \n")                \
        "addps  %%xmm2, %%xmm0          \n"                 \
 stereo("addps  %%xmm3, %%xmm1          \n")                \
        "add    $4, %2                  \n"                 \
        "jl     2b                      \n"                 \
        "mov    %5, %2                  \n"                 \
 stereo("mov    (%6, %2, %c8), %1       \n")                \
        "movaps %%xmm0, (%3, %0)        \n"                 \
 stereo("movaps %%xmm1, (%1, %0)        \n")                \
        "add    $16, %0                 \n"                 \
        "jl     1b                      \n"                 \
        : "+&r"(i), "=&r"(j), "=&r"(k), "=&r"(m)            \
        : "r"(matrix_simd + in_ch),                         \
          "g"((intptr_t) - 4 * (in_ch - 1)),                \
          "r"(samp + in_ch),                                \
          "i"(sizeof(float *)), "i"(sizeof(float *)/4)      \
        : "memory"                                          \
    );

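/*
 * In-place SSE downmix: each of the in_ch source planes holds len floats,
 * and the result replaces the first out_ch planes. The raw bit patterns of
 * the matrix coefficients are compared (via matrix_cmp) to detect the two
 * common 5.0-to-stereo and 5.0-to-mono layouts, which go through MIX5;
 * anything else broadcasts every coefficient into matrix_simd and uses
 * MIX_MISC.
 */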
static void ac3_downmix_sse(float **samples, float (*matrix)[2],
                            int out_ch, int in_ch, int len)
{
    int (*matrix_cmp)[2] = (int(*)[2])matrix;
    intptr_t i, j, k, m;

    i = -len * sizeof(float);
    if (in_ch == 5 && out_ch == 2 &&
        !(matrix_cmp[0][1] | matrix_cmp[2][0]   |
          matrix_cmp[3][1] | matrix_cmp[4][0]   |
          (matrix_cmp[1][0] ^ matrix_cmp[1][1]) |
          (matrix_cmp[0][0] ^ matrix_cmp[2][1]))) {
        MIX5(IF0, IF1);
    } else if (in_ch == 5 && out_ch == 1 &&
               matrix_cmp[0][0] == matrix_cmp[2][0] &&
               matrix_cmp[3][0] == matrix_cmp[4][0]) {
        MIX5(IF1, IF0);
    } else {
        DECLARE_ALIGNED(16, float, matrix_simd)[AC3_MAX_CHANNELS][2][4];
        float *samp[AC3_MAX_CHANNELS];

        for (j = 0; j < in_ch; j++)
            samp[j] = samples[j] + len;

        j = 2 * in_ch * sizeof(float);
        __asm__ volatile (
            "1:                             \n"
            "sub    $8, %0                  \n"
            "movss   (%2, %0), %%xmm4       \n"
            "movss  4(%2, %0), %%xmm5       \n"
            "shufps $0, %%xmm4, %%xmm4      \n"
            "shufps $0, %%xmm5, %%xmm5      \n"
            "movaps %%xmm4,   (%1, %0, 4)   \n"
            "movaps %%xmm5, 16(%1, %0, 4)   \n"
            "jg     1b                      \n"
            : "+&r"(j)
            : "r"(matrix_simd), "r"(matrix)
            : "memory"
        );
        if (out_ch == 2) {
            MIX_MISC(IF1);
        } else {
            MIX_MISC(IF0);
        }
    }
}

#endif /* HAVE_SSE_INLINE && HAVE_7REGS */

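/*
 * Select the fastest available x86 implementations for the AC3DSPContext
 * function pointers, based on the runtime CPU flags; bit_exact skips the
 * variants whose rounding differs from the C reference. As a hedged usage
 * sketch (the call site is assumed, not taken from this file), the generic
 * init is expected to invoke this after installing the C defaults:
 *
 *     AC3DSPContext c;
 *     ff_ac3dsp_init(&c, bit_exact);   // calls ff_ac3dsp_init_x86() on x86
 */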
av_cold void ff_ac3dsp_init_x86(AC3DSPContext *c, int bit_exact)
{
    int cpu_flags = av_get_cpu_flags();

    if (EXTERNAL_MMX(cpu_flags)) {
        c->ac3_exponent_min       = ff_ac3_exponent_min_mmx;
        c->ac3_max_msb_abs_int16  = ff_ac3_max_msb_abs_int16_mmx;
        c->ac3_lshift_int16       = ff_ac3_lshift_int16_mmx;
        c->ac3_rshift_int32       = ff_ac3_rshift_int32_mmx;
    }
    if (EXTERNAL_AMD3DNOW(cpu_flags)) {
        if (!bit_exact) {
            c->float_to_fixed24   = ff_float_to_fixed24_3dnow;
        }
    }
    if (EXTERNAL_MMXEXT(cpu_flags)) {
        c->ac3_exponent_min       = ff_ac3_exponent_min_mmxext;
        c->ac3_max_msb_abs_int16  = ff_ac3_max_msb_abs_int16_mmxext;
        if (bit_exact) {
            c->apply_window_int16 = ff_apply_window_int16_mmxext;
        } else {
            c->apply_window_int16 = ff_apply_window_int16_round_mmxext;
        }
    }
    if (EXTERNAL_SSE(cpu_flags)) {
        c->float_to_fixed24       = ff_float_to_fixed24_sse;
    }
    if (EXTERNAL_SSE2(cpu_flags)) {
        c->ac3_exponent_min       = ff_ac3_exponent_min_sse2;
        c->ac3_max_msb_abs_int16  = ff_ac3_max_msb_abs_int16_sse2;
        c->float_to_fixed24       = ff_float_to_fixed24_sse2;
        c->compute_mantissa_size  = ff_ac3_compute_mantissa_size_sse2;
        c->extract_exponents      = ff_ac3_extract_exponents_sse2;
        if (!(cpu_flags & AV_CPU_FLAG_SSE2SLOW)) {
            c->ac3_lshift_int16   = ff_ac3_lshift_int16_sse2;
            c->ac3_rshift_int32   = ff_ac3_rshift_int32_sse2;
        }
        if (bit_exact) {
            c->apply_window_int16 = ff_apply_window_int16_sse2;
        } else if (!(cpu_flags & AV_CPU_FLAG_SSE2SLOW)) {
            c->apply_window_int16 = ff_apply_window_int16_round_sse2;
        }
    }
    if (EXTERNAL_SSSE3(cpu_flags)) {
        c->ac3_max_msb_abs_int16  = ff_ac3_max_msb_abs_int16_ssse3;
        if (cpu_flags & AV_CPU_FLAG_ATOM) {
            c->apply_window_int16 = ff_apply_window_int16_ssse3_atom;
        } else {
            c->extract_exponents  = ff_ac3_extract_exponents_ssse3;
            c->apply_window_int16 = ff_apply_window_int16_ssse3;
        }
    }

#if HAVE_SSE_INLINE && HAVE_7REGS
    if (INLINE_SSE(cpu_flags)) {
        c->downmix = ac3_downmix_sse;
    }
#endif
}