float_dsp_mips.c
/*
 * Copyright (c) 2012
 * MIPS Technologies, Inc., California.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Author: Branimir Vasic (bvasic@mips.com)
 * Author: Zoran Lukic (zoranl@mips.com)
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Reference: libavutil/float_dsp.c
 */

#include "config.h"
#include "libavutil/float_dsp.h"
#include "libavutil/mips/asmdefs.h"

#if HAVE_INLINE_ASM && HAVE_MIPSFPU
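/*
 * dst[i] = src0[i] * src1[i] for 0 <= i < len.
 * The inline-asm path below handles four floats per iteration and is only
 * taken when len is a multiple of 4; otherwise the plain C loop is used.
 */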
static void vector_fmul_mips(float *dst, const float *src0, const float *src1,
                             int len)
{
    int i;

    if (len & 3) {
        for (i = 0; i < len; i++)
            dst[i] = src0[i] * src1[i];
    } else {
        float *d     = (float *)dst;
        float *d_end = d + len;
        float *s0    = (float *)src0;
        float *s1    = (float *)src1;

        float src0_0, src0_1, src0_2, src0_3;
        float src1_0, src1_1, src1_2, src1_3;

        __asm__ volatile (
            "1:                                         \n\t"
            "lwc1  %[src0_0], 0(%[s0])                  \n\t"
            "lwc1  %[src1_0], 0(%[s1])                  \n\t"
            "lwc1  %[src0_1], 4(%[s0])                  \n\t"
            "lwc1  %[src1_1], 4(%[s1])                  \n\t"
            "lwc1  %[src0_2], 8(%[s0])                  \n\t"
            "lwc1  %[src1_2], 8(%[s1])                  \n\t"
            "lwc1  %[src0_3], 12(%[s0])                 \n\t"
            "lwc1  %[src1_3], 12(%[s1])                 \n\t"
            "mul.s %[src0_0], %[src0_0], %[src1_0]      \n\t"
            "mul.s %[src0_1], %[src0_1], %[src1_1]      \n\t"
            "mul.s %[src0_2], %[src0_2], %[src1_2]      \n\t"
            "mul.s %[src0_3], %[src0_3], %[src1_3]      \n\t"
            "swc1  %[src0_0], 0(%[d])                   \n\t"
            "swc1  %[src0_1], 4(%[d])                   \n\t"
            "swc1  %[src0_2], 8(%[d])                   \n\t"
            "swc1  %[src0_3], 12(%[d])                  \n\t"
            PTR_ADDIU "%[s0], %[s0], 16                 \n\t"
            PTR_ADDIU "%[s1], %[s1], 16                 \n\t"
            PTR_ADDIU "%[d],  %[d],  16                 \n\t"
            "bne   %[d], %[d_end], 1b                   \n\t"

            : [src0_0]"=&f"(src0_0), [src0_1]"=&f"(src0_1),
              [src0_2]"=&f"(src0_2), [src0_3]"=&f"(src0_3),
              [src1_0]"=&f"(src1_0), [src1_1]"=&f"(src1_1),
              [src1_2]"=&f"(src1_2), [src1_3]"=&f"(src1_3),
              [d]"+r"(d), [s0]"+r"(s0), [s1]"+r"(s1)
            : [d_end]"r"(d_end)
            : "memory"
        );
    }
}

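/*
 * dst[i] = src[i] * mul for 0 <= i < len.
 * The loop is unrolled four times, so the asm assumes len is a non-zero
 * multiple of 4.
 */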
static void vector_fmul_scalar_mips(float *dst, const float *src, float mul,
                                    int len)
{
    float temp0, temp1, temp2, temp3;
    float *local_src = (float *)src;
    float *end = local_src + len;

    /* loop unrolled 4 times */
    __asm__ volatile (
        ".set push                              \n\t"
        ".set noreorder                         \n\t"
        "1:                                     \n\t"
        "lwc1  %[temp0], 0(%[src])              \n\t"
        "lwc1  %[temp1], 4(%[src])              \n\t"
        "lwc1  %[temp2], 8(%[src])              \n\t"
        "lwc1  %[temp3], 12(%[src])             \n\t"
        PTR_ADDIU "%[dst], %[dst], 16           \n\t"
        "mul.s %[temp0], %[temp0], %[mul]       \n\t"
        "mul.s %[temp1], %[temp1], %[mul]       \n\t"
        "mul.s %[temp2], %[temp2], %[mul]       \n\t"
        "mul.s %[temp3], %[temp3], %[mul]       \n\t"
        PTR_ADDIU "%[src], %[src], 16           \n\t"
        "swc1  %[temp0], -16(%[dst])            \n\t"
        "swc1  %[temp1], -12(%[dst])            \n\t"
        "swc1  %[temp2], -8(%[dst])             \n\t"
        "bne   %[src], %[end], 1b               \n\t"
        " swc1 %[temp3], -4(%[dst])             \n\t"
        ".set pop                               \n\t"

        : [temp0]"=&f"(temp0), [temp1]"=&f"(temp1),
          [temp2]"=&f"(temp2), [temp3]"=&f"(temp3),
          [dst]"+r"(dst), [src]"+r"(local_src)
        : [end]"r"(end), [mul]"f"(mul)
        : "memory"
    );
}

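/*
 * Windowed overlap-add. With j = 2 * len - 1 - i, each iteration computes
 *     dst[i] = src0[i] * win[j] - src1[len - 1 - i] * win[i];
 *     dst[j] = src0[i] * win[i] + src1[len - 1 - i] * win[j];
 * using msub.s/madd.s. The loop is unrolled four times, so the asm assumes
 * len is a non-zero multiple of 4.
 */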
static void vector_fmul_window_mips(float *dst, const float *src0,
                                    const float *src1, const float *win, int len)
{
    float *dst_j, *win_j, *src0_i, *src1_j, *dst_i, *win_i;
    float temp, temp1, temp2, temp3;
    float s0, s01, s1, s11;
    float wi, wi1, wi2, wi3;
    float wj, wj1, wj2, wj3;
    const float *lp_end = win + len;

    win_i  = (float *)win;
    win_j  = (float *)(win + 2 * len - 1);
    src1_j = (float *)(src1 + len - 1);
    src0_i = (float *)src0;
    dst_i  = (float *)dst;
    dst_j  = (float *)(dst + 2 * len - 1);

    /* loop unrolled 4 times */
    __asm__ volatile (
        "1:"
        "lwc1   %[s1],    0(%[src1_j])              \n\t"
        "lwc1   %[wi],    0(%[win_i])               \n\t"
        "lwc1   %[wj],    0(%[win_j])               \n\t"
        "lwc1   %[s11],   -4(%[src1_j])             \n\t"
        "lwc1   %[wi1],   4(%[win_i])               \n\t"
        "lwc1   %[wj1],   -4(%[win_j])              \n\t"
        "lwc1   %[s0],    0(%[src0_i])              \n\t"
        "lwc1   %[s01],   4(%[src0_i])              \n\t"
        "mul.s  %[temp],  %[s1],  %[wi]             \n\t"
        "mul.s  %[temp1], %[s1],  %[wj]             \n\t"
        "mul.s  %[temp2], %[s11], %[wi1]            \n\t"
        "mul.s  %[temp3], %[s11], %[wj1]            \n\t"
        "lwc1   %[s1],    -8(%[src1_j])             \n\t"
        "lwc1   %[wi2],   8(%[win_i])               \n\t"
        "lwc1   %[wj2],   -8(%[win_j])              \n\t"
        "lwc1   %[s11],   -12(%[src1_j])            \n\t"
        "msub.s %[temp],  %[temp],  %[s0],  %[wj]   \n\t"
        "madd.s %[temp1], %[temp1], %[s0],  %[wi]   \n\t"
        "msub.s %[temp2], %[temp2], %[s01], %[wj1]  \n\t"
        "madd.s %[temp3], %[temp3], %[s01], %[wi1]  \n\t"
        "lwc1   %[wi3],   12(%[win_i])              \n\t"
        "lwc1   %[wj3],   -12(%[win_j])             \n\t"
        "lwc1   %[s0],    8(%[src0_i])              \n\t"
        "lwc1   %[s01],   12(%[src0_i])             \n\t"
        "addiu  %[src1_j], -16                      \n\t"
        "addiu  %[win_i],  16                       \n\t"
        "addiu  %[win_j],  -16                      \n\t"
        "addiu  %[src0_i], 16                       \n\t"
        "swc1   %[temp],  0(%[dst_i])               \n\t" /* dst[i]   = s0*wj   - s1*wi;   */
        "swc1   %[temp1], 0(%[dst_j])               \n\t" /* dst[j]   = s0*wi   + s1*wj;   */
        "swc1   %[temp2], 4(%[dst_i])               \n\t" /* dst[i+1] = s01*wj1 - s11*wi1; */
        "swc1   %[temp3], -4(%[dst_j])              \n\t" /* dst[j-1] = s01*wi1 + s11*wj1; */
        "mul.s  %[temp],  %[s1],  %[wi2]            \n\t"
        "mul.s  %[temp1], %[s1],  %[wj2]            \n\t"
        "mul.s  %[temp2], %[s11], %[wi3]            \n\t"
        "mul.s  %[temp3], %[s11], %[wj3]            \n\t"
        "msub.s %[temp],  %[temp],  %[s0],  %[wj2]  \n\t"
        "madd.s %[temp1], %[temp1], %[s0],  %[wi2]  \n\t"
        "msub.s %[temp2], %[temp2], %[s01], %[wj3]  \n\t"
        "madd.s %[temp3], %[temp3], %[s01], %[wi3]  \n\t"
        "swc1   %[temp],  8(%[dst_i])               \n\t" /* dst[i+2] = s0*wj2  - s1*wi2;  */
        "swc1   %[temp1], -8(%[dst_j])              \n\t" /* dst[j-2] = s0*wi2  + s1*wj2;  */
        "swc1   %[temp2], 12(%[dst_i])              \n\t" /* dst[i+3] = s01*wj3 - s11*wi3; */
        "swc1   %[temp3], -12(%[dst_j])             \n\t" /* dst[j-3] = s01*wi3 + s11*wj3; */
        "addiu  %[dst_i],  16                       \n\t"
        "addiu  %[dst_j], -16                       \n\t"
        "bne    %[win_i], %[lp_end], 1b             \n\t"
        : [temp]"=&f"(temp), [temp1]"=&f"(temp1), [temp2]"=&f"(temp2),
          [temp3]"=&f"(temp3), [src0_i]"+r"(src0_i), [win_i]"+r"(win_i),
          [src1_j]"+r"(src1_j), [win_j]"+r"(win_j), [dst_i]"+r"(dst_i),
          [dst_j]"+r"(dst_j), [s0]"=&f"(s0), [s01]"=&f"(s01), [s1]"=&f"(s1),
          [s11]"=&f"(s11), [wi]"=&f"(wi), [wj]"=&f"(wj), [wi2]"=&f"(wi2),
          [wj2]"=&f"(wj2), [wi3]"=&f"(wi3), [wj3]"=&f"(wj3), [wi1]"=&f"(wi1),
          [wj1]"=&f"(wj1)
        : [lp_end]"r"(lp_end)
        : "memory"
    );
}

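/*
 * In-place butterflies: for each i,
 *     t     = v1[i] - v2[i];
 *     v1[i] = v1[i] + v2[i];
 *     v2[i] = t;
 * Four elements are processed per iteration and the last group is handled
 * after the loop, so the asm assumes len is a non-zero multiple of 4.
 */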
static void butterflies_float_mips(float *av_restrict v1, float *av_restrict v2,
                                   int len)
{
    float temp0, temp1, temp2, temp3, temp4;
    float temp5, temp6, temp7, temp8, temp9;
    float temp10, temp11, temp12, temp13, temp14, temp15;
    int pom;
    pom = (len >> 2) - 1;

    /* loop unrolled 4 times */
    __asm__ volatile (
        "lwc1  %[temp0],  0(%[v1])              \n\t"
        "lwc1  %[temp1],  4(%[v1])              \n\t"
        "lwc1  %[temp2],  8(%[v1])              \n\t"
        "lwc1  %[temp3],  12(%[v1])             \n\t"
        "lwc1  %[temp4],  0(%[v2])              \n\t"
        "lwc1  %[temp5],  4(%[v2])              \n\t"
        "lwc1  %[temp6],  8(%[v2])              \n\t"
        "lwc1  %[temp7],  12(%[v2])             \n\t"
        "beq   %[pom],    $zero,    2f          \n\t"
        "1:                                     \n\t"
        "sub.s %[temp8],  %[temp0], %[temp4]    \n\t"
        "add.s %[temp9],  %[temp0], %[temp4]    \n\t"
        "sub.s %[temp10], %[temp1], %[temp5]    \n\t"
        "add.s %[temp11], %[temp1], %[temp5]    \n\t"
        "sub.s %[temp12], %[temp2], %[temp6]    \n\t"
        "add.s %[temp13], %[temp2], %[temp6]    \n\t"
        "sub.s %[temp14], %[temp3], %[temp7]    \n\t"
        "add.s %[temp15], %[temp3], %[temp7]    \n\t"
        PTR_ADDIU "%[v1], %[v1], 16             \n\t"
        PTR_ADDIU "%[v2], %[v2], 16             \n\t"
        "addiu %[pom],    %[pom],   -1          \n\t"
        "lwc1  %[temp0],  0(%[v1])              \n\t"
        "lwc1  %[temp1],  4(%[v1])              \n\t"
        "lwc1  %[temp2],  8(%[v1])              \n\t"
        "lwc1  %[temp3],  12(%[v1])             \n\t"
        "lwc1  %[temp4],  0(%[v2])              \n\t"
        "lwc1  %[temp5],  4(%[v2])              \n\t"
        "lwc1  %[temp6],  8(%[v2])              \n\t"
        "lwc1  %[temp7],  12(%[v2])             \n\t"
        "swc1  %[temp9],  -16(%[v1])            \n\t"
        "swc1  %[temp8],  -16(%[v2])            \n\t"
        "swc1  %[temp11], -12(%[v1])            \n\t"
        "swc1  %[temp10], -12(%[v2])            \n\t"
        "swc1  %[temp13], -8(%[v1])             \n\t"
        "swc1  %[temp12], -8(%[v2])             \n\t"
        "swc1  %[temp15], -4(%[v1])             \n\t"
        "swc1  %[temp14], -4(%[v2])             \n\t"
        "bgtz  %[pom],    1b                    \n\t"
        "2:                                     \n\t"
        "sub.s %[temp8],  %[temp0], %[temp4]    \n\t"
        "add.s %[temp9],  %[temp0], %[temp4]    \n\t"
        "sub.s %[temp10], %[temp1], %[temp5]    \n\t"
        "add.s %[temp11], %[temp1], %[temp5]    \n\t"
        "sub.s %[temp12], %[temp2], %[temp6]    \n\t"
        "add.s %[temp13], %[temp2], %[temp6]    \n\t"
        "sub.s %[temp14], %[temp3], %[temp7]    \n\t"
        "add.s %[temp15], %[temp3], %[temp7]    \n\t"
        "swc1  %[temp9],  0(%[v1])              \n\t"
        "swc1  %[temp8],  0(%[v2])              \n\t"
        "swc1  %[temp11], 4(%[v1])              \n\t"
        "swc1  %[temp10], 4(%[v2])              \n\t"
        "swc1  %[temp13], 8(%[v1])              \n\t"
        "swc1  %[temp12], 8(%[v2])              \n\t"
        "swc1  %[temp15], 12(%[v1])             \n\t"
        "swc1  %[temp14], 12(%[v2])             \n\t"

        : [v1]"+r"(v1), [v2]"+r"(v2), [pom]"+r"(pom), [temp0]"=&f"(temp0),
          [temp1]"=&f"(temp1), [temp2]"=&f"(temp2), [temp3]"=&f"(temp3),
          [temp4]"=&f"(temp4), [temp5]"=&f"(temp5), [temp6]"=&f"(temp6),
          [temp7]"=&f"(temp7), [temp8]"=&f"(temp8), [temp9]"=&f"(temp9),
          [temp10]"=&f"(temp10), [temp11]"=&f"(temp11), [temp12]"=&f"(temp12),
          [temp13]"=&f"(temp13), [temp14]"=&f"(temp14), [temp15]"=&f"(temp15)
        :
        : "memory"
    );
}

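/*
 * dst[i] = src0[i] * src1[len - 1 - i] for 0 <= i < len.
 * Each pass of the asm block multiplies four elements while walking src1
 * backwards; any remainder when len is not a multiple of 4 is left untouched.
 */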
static void vector_fmul_reverse_mips(float *dst, const float *src0,
                                     const float *src1, int len)
{
    int i;
    float temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7;
    src1 += len - 1;

    for (i = 0; i < (len >> 2); i++) {
        /* loop unrolled 4 times */
        __asm__ volatile (
            "lwc1  %[temp0], 0(%[src0])         \n\t"
            "lwc1  %[temp1], 0(%[src1])         \n\t"
            "lwc1  %[temp2], 4(%[src0])         \n\t"
            "lwc1  %[temp3], -4(%[src1])        \n\t"
            "lwc1  %[temp4], 8(%[src0])         \n\t"
            "lwc1  %[temp5], -8(%[src1])        \n\t"
            "lwc1  %[temp6], 12(%[src0])        \n\t"
            "lwc1  %[temp7], -12(%[src1])       \n\t"
            "mul.s %[temp0], %[temp1], %[temp0] \n\t"
            "mul.s %[temp2], %[temp3], %[temp2] \n\t"
            "mul.s %[temp4], %[temp5], %[temp4] \n\t"
            "mul.s %[temp6], %[temp7], %[temp6] \n\t"
            PTR_ADDIU "%[src0], %[src0], 16     \n\t"
            PTR_ADDIU "%[src1], %[src1], -16    \n\t"
            PTR_ADDIU "%[dst],  %[dst],  16     \n\t"
            "swc1  %[temp0], -16(%[dst])        \n\t"
            "swc1  %[temp2], -12(%[dst])        \n\t"
            "swc1  %[temp4], -8(%[dst])         \n\t"
            "swc1  %[temp6], -4(%[dst])         \n\t"

            : [dst]"+r"(dst), [src0]"+r"(src0), [src1]"+r"(src1),
              [temp0]"=&f"(temp0), [temp1]"=&f"(temp1), [temp2]"=&f"(temp2),
              [temp3]"=&f"(temp3), [temp4]"=&f"(temp4), [temp5]"=&f"(temp5),
              [temp6]"=&f"(temp6), [temp7]"=&f"(temp7)
            :
            : "memory"
        );
    }
}
#endif /* HAVE_INLINE_ASM && HAVE_MIPSFPU */

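/*
 * Hook the MIPS-optimized routines into the AVFloatDSPContext; when inline
 * asm or the FPU is unavailable, the generic C implementations stay in place.
 */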
void ff_float_dsp_init_mips(AVFloatDSPContext *fdsp)
{
#if HAVE_INLINE_ASM && HAVE_MIPSFPU
    fdsp->vector_fmul         = vector_fmul_mips;
    fdsp->vector_fmul_scalar  = vector_fmul_scalar_mips;
    fdsp->vector_fmul_window  = vector_fmul_window_mips;
    fdsp->butterflies_float   = butterflies_float_mips;
    fdsp->vector_fmul_reverse = vector_fmul_reverse_mips;
#endif /* HAVE_INLINE_ASM && HAVE_MIPSFPU */
}