resample_mmx.h
/*
 * Copyright (c) 2012 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/x86/asm.h"
#include "libavutil/cpu.h"
#include "libswresample/resample.h"

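/*
 * Rounding bias for the int16 cores below: 0x4000 = 1 << 14, i.e. half of
 * the 1 << 15 divisor later applied by "psrad $15", so the accumulated dot
 * product is rounded to nearest instead of truncated.  Only the first qword
 * carries the bias; the second is zero, so the 16-byte "movdqa" load in the
 * SSE2 core still contributes the bias exactly once after the horizontal
 * reduction.
 */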
DECLARE_ALIGNED(16, const uint64_t, ff_resample_int16_rounder)[2] = { 0x0000000000004000ULL, 0x0000000000000000ULL};

#define COMMON_CORE_INT16_MMX2 \
    x86_reg len= -2*c->filter_length;\
__asm__ volatile(\
    "movq "MANGLE(ff_resample_int16_rounder)", %%mm0 \n\t"\
    "1:                              \n\t"\
    "movq     (%1, %0), %%mm1        \n\t"\
    "pmaddwd  (%2, %0), %%mm1        \n\t"\
    "paddd    %%mm1, %%mm0           \n\t"\
    "add      $8, %0                 \n\t"\
    " js 1b                          \n\t"\
    "pshufw   $0x0E, %%mm0, %%mm1    \n\t"\
    "paddd    %%mm1, %%mm0           \n\t"\
    "psrad    $15, %%mm0             \n\t"\
    "packssdw %%mm0, %%mm0           \n\t"\
    "movd     %%mm0, (%3)            \n\t"\
    : "+r" (len)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (dst+dst_index)\
      NAMED_CONSTRAINTS_ARRAY_ADD(ff_resample_int16_rounder)\
);

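/*
 * Illustrative scalar equivalent of COMMON_CORE_INT16_MMX2, not part of the
 * original header.  It assumes the names the expanding template provides to
 * the macro (c, src, filter, dst, sample_index, dst_index).  Starting from
 * the 1 << 14 bias preloaded into %mm0, the MMX2 loop walks the negative
 * byte offset "len" up to zero, accumulating int16*int16 products into
 * 32-bit lanes via pmaddwd, then reduces the lanes, shifts right by 15 and
 * saturates to int16 with packssdw.
 */
#if 0
{
    int i, val = 1 << 14;                       /* bias, as in ff_resample_int16_rounder */
    for (i = 0; i < c->filter_length; i++)
        val += src[sample_index + i] * (int)filter[i];
    dst[dst_index] = av_clip_int16(val >> 15);  /* packssdw saturates the same way */
}
#endif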
#define LINEAR_CORE_INT16_MMX2 \
    x86_reg len= -2*c->filter_length;\
__asm__ volatile(\
    "pxor     %%mm0, %%mm0           \n\t"\
    "pxor     %%mm2, %%mm2           \n\t"\
    "1:                              \n\t"\
    "movq     (%3, %0), %%mm1        \n\t"\
    "movq     %%mm1, %%mm3           \n\t"\
    "pmaddwd  (%4, %0), %%mm1        \n\t"\
    "pmaddwd  (%5, %0), %%mm3        \n\t"\
    "paddd    %%mm1, %%mm0           \n\t"\
    "paddd    %%mm3, %%mm2           \n\t"\
    "add      $8, %0                 \n\t"\
    " js 1b                          \n\t"\
    "pshufw   $0x0E, %%mm0, %%mm1    \n\t"\
    "pshufw   $0x0E, %%mm2, %%mm3    \n\t"\
    "paddd    %%mm1, %%mm0           \n\t"\
    "paddd    %%mm3, %%mm2           \n\t"\
    "movd     %%mm0, %1              \n\t"\
    "movd     %%mm2, %2              \n\t"\
    : "+r" (len),\
      "=r" (val),\
      "=r" (v2)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (((uint8_t*)(filter+c->filter_alloc))-len)\
);

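/*
 * Illustrative scalar equivalent of LINEAR_CORE_INT16_MMX2, not part of the
 * original header.  Two dot products are formed in one pass: "val" against
 * the coefficients at "filter" and "v2" against the set c->filter_alloc
 * entries further on (the next filter phase, as the LINEAR name suggests).
 * No rounding or shifting happens here; that is presumably left to the
 * template code that expands the macro and interpolates val and v2.
 */
#if 0
{
    int i;
    val = 0;
    v2  = 0;
    for (i = 0; i < c->filter_length; i++) {
        val += src[sample_index + i] * (int)filter[i];
        v2  += src[sample_index + i] * (int)filter[i + c->filter_alloc];
    }
}
#endif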
#define COMMON_CORE_INT16_SSE2 \
    x86_reg len= -2*c->filter_length;\
__asm__ volatile(\
    "movdqa "MANGLE(ff_resample_int16_rounder)", %%xmm0 \n\t"\
    "1:                              \n\t"\
    "movdqu   (%1, %0), %%xmm1       \n\t"\
    "pmaddwd  (%2, %0), %%xmm1       \n\t"\
    "paddd    %%xmm1, %%xmm0         \n\t"\
    "add      $16, %0                \n\t"\
    " js 1b                          \n\t"\
    "pshufd   $0x0E, %%xmm0, %%xmm1  \n\t"\
    "paddd    %%xmm1, %%xmm0         \n\t"\
    "pshufd   $0x01, %%xmm0, %%xmm1  \n\t"\
    "paddd    %%xmm1, %%xmm0         \n\t"\
    "psrad    $15, %%xmm0            \n\t"\
    "packssdw %%xmm0, %%xmm0         \n\t"\
    "movd     %%xmm0, (%3)           \n\t"\
    : "+r" (len)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (dst+dst_index)\
      NAMED_CONSTRAINTS_ARRAY_ADD(ff_resample_int16_rounder)\
      XMM_CLOBBERS_ONLY("%xmm0", "%xmm1")\
);

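/*
 * COMMON_CORE_INT16_SSE2 computes the same result as the MMX2 core above but
 * consumes 8 int16 samples (16 bytes) per iteration, loads the source with
 * unaligned "movdqu", and needs two pshufd/paddd steps to fold the four
 * 32-bit lanes of %xmm0 into one sum before the shift, pack and store.
 */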
#define LINEAR_CORE_INT16_SSE2 \
    x86_reg len= -2*c->filter_length;\
__asm__ volatile(\
    "pxor     %%xmm0, %%xmm0         \n\t"\
    "pxor     %%xmm2, %%xmm2         \n\t"\
    "1:                              \n\t"\
    "movdqu   (%3, %0), %%xmm1       \n\t"\
    "movdqa   %%xmm1, %%xmm3         \n\t"\
    "pmaddwd  (%4, %0), %%xmm1       \n\t"\
    "pmaddwd  (%5, %0), %%xmm3       \n\t"\
    "paddd    %%xmm1, %%xmm0         \n\t"\
    "paddd    %%xmm3, %%xmm2         \n\t"\
    "add      $16, %0                \n\t"\
    " js 1b                          \n\t"\
    "pshufd   $0x0E, %%xmm0, %%xmm1  \n\t"\
    "pshufd   $0x0E, %%xmm2, %%xmm3  \n\t"\
    "paddd    %%xmm1, %%xmm0         \n\t"\
    "paddd    %%xmm3, %%xmm2         \n\t"\
    "pshufd   $0x01, %%xmm0, %%xmm1  \n\t"\
    "pshufd   $0x01, %%xmm2, %%xmm3  \n\t"\
    "paddd    %%xmm1, %%xmm0         \n\t"\
    "paddd    %%xmm3, %%xmm2         \n\t"\
    "movd     %%xmm0, %1             \n\t"\
    "movd     %%xmm2, %2             \n\t"\
    : "+r" (len),\
      "=r" (val),\
      "=r" (v2)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (((uint8_t*)(filter+c->filter_alloc))-len)\
      XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3")\
);

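/*
 * LINEAR_CORE_INT16_SSE2 is the SSE2 counterpart of LINEAR_CORE_INT16_MMX2:
 * the same two accumulators, 8 samples per iteration, the double
 * pshufd/paddd reduction, and a final "movd" that writes only the low
 * 32 bits of each accumulator into val and v2.
 */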
#define COMMON_CORE_DBL_SSE2 \
    x86_reg len= -8*c->filter_length;\
__asm__ volatile(\
    "xorpd    %%xmm0, %%xmm0         \n\t"\
    "1:                              \n\t"\
    "movupd   (%1, %0), %%xmm1       \n\t"\
    "mulpd    (%2, %0), %%xmm1       \n\t"\
    "addpd    %%xmm1, %%xmm0         \n\t"\
    "add      $16, %0                \n\t"\
    " js 1b                          \n\t"\
    "movhlps  %%xmm0, %%xmm1         \n\t"\
    "addpd    %%xmm1, %%xmm0         \n\t"\
    "movsd    %%xmm0, (%3)           \n\t"\
    : "+r" (len)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (dst+dst_index)\
      XMM_CLOBBERS_ONLY("%xmm0", "%xmm1")\
);

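/*
 * Illustrative scalar equivalent of COMMON_CORE_DBL_SSE2, not part of the
 * original header.  The loop multiplies and accumulates two doubles (16
 * bytes) per iteration with mulpd/addpd; movhlps + addpd then fold the two
 * lanes together and movsd stores the low double to dst[dst_index].
 */
#if 0
{
    int i;
    double acc = 0.0;
    for (i = 0; i < c->filter_length; i++)
        acc += src[sample_index + i] * filter[i];
    dst[dst_index] = acc;
}
#endif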
#define LINEAR_CORE_DBL_SSE2 \
    x86_reg len= -8*c->filter_length;\
__asm__ volatile(\
    "xorpd    %%xmm0, %%xmm0         \n\t"\
    "xorpd    %%xmm2, %%xmm2         \n\t"\
    "1:                              \n\t"\
    "movupd   (%3, %0), %%xmm1       \n\t"\
    "movapd   %%xmm1, %%xmm3         \n\t"\
    "mulpd    (%4, %0), %%xmm1       \n\t"\
    "mulpd    (%5, %0), %%xmm3       \n\t"\
    "addpd    %%xmm1, %%xmm0         \n\t"\
    "addpd    %%xmm3, %%xmm2         \n\t"\
    "add      $16, %0                \n\t"\
    " js 1b                          \n\t"\
    "movhlps  %%xmm0, %%xmm1         \n\t"\
    "movhlps  %%xmm2, %%xmm3         \n\t"\
    "addpd    %%xmm1, %%xmm0         \n\t"\
    "addpd    %%xmm3, %%xmm2         \n\t"\
    "movsd    %%xmm0, %1             \n\t"\
    "movsd    %%xmm2, %2             \n\t"\
    : "+r" (len),\
      "=m" (val),\
      "=m" (v2)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (((uint8_t*)(filter+c->filter_alloc))-len)\
      XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3")\
);
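
/*
 * Illustrative scalar equivalent of LINEAR_CORE_DBL_SSE2, not part of the
 * original header.  As in the int16 linear core, two dot products are built
 * against adjacent coefficient sets, but here the results leave the asm
 * block through the "=m" memory outputs val and v2.
 */
#if 0
{
    int i;
    val = 0.0;
    v2  = 0.0;
    for (i = 0; i < c->filter_length; i++) {
        val += src[sample_index + i] * filter[i];
        v2  += src[sample_index + i] * filter[i + c->filter_alloc];
    }
}
#endif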