vis.h
/*
 * Copyright (C) 2003 David S. Miller <davem@redhat.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/* You may be asking why I hard-code the instruction opcodes and don't
 * use the normal VIS assembler mnemonics for the VIS instructions.
 *
 * The reason is that Sun, in their infinite wisdom, decided that any
 * binary using a VIS instruction will be marked (in the ELF headers)
 * as doing so, and this prevents the OS from loading such binaries if
 * the current cpu doesn't have VIS. There is no easy way to override
 * this behavior of the assembler that I am aware of.
 *
 * This totally defeats what libmpeg2 is trying to do, which is to allow
 * a single binary to be created and then detect the availability of VIS
 * at runtime.
 *
 * I'm not saying that tainting the binary by default is bad, rather I'm
 * saying that not providing an easy way to override this unnecessarily
 * ties people's hands.
 *
 * Thus, we do the opcode encoding by hand and output 32-bit words in
 * the assembler to keep the binary from becoming tainted.
 */

#ifndef AVCODEC_SPARC_VIS_H
#define AVCODEC_SPARC_VIS_H

#define ACCEL_SPARC_VIS  1
#define ACCEL_SPARC_VIS2 2

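/* Reporting both VIS and VIS 2.0 unconditionally is presumably safe here
 * because this SPARC-specific header is only compiled into builds that
 * already target VIS-capable CPUs (an assumption; the original code
 * carries no explanation). */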
static inline int vis_level(void)
{
    int accel = 0;
    accel |= ACCEL_SPARC_VIS;
    accel |= ACCEL_SPARC_VIS2;
    return accel;
}

#define vis_opc_base ((0x1 << 31) | (0x36 << 19))
#define vis_opf(X)   ((X) << 5)
#define vis_sreg(X)  (X)
#define vis_dreg(X)  (((X) & 0x1f) | ((X) >> 5))
#define vis_rs1_s(X) (vis_sreg(X) << 14)
#define vis_rs1_d(X) (vis_dreg(X) << 14)
#define vis_rs2_s(X) (vis_sreg(X) << 0)
#define vis_rs2_d(X) (vis_dreg(X) << 0)
#define vis_rd_s(X)  (vis_sreg(X) << 25)
#define vis_rd_d(X)  (vis_dreg(X) << 25)

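/* Worked example (added for illustration): vis_padd16(0, 2, 4), defined
 * further below, makes the assembler emit the word
 *
 *     vis_opc_base | vis_opf(0x50) | vis_rs1_d(0) | vis_rs2_d(2) | vis_rd_d(4)
 *   = 0x81b00000   | 0x00000a00    | 0x00000000   | 0x00000002   | 0x08000000
 *   = 0x89b00a02
 *
 * which is exactly the instruction word for "fpadd16 %f0, %f2, %f4":
 * op = 2 in bits 31:30, rd in bits 29:25, op3 = 0x36 (IMPDEP1) in bits
 * 24:19, rs1 in bits 18:14, opf in bits 13:5 and rs2 in bits 4:0. */
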
#define vis_ss2s(opf,rs1,rs2,rd) \
        __asm__ volatile (".word %0" \
                          : : "i" (vis_opc_base | vis_opf(opf) | \
                                   vis_rs1_s(rs1) | \
                                   vis_rs2_s(rs2) | \
                                   vis_rd_s(rd)))

#define vis_dd2d(opf,rs1,rs2,rd) \
        __asm__ volatile (".word %0" \
                          : : "i" (vis_opc_base | vis_opf(opf) | \
                                   vis_rs1_d(rs1) | \
                                   vis_rs2_d(rs2) | \
                                   vis_rd_d(rd)))

#define vis_ss2d(opf,rs1,rs2,rd) \
        __asm__ volatile (".word %0" \
                          : : "i" (vis_opc_base | vis_opf(opf) | \
                                   vis_rs1_s(rs1) | \
                                   vis_rs2_s(rs2) | \
                                   vis_rd_d(rd)))

#define vis_sd2d(opf,rs1,rs2,rd) \
        __asm__ volatile (".word %0" \
                          : : "i" (vis_opc_base | vis_opf(opf) | \
                                   vis_rs1_s(rs1) | \
                                   vis_rs2_d(rs2) | \
                                   vis_rd_d(rd)))

#define vis_d2s(opf,rs2,rd) \
        __asm__ volatile (".word %0" \
                          : : "i" (vis_opc_base | vis_opf(opf) | \
                                   vis_rs2_d(rs2) | \
                                   vis_rd_s(rd)))

#define vis_s2d(opf,rs2,rd) \
        __asm__ volatile (".word %0" \
                          : : "i" (vis_opc_base | vis_opf(opf) | \
                                   vis_rs2_s(rs2) | \
                                   vis_rd_d(rd)))

#define vis_d12d(opf,rs1,rd) \
        __asm__ volatile (".word %0" \
                          : : "i" (vis_opc_base | vis_opf(opf) | \
                                   vis_rs1_d(rs1) | \
                                   vis_rd_d(rd)))

#define vis_d22d(opf,rs2,rd) \
        __asm__ volatile (".word %0" \
                          : : "i" (vis_opc_base | vis_opf(opf) | \
                                   vis_rs2_d(rs2) | \
                                   vis_rd_d(rd)))

#define vis_s12s(opf,rs1,rd) \
        __asm__ volatile (".word %0" \
                          : : "i" (vis_opc_base | vis_opf(opf) | \
                                   vis_rs1_s(rs1) | \
                                   vis_rd_s(rd)))

#define vis_s22s(opf,rs2,rd) \
        __asm__ volatile (".word %0" \
                          : : "i" (vis_opc_base | vis_opf(opf) | \
                                   vis_rs2_s(rs2) | \
                                   vis_rd_s(rd)))

#define vis_s(opf,rd) \
        __asm__ volatile (".word %0" \
                          : : "i" (vis_opc_base | vis_opf(opf) | \
                                   vis_rd_s(rd)))

#define vis_d(opf,rd) \
        __asm__ volatile (".word %0" \
                          : : "i" (vis_opc_base | vis_opf(opf) | \
                                   vis_rd_d(rd)))

#define vis_r2m(op,rd,mem) \
        __asm__ volatile (#op "\t%%f" #rd ", [%0]" : : "r" (&(mem)))

#define vis_r2m_2(op,rd,mem1,mem2) \
        __asm__ volatile (#op "\t%%f" #rd ", [%0 + %1]" : : "r" (mem1), "r" (mem2))

#define vis_m2r(op,mem,rd) \
        __asm__ volatile (#op "\t[%0], %%f" #rd : : "r" (&(mem)))

#define vis_m2r_2(op,mem1,mem2,rd) \
        __asm__ volatile (#op "\t[%0 + %1], %%f" #rd : : "r" (mem1), "r" (mem2))

static inline void vis_set_gsr(unsigned int val)
{
    __asm__ volatile("mov %0, %%asr19"
                     : : "r" (val));
}

#define VIS_GSR_ALIGNADDR_MASK  0x0000007
#define VIS_GSR_ALIGNADDR_SHIFT 0
#define VIS_GSR_SCALEFACT_MASK  0x0000078
#define VIS_GSR_SCALEFACT_SHIFT 3

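/* Minimal sketch (added for illustration, not part of the original API):
 * the GSR packs the fpack16/fpack32 scale factor into bits 6:3 and the
 * faligndata byte offset into bits 2:0, so programming a scale factor of
 * 3 with a zero align offset looks like this. */
static inline void vis_example_gsr_scale3(void)
{
    vis_set_gsr((3 << VIS_GSR_SCALEFACT_SHIFT) & VIS_GSR_SCALEFACT_MASK);
}
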
#define vis_ld32(mem,rs1)         vis_m2r(ld, mem, rs1)
#define vis_ld32_2(mem1,mem2,rs1) vis_m2r_2(ld, mem1, mem2, rs1)
#define vis_st32(rs1,mem)         vis_r2m(st, rs1, mem)
#define vis_st32_2(rs1,mem1,mem2) vis_r2m_2(st, rs1, mem1, mem2)
#define vis_ld64(mem,rs1)         vis_m2r(ldd, mem, rs1)
#define vis_ld64_2(mem1,mem2,rs1) vis_m2r_2(ldd, mem1, mem2, rs1)
#define vis_st64(rs1,mem)         vis_r2m(std, rs1, mem)
#define vis_st64_2(rs1,mem1,mem2) vis_r2m_2(std, rs1, mem1, mem2)

/* 16 and 32 bit partitioned addition and subtraction. The normal
 * versions perform four 16-bit or two 32-bit additions or subtractions.
 * The 's' versions perform two 16-bit or one 32-bit addition or
 * subtraction.
 */

#define vis_padd16(rs1,rs2,rd)  vis_dd2d(0x50, rs1, rs2, rd)
#define vis_padd16s(rs1,rs2,rd) vis_ss2s(0x51, rs1, rs2, rd)
#define vis_padd32(rs1,rs2,rd)  vis_dd2d(0x52, rs1, rs2, rd)
#define vis_padd32s(rs1,rs2,rd) vis_ss2s(0x53, rs1, rs2, rd)
#define vis_psub16(rs1,rs2,rd)  vis_dd2d(0x54, rs1, rs2, rd)
#define vis_psub16s(rs1,rs2,rd) vis_ss2s(0x55, rs1, rs2, rd)
#define vis_psub32(rs1,rs2,rd)  vis_dd2d(0x56, rs1, rs2, rd)
#define vis_psub32s(rs1,rs2,rd) vis_ss2s(0x57, rs1, rs2, rd)

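/* Minimal usage sketch (added for illustration, not part of the original
 * API): lane-wise addition of two 8-byte blocks of 16-bit values. The
 * register numbers are arbitrary even (double) registers, and both
 * pointers are assumed to be 8-byte aligned since ldd/std require it. */
static inline void vis_example_add16(const short *a, const short *b, short *dst)
{
    vis_ld64(a[0], 0);   /* ldd [a], %f0 : four 16-bit lanes */
    vis_ld64(b[0], 2);   /* ldd [b], %f2                     */
    vis_padd16(0, 2, 4); /* fpadd16 %f0, %f2, %f4            */
    vis_st64(4, dst[0]); /* std %f4, [dst]                   */
}
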
/* Pixel formatting instructions. */

#define vis_pack16(rs2,rd)     vis_d2s (0x3b, rs2, rd)
#define vis_pack32(rs1,rs2,rd) vis_dd2d(0x3a, rs1, rs2, rd)
#define vis_packfix(rs2,rd)    vis_d2s (0x3d, rs2, rd)
#define vis_expand(rs2,rd)     vis_s2d (0x4d, rs2, rd)
#define vis_pmerge(rs1,rs2,rd) vis_ss2d(0x4b, rs1, rs2, rd)

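/* Minimal sketch (added for illustration, not part of the original API):
 * fexpand widens four unsigned bytes into four 16-bit lanes shifted left
 * by 4, and fpack16 narrows them back using the GSR scale factor, so a
 * scale factor of 3 makes the two a round trip. Both pointers are assumed
 * 4-byte aligned for ld/st. */
static inline void vis_example_expand_pack(const unsigned char *src,
                                           unsigned char *dst)
{
    vis_set_gsr(3 << VIS_GSR_SCALEFACT_SHIFT); /* undo fexpand's << 4 */
    vis_ld32(src[0], 1);                       /* ld [src], %f1       */
    vis_expand(1, 2);                          /* fexpand %f1, %f2    */
    vis_pack16(2, 1);                          /* fpack16 %f2, %f1    */
    vis_st32(1, dst[0]);                       /* st %f1, [dst]       */
}
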
/* Partitioned multiply instructions. */

#define vis_mul8x16(rs1,rs2,rd)    vis_sd2d(0x31, rs1, rs2, rd)
#define vis_mul8x16au(rs1,rs2,rd)  vis_ss2d(0x33, rs1, rs2, rd)
#define vis_mul8x16al(rs1,rs2,rd)  vis_ss2d(0x35, rs1, rs2, rd)
#define vis_mul8sux16(rs1,rs2,rd)  vis_dd2d(0x36, rs1, rs2, rd)
#define vis_mul8ulx16(rs1,rs2,rd)  vis_dd2d(0x37, rs1, rs2, rd)
#define vis_muld8sux16(rs1,rs2,rd) vis_ss2d(0x38, rs1, rs2, rd)
#define vis_muld8ulx16(rs1,rs2,rd) vis_ss2d(0x39, rs1, rs2, rd)

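/* Note added for reference (per the VIS instruction set description):
 * fmul8x16 multiplies each unsigned byte of rs1 by the corresponding
 * signed 16-bit lane of rs2 and keeps the rounded upper 16 bits of each
 * 24-bit product; the "au"/"al" variants instead multiply all four bytes
 * by the upper or lower 16-bit half of rs2. */
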
/* Alignment instructions. */

static inline const void *vis_alignaddr(const void *ptr)
{
    __asm__ volatile("alignaddr %0, %%g0, %0"
                     : "=&r" (ptr)
                     : "0" (ptr));

    return ptr;
}

static inline void vis_alignaddr_g0(void *ptr)
{
    __asm__ volatile("alignaddr %0, %%g0, %%g0"
                     : : "r" (ptr));
}

#define vis_faligndata(rs1,rs2,rd) vis_dd2d(0x48, rs1, rs2, rd)

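/* Minimal sketch of the classic VIS unaligned-load idiom (added for
 * illustration, not part of the original API): alignaddr rounds the
 * address down to an 8-byte boundary and latches the discarded byte
 * offset in the GSR, and faligndata then funnel-shifts a register pair
 * by that offset, leaving the 8 bytes that start at the original
 * unaligned address in %f4. */
static inline void vis_example_unaligned_ld64(const void *ptr)
{
    const char *aligned = vis_alignaddr(ptr); /* round down, set GSR offset */

    vis_ld64(aligned[0], 0);  /* ldd [aligned],     %f0   */
    vis_ld64(aligned[8], 2);  /* ldd [aligned + 8], %f2   */
    vis_faligndata(0, 2, 4);  /* faligndata %f0, %f2, %f4 */
}
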
/* Logical operate instructions. */

#define vis_fzero(rd)            vis_d   (0x60, rd)
#define vis_fzeros(rd)           vis_s   (0x61, rd)
#define vis_fone(rd)             vis_d   (0x7e, rd)
#define vis_fones(rd)            vis_s   (0x7f, rd)
#define vis_src1(rs1,rd)         vis_d12d(0x74, rs1, rd)
#define vis_src1s(rs1,rd)        vis_s12s(0x75, rs1, rd)
#define vis_src2(rs2,rd)         vis_d22d(0x78, rs2, rd)
#define vis_src2s(rs2,rd)        vis_s22s(0x79, rs2, rd)
#define vis_not1(rs1,rd)         vis_d12d(0x6a, rs1, rd)
#define vis_not1s(rs1,rd)        vis_s12s(0x6b, rs1, rd)
#define vis_not2(rs2,rd)         vis_d22d(0x66, rs2, rd)
#define vis_not2s(rs2,rd)        vis_s22s(0x67, rs2, rd)
#define vis_or(rs1,rs2,rd)       vis_dd2d(0x7c, rs1, rs2, rd)
#define vis_ors(rs1,rs2,rd)      vis_ss2s(0x7d, rs1, rs2, rd)
#define vis_nor(rs1,rs2,rd)      vis_dd2d(0x62, rs1, rs2, rd)
#define vis_nors(rs1,rs2,rd)     vis_ss2s(0x63, rs1, rs2, rd)
#define vis_and(rs1,rs2,rd)      vis_dd2d(0x70, rs1, rs2, rd)
#define vis_ands(rs1,rs2,rd)     vis_ss2s(0x71, rs1, rs2, rd)
#define vis_nand(rs1,rs2,rd)     vis_dd2d(0x6e, rs1, rs2, rd)
#define vis_nands(rs1,rs2,rd)    vis_ss2s(0x6f, rs1, rs2, rd)
#define vis_xor(rs1,rs2,rd)      vis_dd2d(0x6c, rs1, rs2, rd)
#define vis_xors(rs1,rs2,rd)     vis_ss2s(0x6d, rs1, rs2, rd)
#define vis_xnor(rs1,rs2,rd)     vis_dd2d(0x72, rs1, rs2, rd)
#define vis_xnors(rs1,rs2,rd)    vis_ss2s(0x73, rs1, rs2, rd)
#define vis_ornot1(rs1,rs2,rd)   vis_dd2d(0x7a, rs1, rs2, rd)
#define vis_ornot1s(rs1,rs2,rd)  vis_ss2s(0x7b, rs1, rs2, rd)
#define vis_ornot2(rs1,rs2,rd)   vis_dd2d(0x76, rs1, rs2, rd)
#define vis_ornot2s(rs1,rs2,rd)  vis_ss2s(0x77, rs1, rs2, rd)
#define vis_andnot1(rs1,rs2,rd)  vis_dd2d(0x68, rs1, rs2, rd)
#define vis_andnot1s(rs1,rs2,rd) vis_ss2s(0x69, rs1, rs2, rd)
#define vis_andnot2(rs1,rs2,rd)  vis_dd2d(0x64, rs1, rs2, rd)
#define vis_andnot2s(rs1,rs2,rd) vis_ss2s(0x65, rs1, rs2, rd)

/* Pixel component distance. */

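/* Note added for reference (per the VIS instruction set description):
 * pdist sums the absolute differences of the eight byte pairs in rs1 and
 * rs2 and accumulates the result into rd, which is the inner step of a
 * sum-of-absolute-differences motion-estimation loop. */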
#define vis_pdist(rs1,rs2,rd) vis_dd2d(0x3e, rs1, rs2, rd)

#endif /* AVCODEC_SPARC_VIS_H */