generic_macros_msa.h
/*
 * Copyright (c) 2015 Manojkumar Bhosale (Manojkumar.Bhosale@imgtec.com)
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVUTIL_MIPS_GENERIC_MACROS_MSA_H
#define AVUTIL_MIPS_GENERIC_MACROS_MSA_H

#include <stdint.h>
#include <msa.h>
#include <config.h>

#if HAVE_MSA2
#include <msa2.h>
#endif

#define ALIGNMENT 16
#define ALLOC_ALIGNED(align) __attribute__ ((aligned((align) << 1)))

#define LD_V(RTYPE, psrc) *((RTYPE *)(psrc))
#define LD_UB(...) LD_V(v16u8, __VA_ARGS__)
#define LD_SB(...) LD_V(v16i8, __VA_ARGS__)
#define LD_UH(...) LD_V(v8u16, __VA_ARGS__)
#define LD_SH(...) LD_V(v8i16, __VA_ARGS__)
#define LD_UW(...) LD_V(v4u32, __VA_ARGS__)
#define LD_SW(...) LD_V(v4i32, __VA_ARGS__)

#define ST_V(RTYPE, in, pdst) *((RTYPE *)(pdst)) = (in)
#define ST_UB(...) ST_V(v16u8, __VA_ARGS__)
#define ST_SB(...) ST_V(v16i8, __VA_ARGS__)
#define ST_UH(...) ST_V(v8u16, __VA_ARGS__)
#define ST_SH(...) ST_V(v8i16, __VA_ARGS__)
#define ST_UW(...) ST_V(v4u32, __VA_ARGS__)
#define ST_SW(...) ST_V(v4i32, __VA_ARGS__)

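/* Usage sketch (illustrative addition, not part of the original header):
 * LD_UB/ST_UB simply reinterpret the pointer as a vector type, so a 16-byte
 * block copy reduces to one load and one store. The helper name copy_block16
 * is hypothetical.
 *
 *     static inline void copy_block16(uint8_t *src, uint8_t *dst)
 *     {
 *         v16u8 vec = LD_UB(src);  // load 16 unsigned byte elements
 *
 *         ST_UB(vec, dst);         // store all 16 bytes to dst
 *     }
 */
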
#if (__mips_isa_rev >= 6)
    #define LH(psrc)                                  \
    ( {                                               \
        uint16_t val_lh_m = *(uint16_t *)(psrc);      \
        val_lh_m;                                     \
    } )

    #define LW(psrc)                                  \
    ( {                                               \
        uint32_t val_lw_m = *(uint32_t *)(psrc);      \
        val_lw_m;                                     \
    } )

    #if (__mips == 64)
        #define LD(psrc)                              \
        ( {                                           \
            uint64_t val_ld_m = *(uint64_t *)(psrc);  \
            val_ld_m;                                 \
        } )
    #else  // !(__mips == 64)
        #define LD(psrc)                                                    \
        ( {                                                                 \
            uint8_t *psrc_ld_m = (uint8_t *) (psrc);                        \
            uint32_t val0_ld_m, val1_ld_m;                                  \
            uint64_t val_ld_m = 0;                                          \
                                                                            \
            val0_ld_m = LW(psrc_ld_m);                                      \
            val1_ld_m = LW(psrc_ld_m + 4);                                  \
                                                                            \
            val_ld_m = (uint64_t) (val1_ld_m);                              \
            val_ld_m = (uint64_t) ((val_ld_m << 32) & 0xFFFFFFFF00000000);  \
            val_ld_m = (uint64_t) (val_ld_m | (uint64_t) val0_ld_m);        \
                                                                            \
            val_ld_m;                                                       \
        } )
    #endif  // (__mips == 64)

    #define SH(val, pdst) *(uint16_t *)(pdst) = (val);
    #define SW(val, pdst) *(uint32_t *)(pdst) = (val);
    #define SD(val, pdst) *(uint64_t *)(pdst) = (val);

#else  // !(__mips_isa_rev >= 6)
    #define LH(psrc)                                 \
    ( {                                              \
        uint8_t *psrc_lh_m = (uint8_t *) (psrc);     \
        uint16_t val_lh_m;                           \
                                                     \
        __asm__ volatile (                           \
            "ulh  %[val_lh_m],  %[psrc_lh_m]  \n\t"  \
                                                     \
            : [val_lh_m] "=r" (val_lh_m)             \
            : [psrc_lh_m] "m" (*psrc_lh_m)           \
        );                                           \
                                                     \
        val_lh_m;                                    \
    } )

    #define LW(psrc)                                 \
    ( {                                              \
        uint8_t *psrc_lw_m = (uint8_t *) (psrc);     \
        uint32_t val_lw_m;                           \
                                                     \
        __asm__ volatile (                           \
            "ulw  %[val_lw_m],  %[psrc_lw_m]  \n\t"  \
                                                     \
            : [val_lw_m] "=r" (val_lw_m)             \
            : [psrc_lw_m] "m" (*psrc_lw_m)           \
        );                                           \
                                                     \
        val_lw_m;                                    \
    } )

    #if (__mips == 64)
        #define LD(psrc)                                 \
        ( {                                              \
            uint8_t *psrc_ld_m = (uint8_t *) (psrc);     \
            uint64_t val_ld_m = 0;                       \
                                                         \
            __asm__ volatile (                           \
                "uld  %[val_ld_m],  %[psrc_ld_m]  \n\t"  \
                                                         \
                : [val_ld_m] "=r" (val_ld_m)             \
                : [psrc_ld_m] "m" (*psrc_ld_m)           \
            );                                           \
                                                         \
            val_ld_m;                                    \
        } )
    #else  // !(__mips == 64)
        #define LD(psrc)                                                    \
        ( {                                                                 \
            uint8_t *psrc_ld_m = (uint8_t *) (psrc);                        \
            uint32_t val0_ld_m, val1_ld_m;                                  \
            uint64_t val_ld_m = 0;                                          \
                                                                            \
            val0_ld_m = LW(psrc_ld_m);                                      \
            val1_ld_m = LW(psrc_ld_m + 4);                                  \
                                                                            \
            val_ld_m = (uint64_t) (val1_ld_m);                              \
            val_ld_m = (uint64_t) ((val_ld_m << 32) & 0xFFFFFFFF00000000);  \
            val_ld_m = (uint64_t) (val_ld_m | (uint64_t) val0_ld_m);        \
                                                                            \
            val_ld_m;                                                       \
        } )
    #endif  // (__mips == 64)

    #define SH(val, pdst)                            \
    {                                                \
        uint8_t *pdst_sh_m = (uint8_t *) (pdst);     \
        uint16_t val_sh_m = (val);                   \
                                                     \
        __asm__ volatile (                           \
            "ush  %[val_sh_m],  %[pdst_sh_m]  \n\t"  \
                                                     \
            : [pdst_sh_m] "=m" (*pdst_sh_m)          \
            : [val_sh_m] "r" (val_sh_m)              \
        );                                           \
    }

    #define SW(val, pdst)                            \
    {                                                \
        uint8_t *pdst_sw_m = (uint8_t *) (pdst);     \
        uint32_t val_sw_m = (val);                   \
                                                     \
        __asm__ volatile (                           \
            "usw  %[val_sw_m],  %[pdst_sw_m]  \n\t"  \
                                                     \
            : [pdst_sw_m] "=m" (*pdst_sw_m)          \
            : [val_sw_m] "r" (val_sw_m)              \
        );                                           \
    }

    #define SD(val, pdst)                                             \
    {                                                                 \
        uint8_t *pdst_sd_m = (uint8_t *) (pdst);                      \
        uint32_t val0_sd_m, val1_sd_m;                                \
                                                                      \
        val0_sd_m = (uint32_t) ((val) & 0x00000000FFFFFFFF);          \
        val1_sd_m = (uint32_t) (((val) >> 32) & 0x00000000FFFFFFFF);  \
                                                                      \
        SW(val0_sd_m, pdst_sd_m);                                     \
        SW(val1_sd_m, pdst_sd_m + 4);                                 \
    }
#endif  // (__mips_isa_rev >= 6)

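/* Usage sketch (illustrative addition, not part of the original header):
 * LD/SD give a 64-bit copy that tolerates unaligned addresses on either ISA
 * revision. The helper name copy8 is hypothetical.
 *
 *     static inline void copy8(uint8_t *src, uint8_t *dst)
 *     {
 *         uint64_t val = LD(src);  // possibly unaligned 64-bit load
 *
 *         SD(val, dst);            // possibly unaligned 64-bit store
 *     }
 */
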
/* Description : Load 4 words with stride
   Arguments   : Inputs  - psrc    (source pointer to load from)
                         - stride
                 Outputs - out0, out1, out2, out3
   Details     : Loads word in 'out0' from (psrc)
                 Loads word in 'out1' from (psrc + stride)
                 Loads word in 'out2' from (psrc + 2 * stride)
                 Loads word in 'out3' from (psrc + 3 * stride)
*/
#define LW4(psrc, stride, out0, out1, out2, out3)  \
{                                                  \
    out0 = LW((psrc));                             \
    out1 = LW((psrc) + stride);                    \
    out2 = LW((psrc) + 2 * stride);                \
    out3 = LW((psrc) + 3 * stride);                \
}

#define LW2(psrc, stride, out0, out1)  \
{                                      \
    out0 = LW((psrc));                 \
    out1 = LW((psrc) + stride);        \
}

/* Description : Load double words with stride
   Arguments   : Inputs  - psrc    (source pointer to load from)
                         - stride
                 Outputs - out0, out1
   Details     : Loads double word in 'out0' from (psrc)
                 Loads double word in 'out1' from (psrc + stride)
*/
#define LD2(psrc, stride, out0, out1)  \
{                                      \
    out0 = LD((psrc));                 \
    out1 = LD((psrc) + stride);        \
}
#define LD4(psrc, stride, out0, out1, out2, out3)  \
{                                                  \
    LD2((psrc), stride, out0, out1);               \
    LD2((psrc) + 2 * stride, stride, out2, out3);  \
}

/* Description : Store 4 words with stride
   Arguments   : Inputs - in0, in1, in2, in3, pdst, stride
   Details     : Stores word from 'in0' to (pdst)
                 Stores word from 'in1' to (pdst + stride)
                 Stores word from 'in2' to (pdst + 2 * stride)
                 Stores word from 'in3' to (pdst + 3 * stride)
*/
#define SW4(in0, in1, in2, in3, pdst, stride)  \
{                                              \
    SW(in0, (pdst));                           \
    SW(in1, (pdst) + stride);                  \
    SW(in2, (pdst) + 2 * stride);              \
    SW(in3, (pdst) + 3 * stride);              \
}

/* Description : Store 4 double words with stride
   Arguments   : Inputs - in0, in1, in2, in3, pdst, stride
   Details     : Stores double word from 'in0' to (pdst)
                 Stores double word from 'in1' to (pdst + stride)
                 Stores double word from 'in2' to (pdst + 2 * stride)
                 Stores double word from 'in3' to (pdst + 3 * stride)
*/
#define SD4(in0, in1, in2, in3, pdst, stride)  \
{                                              \
    SD(in0, (pdst));                           \
    SD(in1, (pdst) + stride);                  \
    SD(in2, (pdst) + 2 * stride);              \
    SD(in3, (pdst) + 3 * stride);              \
}

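/* Usage sketch (illustrative addition, not part of the original header):
 * copying four 8-byte rows of a picture; src, dst and the two strides are
 * assumed caller-provided.
 *
 *     uint64_t row0, row1, row2, row3;
 *
 *     LD4(src, src_stride, row0, row1, row2, row3);  // four 64-bit rows in
 *     SD4(row0, row1, row2, row3, dst, dst_stride);  // four 64-bit rows out
 */
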
/* Description : Load vector elements with stride
   Arguments   : Inputs  - psrc    (source pointer to load from)
                         - stride
                 Outputs - out0, out1
                 Return Type - as per RTYPE
   Details     : Loads elements in 'out0' from (psrc)
                 Loads elements in 'out1' from (psrc + stride)
*/
#define LD_V2(RTYPE, psrc, stride, out0, out1)  \
{                                               \
    out0 = LD_V(RTYPE, (psrc));                 \
    out1 = LD_V(RTYPE, (psrc) + stride);        \
}
#define LD_UB2(...) LD_V2(v16u8, __VA_ARGS__)
#define LD_SB2(...) LD_V2(v16i8, __VA_ARGS__)
#define LD_UH2(...) LD_V2(v8u16, __VA_ARGS__)
#define LD_SH2(...) LD_V2(v8i16, __VA_ARGS__)
#define LD_SW2(...) LD_V2(v4i32, __VA_ARGS__)

#define LD_V3(RTYPE, psrc, stride, out0, out1, out2)  \
{                                                     \
    LD_V2(RTYPE, (psrc), stride, out0, out1);         \
    out2 = LD_V(RTYPE, (psrc) + 2 * stride);          \
}
#define LD_UB3(...) LD_V3(v16u8, __VA_ARGS__)
#define LD_SB3(...) LD_V3(v16i8, __VA_ARGS__)

#define LD_V4(RTYPE, psrc, stride, out0, out1, out2, out3)  \
{                                                           \
    LD_V2(RTYPE, (psrc), stride, out0, out1);               \
    LD_V2(RTYPE, (psrc) + 2 * stride, stride, out2, out3);  \
}
#define LD_UB4(...) LD_V4(v16u8, __VA_ARGS__)
#define LD_SB4(...) LD_V4(v16i8, __VA_ARGS__)
#define LD_UH4(...) LD_V4(v8u16, __VA_ARGS__)
#define LD_SH4(...) LD_V4(v8i16, __VA_ARGS__)

#define LD_V5(RTYPE, psrc, stride, out0, out1, out2, out3, out4)  \
{                                                                 \
    LD_V4(RTYPE, (psrc), stride, out0, out1, out2, out3);         \
    out4 = LD_V(RTYPE, (psrc) + 4 * stride);                      \
}
#define LD_UB5(...) LD_V5(v16u8, __VA_ARGS__)
#define LD_SB5(...) LD_V5(v16i8, __VA_ARGS__)

#define LD_V6(RTYPE, psrc, stride, out0, out1, out2, out3, out4, out5)  \
{                                                                       \
    LD_V4(RTYPE, (psrc), stride, out0, out1, out2, out3);               \
    LD_V2(RTYPE, (psrc) + 4 * stride, stride, out4, out5);              \
}
#define LD_UB6(...) LD_V6(v16u8, __VA_ARGS__)
#define LD_SB6(...) LD_V6(v16i8, __VA_ARGS__)
#define LD_UH6(...) LD_V6(v8u16, __VA_ARGS__)
#define LD_SH6(...) LD_V6(v8i16, __VA_ARGS__)

#define LD_V7(RTYPE, psrc, stride,                               \
              out0, out1, out2, out3, out4, out5, out6)          \
{                                                                \
    LD_V5(RTYPE, (psrc), stride, out0, out1, out2, out3, out4);  \
    LD_V2(RTYPE, (psrc) + 5 * stride, stride, out5, out6);       \
}
#define LD_UB7(...) LD_V7(v16u8, __VA_ARGS__)
#define LD_SB7(...) LD_V7(v16i8, __VA_ARGS__)

#define LD_V8(RTYPE, psrc, stride,                                      \
              out0, out1, out2, out3, out4, out5, out6, out7)           \
{                                                                       \
    LD_V4(RTYPE, (psrc), stride, out0, out1, out2, out3);               \
    LD_V4(RTYPE, (psrc) + 4 * stride, stride, out4, out5, out6, out7);  \
}
#define LD_UB8(...) LD_V8(v16u8, __VA_ARGS__)
#define LD_SB8(...) LD_V8(v16i8, __VA_ARGS__)
#define LD_UH8(...) LD_V8(v8u16, __VA_ARGS__)
#define LD_SH8(...) LD_V8(v8i16, __VA_ARGS__)

#define LD_V16(RTYPE, psrc, stride,                                   \
               out0, out1, out2, out3, out4, out5, out6, out7,        \
               out8, out9, out10, out11, out12, out13, out14, out15)  \
{                                                                     \
    LD_V8(RTYPE, (psrc), stride,                                      \
          out0, out1, out2, out3, out4, out5, out6, out7);            \
    LD_V8(RTYPE, (psrc) + 8 * stride, stride,                         \
          out8, out9, out10, out11, out12, out13, out14, out15);      \
}
#define LD_SH16(...) LD_V16(v8i16, __VA_ARGS__)

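/* Usage sketch (illustrative addition, not part of the original header):
 * loading eight 16-byte rows of a block in one statement; src and stride are
 * assumed caller-provided.
 *
 *     v16u8 r0, r1, r2, r3, r4, r5, r6, r7;
 *
 *     LD_UB8(src, stride, r0, r1, r2, r3, r4, r5, r6, r7);
 */
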
/* Description : Store vectors with stride
   Arguments   : Inputs  - in0, in1, stride
                 Outputs - pdst    (destination pointer to store to)
   Details     : Stores elements from 'in0' to (pdst)
                 Stores elements from 'in1' to (pdst + stride)
*/
#define ST_V2(RTYPE, in0, in1, pdst, stride)  \
{                                             \
    ST_V(RTYPE, in0, (pdst));                 \
    ST_V(RTYPE, in1, (pdst) + stride);        \
}
#define ST_UB2(...) ST_V2(v16u8, __VA_ARGS__)
#define ST_SB2(...) ST_V2(v16i8, __VA_ARGS__)
#define ST_UH2(...) ST_V2(v8u16, __VA_ARGS__)
#define ST_SH2(...) ST_V2(v8i16, __VA_ARGS__)
#define ST_SW2(...) ST_V2(v4i32, __VA_ARGS__)

#define ST_V4(RTYPE, in0, in1, in2, in3, pdst, stride)    \
{                                                         \
    ST_V2(RTYPE, in0, in1, (pdst), stride);               \
    ST_V2(RTYPE, in2, in3, (pdst) + 2 * stride, stride);  \
}
#define ST_UB4(...) ST_V4(v16u8, __VA_ARGS__)
#define ST_SB4(...) ST_V4(v16i8, __VA_ARGS__)
#define ST_SH4(...) ST_V4(v8i16, __VA_ARGS__)
#define ST_SW4(...) ST_V4(v4i32, __VA_ARGS__)

#define ST_V6(RTYPE, in0, in1, in2, in3, in4, in5, pdst, stride)  \
{                                                                 \
    ST_V4(RTYPE, in0, in1, in2, in3, (pdst), stride);             \
    ST_V2(RTYPE, in4, in5, (pdst) + 4 * stride, stride);          \
}
#define ST_SH6(...) ST_V6(v8i16, __VA_ARGS__)

#define ST_V8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride)  \
{                                                                           \
    ST_V4(RTYPE, in0, in1, in2, in3, (pdst), stride);                       \
    ST_V4(RTYPE, in4, in5, in6, in7, (pdst) + 4 * stride, stride);          \
}
#define ST_UB8(...) ST_V8(v16u8, __VA_ARGS__)
#define ST_SH8(...) ST_V8(v8i16, __VA_ARGS__)
#define ST_SW8(...) ST_V8(v4i32, __VA_ARGS__)

/* Description : Store half word elements of vector with stride
 * Arguments   : Inputs - in      (source vector)
 *                      - pdst    (destination pointer to store to)
 *                      - stride
 * Details     : Stores half word 'idx0' from 'in' to (pdst)
 *               Stores half word 'idx1' from 'in' to (pdst + stride)
 *               Similar for other elements
 */
#define ST_H1(in, idx, pdst)                   \
{                                              \
    uint16_t out0_m;                           \
    out0_m = __msa_copy_u_h((v8i16) in, idx);  \
    SH(out0_m, (pdst));                        \
}
#define ST_H2(in, idx0, idx1, pdst, stride)     \
{                                               \
    uint16_t out0_m, out1_m;                    \
    out0_m = __msa_copy_u_h((v8i16) in, idx0);  \
    out1_m = __msa_copy_u_h((v8i16) in, idx1);  \
    SH(out0_m, (pdst));                         \
    SH(out1_m, (pdst) + stride);                \
}
#define ST_H4(in, idx0, idx1, idx2, idx3, pdst, stride)  \
{                                                        \
    uint16_t out0_m, out1_m, out2_m, out3_m;             \
    out0_m = __msa_copy_u_h((v8i16) in, idx0);           \
    out1_m = __msa_copy_u_h((v8i16) in, idx1);           \
    out2_m = __msa_copy_u_h((v8i16) in, idx2);           \
    out3_m = __msa_copy_u_h((v8i16) in, idx3);           \
    SH(out0_m, (pdst));                                  \
    SH(out1_m, (pdst) + stride);                         \
    SH(out2_m, (pdst) + 2 * stride);                     \
    SH(out3_m, (pdst) + 3 * stride);                     \
}
#define ST_H8(in, idx0, idx1, idx2, idx3, idx4, idx5,                \
              idx6, idx7, pdst, stride)                              \
{                                                                    \
    ST_H4(in, idx0, idx1, idx2, idx3, (pdst), stride);               \
    ST_H4(in, idx4, idx5, idx6, idx7, (pdst) + 4 * stride, stride);  \
}

/* Description : Store word elements of vector with stride
 * Arguments   : Inputs - in      (source vector)
 *                      - pdst    (destination pointer to store to)
 *                      - stride
 * Details     : Stores word 'idx0' from 'in' to (pdst)
 *               Stores word 'idx1' from 'in' to (pdst + stride)
 *               Similar for other elements
 */
#define ST_W1(in, idx, pdst)                   \
{                                              \
    uint32_t out0_m;                           \
    out0_m = __msa_copy_u_w((v4i32) in, idx);  \
    SW(out0_m, (pdst));                        \
}
#define ST_W2(in, idx0, idx1, pdst, stride)     \
{                                               \
    uint32_t out0_m, out1_m;                    \
    out0_m = __msa_copy_u_w((v4i32) in, idx0);  \
    out1_m = __msa_copy_u_w((v4i32) in, idx1);  \
    SW(out0_m, (pdst));                         \
    SW(out1_m, (pdst) + stride);                \
}
#define ST_W4(in, idx0, idx1, idx2, idx3, pdst, stride)  \
{                                                        \
    uint32_t out0_m, out1_m, out2_m, out3_m;             \
    out0_m = __msa_copy_u_w((v4i32) in, idx0);           \
    out1_m = __msa_copy_u_w((v4i32) in, idx1);           \
    out2_m = __msa_copy_u_w((v4i32) in, idx2);           \
    out3_m = __msa_copy_u_w((v4i32) in, idx3);           \
    SW(out0_m, (pdst));                                  \
    SW(out1_m, (pdst) + stride);                         \
    SW(out2_m, (pdst) + 2 * stride);                     \
    SW(out3_m, (pdst) + 3 * stride);                     \
}
#define ST_W8(in0, in1, idx0, idx1, idx2, idx3,                        \
              idx4, idx5, idx6, idx7, pdst, stride)                    \
{                                                                      \
    ST_W4(in0, idx0, idx1, idx2, idx3, (pdst), stride);                \
    ST_W4(in1, idx4, idx5, idx6, idx7, (pdst) + 4 * stride, stride);   \
}

/* Description : Store double word elements of vector with stride
 * Arguments   : Inputs - in      (source vector)
 *                      - pdst    (destination pointer to store to)
 *                      - stride
 * Details     : Stores double word 'idx0' from 'in' to (pdst)
 *               Stores double word 'idx1' from 'in' to (pdst + stride)
 *               Similar for other elements
 */
#define ST_D1(in, idx, pdst)                   \
{                                              \
    uint64_t out0_m;                           \
    out0_m = __msa_copy_u_d((v2i64) in, idx);  \
    SD(out0_m, (pdst));                        \
}
#define ST_D2(in, idx0, idx1, pdst, stride)     \
{                                               \
    uint64_t out0_m, out1_m;                    \
    out0_m = __msa_copy_u_d((v2i64) in, idx0);  \
    out1_m = __msa_copy_u_d((v2i64) in, idx1);  \
    SD(out0_m, (pdst));                         \
    SD(out1_m, (pdst) + stride);                \
}
#define ST_D4(in0, in1, idx0, idx1, idx2, idx3, pdst, stride)  \
{                                                              \
    uint64_t out0_m, out1_m, out2_m, out3_m;                   \
    out0_m = __msa_copy_u_d((v2i64) in0, idx0);                \
    out1_m = __msa_copy_u_d((v2i64) in0, idx1);                \
    out2_m = __msa_copy_u_d((v2i64) in1, idx2);                \
    out3_m = __msa_copy_u_d((v2i64) in1, idx3);                \
    SD(out0_m, (pdst));                                        \
    SD(out1_m, (pdst) + stride);                               \
    SD(out2_m, (pdst) + 2 * stride);                           \
    SD(out3_m, (pdst) + 3 * stride);                           \
}
#define ST_D8(in0, in1, in2, in3, idx0, idx1, idx2, idx3,                  \
              idx4, idx5, idx6, idx7, pdst, stride)                        \
{                                                                          \
    ST_D4(in0, in1, idx0, idx1, idx2, idx3, (pdst), stride);               \
    ST_D4(in2, in3, idx4, idx5, idx6, idx7, (pdst) + 4 * stride, stride);  \
}

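/* Usage sketch (illustrative addition, not part of the original header):
 * writing a 4x4 byte block held in the four word elements of one vector,
 * which is how narrow (width 4) blocks are usually stored back; vec, dst and
 * stride are assumed caller-provided.
 *
 *     ST_W4(vec, 0, 1, 2, 3, dst, stride);  // word element i goes to row i
 */
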
/* Description : Store as 12x8 byte block to destination memory from
                 input vectors
   Arguments   : Inputs - in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride
   Details     : Index 0 double word element from input vector 'in0' is copied
                 and stored to destination memory at (pblk_12x8_m), followed
                 by index 2 word element from the same input vector 'in0' at
                 (pblk_12x8_m + 8)
                 Similar for the remaining rows
*/
#define ST12x8_UB(in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride)  \
{                                                                        \
    uint64_t out0_m, out1_m, out2_m, out3_m;                             \
    uint64_t out4_m, out5_m, out6_m, out7_m;                             \
    uint32_t out8_m, out9_m, out10_m, out11_m;                           \
    uint32_t out12_m, out13_m, out14_m, out15_m;                         \
    uint8_t *pblk_12x8_m = (uint8_t *) (pdst);                           \
                                                                         \
    out0_m = __msa_copy_u_d((v2i64) in0, 0);                             \
    out1_m = __msa_copy_u_d((v2i64) in1, 0);                             \
    out2_m = __msa_copy_u_d((v2i64) in2, 0);                             \
    out3_m = __msa_copy_u_d((v2i64) in3, 0);                             \
    out4_m = __msa_copy_u_d((v2i64) in4, 0);                             \
    out5_m = __msa_copy_u_d((v2i64) in5, 0);                             \
    out6_m = __msa_copy_u_d((v2i64) in6, 0);                             \
    out7_m = __msa_copy_u_d((v2i64) in7, 0);                             \
                                                                         \
    out8_m  = __msa_copy_u_w((v4i32) in0, 2);                            \
    out9_m  = __msa_copy_u_w((v4i32) in1, 2);                            \
    out10_m = __msa_copy_u_w((v4i32) in2, 2);                            \
    out11_m = __msa_copy_u_w((v4i32) in3, 2);                            \
    out12_m = __msa_copy_u_w((v4i32) in4, 2);                            \
    out13_m = __msa_copy_u_w((v4i32) in5, 2);                            \
    out14_m = __msa_copy_u_w((v4i32) in6, 2);                            \
    out15_m = __msa_copy_u_w((v4i32) in7, 2);                            \
                                                                         \
    SD(out0_m, pblk_12x8_m);                                             \
    SW(out8_m, pblk_12x8_m + 8);                                         \
    pblk_12x8_m += stride;                                               \
    SD(out1_m, pblk_12x8_m);                                             \
    SW(out9_m, pblk_12x8_m + 8);                                         \
    pblk_12x8_m += stride;                                               \
    SD(out2_m, pblk_12x8_m);                                             \
    SW(out10_m, pblk_12x8_m + 8);                                        \
    pblk_12x8_m += stride;                                               \
    SD(out3_m, pblk_12x8_m);                                             \
    SW(out11_m, pblk_12x8_m + 8);                                        \
    pblk_12x8_m += stride;                                               \
    SD(out4_m, pblk_12x8_m);                                             \
    SW(out12_m, pblk_12x8_m + 8);                                        \
    pblk_12x8_m += stride;                                               \
    SD(out5_m, pblk_12x8_m);                                             \
    SW(out13_m, pblk_12x8_m + 8);                                        \
    pblk_12x8_m += stride;                                               \
    SD(out6_m, pblk_12x8_m);                                             \
    SW(out14_m, pblk_12x8_m + 8);                                        \
    pblk_12x8_m += stride;                                               \
    SD(out7_m, pblk_12x8_m);                                             \
    SW(out15_m, pblk_12x8_m + 8);                                        \
}

/* Description : Average with rounding: (in0 + in1 + 1) / 2
   Arguments   : Inputs  - in0, in1, in2, in3
                 Outputs - out0, out1
                 Return Type - as per RTYPE
   Details     : Each byte element from 'in0' is added to the corresponding
                 byte element from 'in1'. The sum plus 1 (for rounding) is
                 computed unsigned with full precision, i.e. the result has
                 one extra bit. Unsigned division by 2 (a logical shift right
                 by one bit) is performed before writing the result to
                 vector 'out0'
                 Similar for the pair of 'in2' and 'in3'
*/
#define AVER_UB2(RTYPE, in0, in1, in2, in3, out0, out1)       \
{                                                             \
    out0 = (RTYPE) __msa_aver_u_b((v16u8) in0, (v16u8) in1);  \
    out1 = (RTYPE) __msa_aver_u_b((v16u8) in2, (v16u8) in3);  \
}
#define AVER_UB2_UB(...) AVER_UB2(v16u8, __VA_ARGS__)

#define AVER_UB4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,  \
                 out0, out1, out2, out3)                         \
{                                                                \
    AVER_UB2(RTYPE, in0, in1, in2, in3, out0, out1);             \
    AVER_UB2(RTYPE, in4, in5, in6, in7, out2, out3);             \
}
#define AVER_UB4_UB(...) AVER_UB4(v16u8, __VA_ARGS__)

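/* Usage sketch (illustrative addition, not part of the original header):
 * rounding average of two pairs of prediction rows, as used for
 * bi-directional motion compensation; all input vectors are assumed
 * caller-provided v16u8 rows.
 *
 *     v16u8 avg0, avg1;
 *
 *     AVER_UB2_UB(pred0, ref0, pred1, ref1, avg0, avg1);
 *     // per byte element: avg0 = (pred0 + ref0 + 1) >> 1
 */
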
/* Description : Immediate number of columns to slide with zero
   Arguments   : Inputs  - in0, in1, slide_val
                 Outputs - out0, out1
                 Return Type - as per RTYPE
   Details     : Byte elements from the 'zero_m' vector are slid into 'in0'
                 by the number of elements specified by 'slide_val'
*/
#define SLDI_B2_0(RTYPE, in0, in1, out0, out1, slide_val)                 \
{                                                                         \
    v16i8 zero_m = { 0 };                                                 \
    out0 = (RTYPE) __msa_sldi_b((v16i8) zero_m, (v16i8) in0, slide_val);  \
    out1 = (RTYPE) __msa_sldi_b((v16i8) zero_m, (v16i8) in1, slide_val);  \
}
#define SLDI_B2_0_UB(...) SLDI_B2_0(v16u8, __VA_ARGS__)
#define SLDI_B2_0_SB(...) SLDI_B2_0(v16i8, __VA_ARGS__)
#define SLDI_B2_0_SW(...) SLDI_B2_0(v4i32, __VA_ARGS__)

#define SLDI_B3_0(RTYPE, in0, in1, in2, out0, out1, out2, slide_val)      \
{                                                                         \
    v16i8 zero_m = { 0 };                                                 \
    SLDI_B2_0(RTYPE, in0, in1, out0, out1, slide_val);                    \
    out2 = (RTYPE) __msa_sldi_b((v16i8) zero_m, (v16i8) in2, slide_val);  \
}
#define SLDI_B3_0_UB(...) SLDI_B3_0(v16u8, __VA_ARGS__)
#define SLDI_B3_0_SB(...) SLDI_B3_0(v16i8, __VA_ARGS__)

#define SLDI_B4_0(RTYPE, in0, in1, in2, in3,            \
                  out0, out1, out2, out3, slide_val)    \
{                                                       \
    SLDI_B2_0(RTYPE, in0, in1, out0, out1, slide_val);  \
    SLDI_B2_0(RTYPE, in2, in3, out2, out3, slide_val);  \
}
#define SLDI_B4_0_UB(...) SLDI_B4_0(v16u8, __VA_ARGS__)
#define SLDI_B4_0_SB(...) SLDI_B4_0(v16i8, __VA_ARGS__)
#define SLDI_B4_0_SH(...) SLDI_B4_0(v8i16, __VA_ARGS__)

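/* Usage sketch (illustrative addition, not part of the original header):
 * per the description above, sliding zeros into 'in0'/'in1' by 2 columns
 * discards the first 2 byte elements and zero-fills the tail.
 *
 *     v16u8 out0, out1;
 *
 *     SLDI_B2_0_UB(in0, in1, out0, out1, 2);
 */
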
/* Description : Immediate number of columns to slide
   Arguments   : Inputs  - in0_0, in0_1, in1_0, in1_1, slide_val
                 Outputs - out0, out1
                 Return Type - as per RTYPE
   Details     : Byte elements from 'in0_0' are slid into 'in1_0' by the
                 number of elements specified by 'slide_val'
*/
#define SLDI_B2(RTYPE, in0_0, in0_1, in1_0, in1_1, out0, out1, slide_val)  \
{                                                                          \
    out0 = (RTYPE) __msa_sldi_b((v16i8) in0_0, (v16i8) in1_0, slide_val);  \
    out1 = (RTYPE) __msa_sldi_b((v16i8) in0_1, (v16i8) in1_1, slide_val);  \
}
#define SLDI_B2_UB(...) SLDI_B2(v16u8, __VA_ARGS__)
#define SLDI_B2_SB(...) SLDI_B2(v16i8, __VA_ARGS__)
#define SLDI_B2_SH(...) SLDI_B2(v8i16, __VA_ARGS__)

#define SLDI_B3(RTYPE, in0_0, in0_1, in0_2, in1_0, in1_1, in1_2,           \
                out0, out1, out2, slide_val)                               \
{                                                                          \
    SLDI_B2(RTYPE, in0_0, in0_1, in1_0, in1_1, out0, out1, slide_val);     \
    out2 = (RTYPE) __msa_sldi_b((v16i8) in0_2, (v16i8) in1_2, slide_val);  \
}
#define SLDI_B3_SB(...) SLDI_B3(v16i8, __VA_ARGS__)
#define SLDI_B3_UH(...) SLDI_B3(v8u16, __VA_ARGS__)

/* Description : Shuffle byte vector elements as per mask vector
   Arguments   : Inputs  - in0, in1, in2, in3, mask0, mask1
                 Outputs - out0, out1
                 Return Type - as per RTYPE
   Details     : Byte elements selected from 'in0' and 'in1' are copied to
                 'out0' as per control vector 'mask0'
                 Byte elements selected from 'in2' and 'in3' are copied to
                 'out1' as per control vector 'mask1'
*/
#define VSHF_B2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1)       \
{                                                                          \
    out0 = (RTYPE) __msa_vshf_b((v16i8) mask0, (v16i8) in1, (v16i8) in0);  \
    out1 = (RTYPE) __msa_vshf_b((v16i8) mask1, (v16i8) in3, (v16i8) in2);  \
}
#define VSHF_B2_UB(...) VSHF_B2(v16u8, __VA_ARGS__)
#define VSHF_B2_SB(...) VSHF_B2(v16i8, __VA_ARGS__)
#define VSHF_B2_UH(...) VSHF_B2(v8u16, __VA_ARGS__)
#define VSHF_B2_SH(...) VSHF_B2(v8i16, __VA_ARGS__)

#define VSHF_B3(RTYPE, in0, in1, in2, in3, in4, in5, mask0, mask1, mask2,  \
                out0, out1, out2)                                          \
{                                                                          \
    VSHF_B2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1);          \
    out2 = (RTYPE) __msa_vshf_b((v16i8) mask2, (v16i8) in5, (v16i8) in4);  \
}
#define VSHF_B3_SB(...) VSHF_B3(v16i8, __VA_ARGS__)

#define VSHF_B4(RTYPE, in0, in1, mask0, mask1, mask2, mask3,       \
                out0, out1, out2, out3)                            \
{                                                                  \
    VSHF_B2(RTYPE, in0, in1, in0, in1, mask0, mask1, out0, out1);  \
    VSHF_B2(RTYPE, in0, in1, in0, in1, mask2, mask3, out2, out3);  \
}
#define VSHF_B4_SB(...) VSHF_B4(v16i8, __VA_ARGS__)
#define VSHF_B4_SH(...) VSHF_B4(v8i16, __VA_ARGS__)

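/* Usage sketch (illustrative addition, not part of the original header):
 * gathering the even bytes of 'in0' followed by the even bytes of 'in1' with
 * one shuffle, assuming the usual MSA vshf convention that mask values 0..15
 * index the first data vector and 16..31 the second.
 *
 *     v16i8 mask = { 0, 2, 4, 6, 8, 10, 12, 14,
 *                    16, 18, 20, 22, 24, 26, 28, 30 };
 *     v16i8 out0, out1;
 *
 *     VSHF_B2_SB(in0, in1, in2, in3, mask, mask, out0, out1);
 */
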
/* Description : Shuffle halfword vector elements as per mask vector
   Arguments   : Inputs  - in0, in1, in2, in3, mask0, mask1
                 Outputs - out0, out1
                 Return Type - as per RTYPE
   Details     : Halfword elements selected from 'in0' and 'in1' are copied
                 to 'out0' as per control vector 'mask0'
                 Halfword elements selected from 'in2' and 'in3' are copied
                 to 'out1' as per control vector 'mask1'
*/
#define VSHF_H2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1)       \
{                                                                          \
    out0 = (RTYPE) __msa_vshf_h((v8i16) mask0, (v8i16) in1, (v8i16) in0);  \
    out1 = (RTYPE) __msa_vshf_h((v8i16) mask1, (v8i16) in3, (v8i16) in2);  \
}
#define VSHF_H2_SH(...) VSHF_H2(v8i16, __VA_ARGS__)

#define VSHF_H3(RTYPE, in0, in1, in2, in3, in4, in5, mask0, mask1, mask2,  \
                out0, out1, out2)                                          \
{                                                                          \
    VSHF_H2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1);          \
    out2 = (RTYPE) __msa_vshf_h((v8i16) mask2, (v8i16) in5, (v8i16) in4);  \
}
#define VSHF_H3_SH(...) VSHF_H3(v8i16, __VA_ARGS__)

/* Description : Shuffle word vector elements as per mask vector
   Arguments   : Inputs  - in0, in1, in2, in3, mask0, mask1
                 Outputs - out0, out1
                 Return Type - as per RTYPE
   Details     : Word elements selected from 'in0' and 'in1' are copied to
                 'out0' as per control vector 'mask0'
                 Word elements selected from 'in2' and 'in3' are copied to
                 'out1' as per control vector 'mask1'
*/
#define VSHF_W2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1)       \
{                                                                          \
    out0 = (RTYPE) __msa_vshf_w((v4i32) mask0, (v4i32) in1, (v4i32) in0);  \
    out1 = (RTYPE) __msa_vshf_w((v4i32) mask1, (v4i32) in3, (v4i32) in2);  \
}
#define VSHF_W2_SB(...) VSHF_W2(v16i8, __VA_ARGS__)

/* Description : Dot product of byte vector elements
   Arguments   : Inputs  - mult0, mult1
                           cnst0, cnst1
                 Outputs - out0, out1
                 Return Type - as per RTYPE
   Details     : Unsigned byte elements from 'mult0' are multiplied by
                 unsigned byte elements from 'cnst0', producing a result
                 twice the size of the input, i.e. unsigned halfword.
                 The products of adjacent odd-even element pairs are then
                 added together and stored to the out vector
                 (2 unsigned halfword results)
*/
#define DOTP_UB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1)   \
{                                                                 \
    out0 = (RTYPE) __msa_dotp_u_h((v16u8) mult0, (v16u8) cnst0);  \
    out1 = (RTYPE) __msa_dotp_u_h((v16u8) mult1, (v16u8) cnst1);  \
}
#define DOTP_UB2_UH(...) DOTP_UB2(v8u16, __VA_ARGS__)

#define DOTP_UB4(RTYPE, mult0, mult1, mult2, mult3,           \
                 cnst0, cnst1, cnst2, cnst3,                  \
                 out0, out1, out2, out3)                      \
{                                                             \
    DOTP_UB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1);  \
    DOTP_UB2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3);  \
}
#define DOTP_UB4_UH(...) DOTP_UB4(v8u16, __VA_ARGS__)

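/* Usage sketch (illustrative addition, not part of the original header):
 * the first step of a 2-tap horizontal filter; pix0/pix1 hold interleaved
 * pixel pairs and coeff holds the repeated coefficient pair.
 *
 *     v8u16 sum0, sum1;
 *
 *     DOTP_UB2_UH(pix0, pix1, coeff, coeff, sum0, sum1);
 *     // sum0[i] = pix0[2i] * coeff[2i] + pix0[2i+1] * coeff[2i+1]
 */
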
/* Description : Dot product of byte vector elements
   Arguments   : Inputs  - mult0, mult1
                           cnst0, cnst1
                 Outputs - out0, out1
                 Return Type - as per RTYPE
   Details     : Signed byte elements from 'mult0' are multiplied by
                 signed byte elements from 'cnst0', producing a result
                 twice the size of the input, i.e. signed halfword.
                 The products of adjacent odd-even element pairs are then
                 added together and stored to the out vector
                 (2 signed halfword results)
*/
#define DOTP_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1)   \
{                                                                 \
    out0 = (RTYPE) __msa_dotp_s_h((v16i8) mult0, (v16i8) cnst0);  \
    out1 = (RTYPE) __msa_dotp_s_h((v16i8) mult1, (v16i8) cnst1);  \
}
#define DOTP_SB2_SH(...) DOTP_SB2(v8i16, __VA_ARGS__)

#define DOTP_SB3(RTYPE, mult0, mult1, mult2, cnst0, cnst1, cnst2,  \
                 out0, out1, out2)                                 \
{                                                                  \
    DOTP_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1);       \
    out2 = (RTYPE) __msa_dotp_s_h((v16i8) mult2, (v16i8) cnst2);   \
}
#define DOTP_SB3_SH(...) DOTP_SB3(v8i16, __VA_ARGS__)

#define DOTP_SB4(RTYPE, mult0, mult1, mult2, mult3,                   \
                 cnst0, cnst1, cnst2, cnst3, out0, out1, out2, out3)  \
{                                                                     \
    DOTP_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1);          \
    DOTP_SB2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3);          \
}
#define DOTP_SB4_SH(...) DOTP_SB4(v8i16, __VA_ARGS__)

/* Description : Dot product of halfword vector elements
   Arguments   : Inputs  - mult0, mult1
                           cnst0, cnst1
                 Outputs - out0, out1
                 Return Type - as per RTYPE
   Details     : Signed halfword elements from 'mult0' are multiplied by
                 signed halfword elements from 'cnst0', producing a result
                 twice the size of the input, i.e. signed word.
                 The products of adjacent odd-even element pairs are then
                 added together and stored to the out vector
                 (2 signed word results)
*/
#define DOTP_SH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1)   \
{                                                                 \
    out0 = (RTYPE) __msa_dotp_s_w((v8i16) mult0, (v8i16) cnst0);  \
    out1 = (RTYPE) __msa_dotp_s_w((v8i16) mult1, (v8i16) cnst1);  \
}
#define DOTP_SH2_SW(...) DOTP_SH2(v4i32, __VA_ARGS__)

#define DOTP_SH4(RTYPE, mult0, mult1, mult2, mult3,           \
                 cnst0, cnst1, cnst2, cnst3,                  \
                 out0, out1, out2, out3)                      \
{                                                             \
    DOTP_SH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1);  \
    DOTP_SH2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3);  \
}
#define DOTP_SH4_SW(...) DOTP_SH4(v4i32, __VA_ARGS__)

/* Description : Dot product & addition of byte vector elements
   Arguments   : Inputs  - mult0, mult1
                           cnst0, cnst1
                 Outputs - out0, out1
                 Return Type - as per RTYPE
   Details     : Signed byte elements from 'mult0' are multiplied by
                 signed byte elements from 'cnst0', producing a result
                 twice the size of the input, i.e. signed halfword.
                 The products of adjacent odd-even element pairs are then
                 added to the out vector
                 (2 signed halfword results)
*/
#define DPADD_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1)   \
{                                                                  \
    out0 = (RTYPE) __msa_dpadd_s_h((v8i16) out0,                   \
                                   (v16i8) mult0, (v16i8) cnst0);  \
    out1 = (RTYPE) __msa_dpadd_s_h((v8i16) out1,                   \
                                   (v16i8) mult1, (v16i8) cnst1);  \
}
#define DPADD_SB2_SH(...) DPADD_SB2(v8i16, __VA_ARGS__)

#define DPADD_SB4(RTYPE, mult0, mult1, mult2, mult3,                   \
                  cnst0, cnst1, cnst2, cnst3, out0, out1, out2, out3)  \
{                                                                      \
    DPADD_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1);          \
    DPADD_SB2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3);          \
}
#define DPADD_SB4_SH(...) DPADD_SB4(v8i16, __VA_ARGS__)

/* Description : Dot product & addition of byte vector elements
   Arguments   : Inputs  - mult0, mult1
                           cnst0, cnst1
                 Outputs - out0, out1
                 Return Type - as per RTYPE
   Details     : Unsigned byte elements from 'mult0' are multiplied by
                 unsigned byte elements from 'cnst0', producing a result
                 twice the size of the input, i.e. unsigned halfword.
                 The products of adjacent odd-even element pairs are then
                 added to the out vector
                 (2 unsigned halfword results)
*/
#define DPADD_UB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1)   \
{                                                                  \
    out0 = (RTYPE) __msa_dpadd_u_h((v8u16) out0,                   \
                                   (v16u8) mult0, (v16u8) cnst0);  \
    out1 = (RTYPE) __msa_dpadd_u_h((v8u16) out1,                   \
                                   (v16u8) mult1, (v16u8) cnst1);  \
}
#define DPADD_UB2_UH(...) DPADD_UB2(v8u16, __VA_ARGS__)

/* Description : Dot product & addition of halfword vector elements
   Arguments   : Inputs  - mult0, mult1
                           cnst0, cnst1
                 Outputs - out0, out1
                 Return Type - as per RTYPE
   Details     : Signed halfword elements from 'mult0' are multiplied by
                 signed halfword elements from 'cnst0', producing a result
                 twice the size of the input, i.e. signed word.
                 The products of adjacent odd-even element pairs are then
                 added to the out vector
                 (2 signed word results)
*/
#define DPADD_SH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1)   \
{                                                                  \
    out0 = (RTYPE) __msa_dpadd_s_w((v4i32) out0,                   \
                                   (v8i16) mult0, (v8i16) cnst0);  \
    out1 = (RTYPE) __msa_dpadd_s_w((v4i32) out1,                   \
                                   (v8i16) mult1, (v8i16) cnst1);  \
}
#define DPADD_SH2_SW(...) DPADD_SH2(v4i32, __VA_ARGS__)

#define DPADD_SH4(RTYPE, mult0, mult1, mult2, mult3,                   \
                  cnst0, cnst1, cnst2, cnst3, out0, out1, out2, out3)  \
{                                                                      \
    DPADD_SH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1);          \
    DPADD_SH2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3);          \
}
#define DPADD_SH4_SW(...) DPADD_SH4(v4i32, __VA_ARGS__)

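/* Usage sketch (illustrative addition, not part of the original header):
 * a 4-tap filter accumulated in two steps: DOTP starts the sums with taps
 * 0/1 and DPADD folds in taps 2/3. All source and filter vectors are assumed
 * prepared by the caller.
 *
 *     v8i16 sum0, sum1;
 *
 *     DOTP_SB2_SH(src01_0, src01_1, filt01, filt01, sum0, sum1);
 *     DPADD_SB2_SH(src23_0, src23_1, filt23, filt23, sum0, sum1);
 */
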
/* Description : Minimum values between unsigned elements of
                 either vector are copied to the output vector
   Arguments   : Inputs  - in0, in1, min_vec
                 Outputs - in0, in1 (in place)
                 Return Type - as per RTYPE
   Details     : The minimum of the unsigned halfword element values from
                 'in0' and 'min_vec' is written to output vector 'in0'
*/
#define MIN_UH2(RTYPE, in0, in1, min_vec)               \
{                                                       \
    in0 = (RTYPE) __msa_min_u_h((v8u16) in0, min_vec);  \
    in1 = (RTYPE) __msa_min_u_h((v8u16) in1, min_vec);  \
}
#define MIN_UH2_UH(...) MIN_UH2(v8u16, __VA_ARGS__)

#define MIN_UH4(RTYPE, in0, in1, in2, in3, min_vec)  \
{                                                    \
    MIN_UH2(RTYPE, in0, in1, min_vec);               \
    MIN_UH2(RTYPE, in2, in3, min_vec);               \
}
#define MIN_UH4_UH(...) MIN_UH4(v8u16, __VA_ARGS__)

/* Description : Clips all halfword elements of input vector between min & max
                 out = ((in) < (min)) ? (min) : (((in) > (max)) ? (max) : (in))
   Arguments   : Inputs  - in     (input vector)
                         - min    (min threshold)
                         - max    (max threshold)
                 Outputs - out_m  (output vector with clipped elements)
                 Return Type - signed halfword
*/
#define CLIP_SH(in, min, max)                           \
( {                                                     \
    v8i16 out_m;                                        \
                                                        \
    out_m = __msa_max_s_h((v8i16) min, (v8i16) in);     \
    out_m = __msa_min_s_h((v8i16) max, (v8i16) out_m);  \
    out_m;                                              \
} )

/* Description : Clips all signed halfword elements of input vector
                 between 0 & 255
   Arguments   : Inputs  - in     (input vector)
                 Outputs - out_m  (output vector with clipped elements)
                 Return Type - signed halfword
*/
#define CLIP_SH_0_255(in)                                 \
( {                                                       \
    v8i16 max_m = __msa_ldi_h(255);                       \
    v8i16 out_m;                                          \
                                                          \
    out_m = __msa_maxi_s_h((v8i16) in, 0);                \
    out_m = __msa_min_s_h((v8i16) max_m, (v8i16) out_m);  \
    out_m;                                                \
} )
#define CLIP_SH2_0_255(in0, in1)  \
{                                 \
    in0 = CLIP_SH_0_255(in0);     \
    in1 = CLIP_SH_0_255(in1);     \
}
#define CLIP_SH4_0_255(in0, in1, in2, in3)  \
{                                           \
    CLIP_SH2_0_255(in0, in1);               \
    CLIP_SH2_0_255(in2, in3);               \
}

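/* Usage sketch (illustrative addition, not part of the original header):
 * clamping signed halfword filter results back to the 8-bit pixel range
 * before they are packed and stored; res0/res1 are assumed v8i16.
 *
 *     CLIP_SH2_0_255(res0, res1);  // each element now lies in [0, 255]
 */
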
#define CLIP_SH_0_255_MAX_SATU(in)                    \
( {                                                   \
    v8i16 out_m;                                      \
                                                      \
    out_m = __msa_maxi_s_h((v8i16) in, 0);            \
    out_m = (v8i16) __msa_sat_u_h((v8u16) out_m, 7);  \
    out_m;                                            \
} )
#define CLIP_SH2_0_255_MAX_SATU(in0, in1)  \
{                                          \
    in0 = CLIP_SH_0_255_MAX_SATU(in0);     \
    in1 = CLIP_SH_0_255_MAX_SATU(in1);     \
}
#define CLIP_SH4_0_255_MAX_SATU(in0, in1, in2, in3)  \
{                                                    \
    CLIP_SH2_0_255_MAX_SATU(in0, in1);               \
    CLIP_SH2_0_255_MAX_SATU(in2, in3);               \
}

/* Description : Clips all signed word elements of input vector
                 between 0 & 255
   Arguments   : Inputs  - in     (input vector)
                 Outputs - out_m  (output vector with clipped elements)
                 Return Type - signed word
*/
#define CLIP_SW_0_255(in)                                 \
( {                                                       \
    v4i32 max_m = __msa_ldi_w(255);                       \
    v4i32 out_m;                                          \
                                                          \
    out_m = __msa_maxi_s_w((v4i32) in, 0);                \
    out_m = __msa_min_s_w((v4i32) max_m, (v4i32) out_m);  \
    out_m;                                                \
} )

#define CLIP_SW_0_255_MAX_SATU(in)                    \
( {                                                   \
    v4i32 out_m;                                      \
                                                      \
    out_m = __msa_maxi_s_w((v4i32) in, 0);            \
    out_m = (v4i32) __msa_sat_u_w((v4u32) out_m, 7);  \
    out_m;                                            \
} )
#define CLIP_SW2_0_255_MAX_SATU(in0, in1)  \
{                                          \
    in0 = CLIP_SW_0_255_MAX_SATU(in0);     \
    in1 = CLIP_SW_0_255_MAX_SATU(in1);     \
}
#define CLIP_SW4_0_255_MAX_SATU(in0, in1, in2, in3)  \
{                                                    \
    CLIP_SW2_0_255_MAX_SATU(in0, in1);               \
    CLIP_SW2_0_255_MAX_SATU(in2, in3);               \
}

/* Description : Addition of 4 signed word elements
                 The 4 signed word elements of the input vector are added
                 together and the resulting integer sum is returned
   Arguments   : Inputs  - in     (signed word vector)
                 Outputs - sum_m  (i32 sum)
                 Return Type - signed word
*/
#define HADD_SW_S32(in)                               \
( {                                                   \
    v2i64 res0_m, res1_m;                             \
    int32_t sum_m;                                    \
                                                      \
    res0_m = __msa_hadd_s_d((v4i32) in, (v4i32) in);  \
    res1_m = __msa_splati_d(res0_m, 1);               \
    res0_m += res1_m;                                 \
    sum_m = __msa_copy_s_w((v4i32) res0_m, 0);        \
    sum_m;                                            \
} )

/* Description : Addition of 8 unsigned halfword elements
                 The 8 unsigned halfword elements of the input vector are
                 added together and the resulting integer sum is returned
   Arguments   : Inputs  - in     (unsigned halfword vector)
                 Outputs - sum_m  (u32 sum)
                 Return Type - unsigned word
*/
#define HADD_UH_U32(in)                                  \
( {                                                      \
    v4u32 res_m;                                         \
    v2u64 res0_m, res1_m;                                \
    uint32_t sum_m;                                      \
                                                         \
    res_m = __msa_hadd_u_w((v8u16) in, (v8u16) in);      \
    res0_m = __msa_hadd_u_d(res_m, res_m);               \
    res1_m = (v2u64) __msa_splati_d((v2i64) res0_m, 1);  \
    res0_m += res1_m;                                    \
    sum_m = __msa_copy_u_w((v4i32) res0_m, 0);           \
    sum_m;                                               \
} )

/* Description : Horizontal addition of signed byte vector elements
   Arguments   : Inputs  - in0, in1
                 Outputs - out0, out1
                 Return Type - as per RTYPE
   Details     : Each odd signed byte element from 'in0' is added to the
                 adjacent even signed byte element from 'in0' (pairwise) and
                 the halfword result is stored in 'out0'
*/
#define HADD_SB2(RTYPE, in0, in1, out0, out1)                 \
{                                                             \
    out0 = (RTYPE) __msa_hadd_s_h((v16i8) in0, (v16i8) in0);  \
    out1 = (RTYPE) __msa_hadd_s_h((v16i8) in1, (v16i8) in1);  \
}
#define HADD_SB2_SH(...) HADD_SB2(v8i16, __VA_ARGS__)

#define HADD_SB4(RTYPE, in0, in1, in2, in3, out0, out1, out2, out3)  \
{                                                                    \
    HADD_SB2(RTYPE, in0, in1, out0, out1);                           \
    HADD_SB2(RTYPE, in2, in3, out2, out3);                           \
}
#define HADD_SB4_UH(...) HADD_SB4(v8u16, __VA_ARGS__)
#define HADD_SB4_SH(...) HADD_SB4(v8i16, __VA_ARGS__)

/* Description : Horizontal addition of unsigned byte vector elements
   Arguments   : Inputs  - in0, in1
                 Outputs - out0, out1
                 Return Type - as per RTYPE
   Details     : Each odd unsigned byte element from 'in0' is added to the
                 adjacent even unsigned byte element from 'in0' (pairwise)
                 and the halfword result is stored in 'out0'
*/
#define HADD_UB2(RTYPE, in0, in1, out0, out1)                 \
{                                                             \
    out0 = (RTYPE) __msa_hadd_u_h((v16u8) in0, (v16u8) in0);  \
    out1 = (RTYPE) __msa_hadd_u_h((v16u8) in1, (v16u8) in1);  \
}
#define HADD_UB2_UH(...) HADD_UB2(v8u16, __VA_ARGS__)

#define HADD_UB3(RTYPE, in0, in1, in2, out0, out1, out2)      \
{                                                             \
    HADD_UB2(RTYPE, in0, in1, out0, out1);                    \
    out2 = (RTYPE) __msa_hadd_u_h((v16u8) in2, (v16u8) in2);  \
}
#define HADD_UB3_UH(...) HADD_UB3(v8u16, __VA_ARGS__)

#define HADD_UB4(RTYPE, in0, in1, in2, in3, out0, out1, out2, out3)  \
{                                                                    \
    HADD_UB2(RTYPE, in0, in1, out0, out1);                           \
    HADD_UB2(RTYPE, in2, in3, out2, out3);                           \
}
#define HADD_UB4_UB(...) HADD_UB4(v16u8, __VA_ARGS__)
#define HADD_UB4_UH(...) HADD_UB4(v8u16, __VA_ARGS__)
#define HADD_UB4_SH(...) HADD_UB4(v8i16, __VA_ARGS__)

/* Description : Horizontal subtraction of unsigned byte vector elements
   Arguments   : Inputs  - in0, in1
                 Outputs - out0, out1
                 Return Type - as per RTYPE
   Details     : Each even unsigned byte element from 'in0' is subtracted
                 from the adjacent odd unsigned byte element (pairwise) and
                 the halfword result is stored in 'out0'
*/
#define HSUB_UB2(RTYPE, in0, in1, out0, out1)                 \
{                                                             \
    out0 = (RTYPE) __msa_hsub_u_h((v16u8) in0, (v16u8) in0);  \
    out1 = (RTYPE) __msa_hsub_u_h((v16u8) in1, (v16u8) in1);  \
}
#define HSUB_UB2_UH(...) HSUB_UB2(v8u16, __VA_ARGS__)
#define HSUB_UB2_SH(...) HSUB_UB2(v8i16, __VA_ARGS__)

#define HSUB_UB4(RTYPE, in0, in1, in2, in3, out0, out1, out2, out3)  \
{                                                                    \
    HSUB_UB2(RTYPE, in0, in1, out0, out1);                           \
    HSUB_UB2(RTYPE, in2, in3, out2, out3);                           \
}
#define HSUB_UB4_UH(...) HSUB_UB4(v8u16, __VA_ARGS__)
#define HSUB_UB4_SH(...) HSUB_UB4(v8i16, __VA_ARGS__)

/* Description : SAD (Sum of Absolute Differences)
   Arguments   : Inputs  - in0, in1, ref0, ref1  (unsigned byte src & ref)
                 Outputs - sad_m                 (halfword vector with sad)
                 Return Type - unsigned halfword
   Details     : The absolute difference of all the byte elements from 'in0'
                 with 'ref0' is calculated and preserved in 'diff0'. From the
                 16 unsigned absolute diff values, even-odd pairs are added
                 together to generate 8 halfword results.
*/
#if HAVE_MSA2
#define SAD_UB2_UH(in0, in1, ref0, ref1)                                  \
( {                                                                       \
    v8u16 sad_m = { 0 };                                                  \
    sad_m += __builtin_msa2_sad_adj2_u_w2x_b((v16u8) in0, (v16u8) ref0);  \
    sad_m += __builtin_msa2_sad_adj2_u_w2x_b((v16u8) in1, (v16u8) ref1);  \
    sad_m;                                                                \
} )
#else
#define SAD_UB2_UH(in0, in1, ref0, ref1)                        \
( {                                                             \
    v16u8 diff0_m, diff1_m;                                     \
    v8u16 sad_m = { 0 };                                        \
                                                                \
    diff0_m = __msa_asub_u_b((v16u8) in0, (v16u8) ref0);        \
    diff1_m = __msa_asub_u_b((v16u8) in1, (v16u8) ref1);        \
                                                                \
    sad_m += __msa_hadd_u_h((v16u8) diff0_m, (v16u8) diff0_m);  \
    sad_m += __msa_hadd_u_h((v16u8) diff1_m, (v16u8) diff1_m);  \
                                                                \
    sad_m;                                                      \
} )
#endif  // #if HAVE_MSA2

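/* Usage sketch (illustrative addition, not part of the original header):
 * a 16x2 SAD reduced to one scalar by combining SAD_UB2_UH with the
 * HADD_UH_U32 reduction defined earlier; src0/src1 and ref0/ref1 are assumed
 * v16u8 row vectors.
 *
 *     v8u16 sad_v = SAD_UB2_UH(src0, src1, ref0, ref1);
 *     uint32_t sad = HADD_UH_U32(sad_v);
 */
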
/* Description : Insert specified word elements from input vectors to 1
                 destination vector
   Arguments   : Inputs  - in0, in1, in2, in3  (4 input vectors)
                 Outputs - out                 (output vector)
                 Return Type - as per RTYPE
*/
#define INSERT_W2(RTYPE, in0, in1, out)                 \
{                                                       \
    out = (RTYPE) __msa_insert_w((v4i32) out, 0, in0);  \
    out = (RTYPE) __msa_insert_w((v4i32) out, 1, in1);  \
}
#define INSERT_W2_UB(...) INSERT_W2(v16u8, __VA_ARGS__)
#define INSERT_W2_SB(...) INSERT_W2(v16i8, __VA_ARGS__)

#define INSERT_W4(RTYPE, in0, in1, in2, in3, out)       \
{                                                       \
    out = (RTYPE) __msa_insert_w((v4i32) out, 0, in0);  \
    out = (RTYPE) __msa_insert_w((v4i32) out, 1, in1);  \
    out = (RTYPE) __msa_insert_w((v4i32) out, 2, in2);  \
    out = (RTYPE) __msa_insert_w((v4i32) out, 3, in3);  \
}
#define INSERT_W4_UB(...) INSERT_W4(v16u8, __VA_ARGS__)
#define INSERT_W4_SB(...) INSERT_W4(v16i8, __VA_ARGS__)
#define INSERT_W4_SH(...) INSERT_W4(v8i16, __VA_ARGS__)
#define INSERT_W4_SW(...) INSERT_W4(v4i32, __VA_ARGS__)

/* Description : Insert specified double word elements from input vectors to 1
                 destination vector
   Arguments   : Inputs  - in0, in1  (2 input vectors)
                 Outputs - out       (output vector)
                 Return Type - as per RTYPE
*/
#define INSERT_D2(RTYPE, in0, in1, out)                 \
{                                                       \
    out = (RTYPE) __msa_insert_d((v2i64) out, 0, in0);  \
    out = (RTYPE) __msa_insert_d((v2i64) out, 1, in1);  \
}
#define INSERT_D2_UB(...) INSERT_D2(v16u8, __VA_ARGS__)
#define INSERT_D2_SB(...) INSERT_D2(v16i8, __VA_ARGS__)
#define INSERT_D2_SH(...) INSERT_D2(v8i16, __VA_ARGS__)
#define INSERT_D2_SD(...) INSERT_D2(v2i64, __VA_ARGS__)

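/* Usage sketch (illustrative addition, not part of the original header):
 * gathering four stride-separated 32-bit rows into one vector, the usual way
 * width-4 blocks are vectorized; src and stride are assumed caller-provided.
 *
 *     uint32_t w0, w1, w2, w3;
 *     v16u8 rows = { 0 };
 *
 *     LW4(src, stride, w0, w1, w2, w3);
 *     INSERT_W4_UB(w0, w1, w2, w3, rows);
 */
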
1224 /* Description : Interleave even byte elements from vectors
1225  Arguments : Inputs - in0, in1, in2, in3
1226  Outputs - out0, out1
1227  Return Type - as per RTYPE
1228  Details : Even byte elements of 'in0' and even byte
1229  elements of 'in1' are interleaved and copied to 'out0'
1230  Even byte elements of 'in2' and even byte
1231  elements of 'in3' are interleaved and copied to 'out1'
1232 */
1233 #define ILVEV_B2(RTYPE, in0, in1, in2, in3, out0, out1) \
1234 { \
1235  out0 = (RTYPE) __msa_ilvev_b((v16i8) in1, (v16i8) in0); \
1236  out1 = (RTYPE) __msa_ilvev_b((v16i8) in3, (v16i8) in2); \
1237 }
1238 #define ILVEV_B2_UB(...) ILVEV_B2(v16u8, __VA_ARGS__)
1239 #define ILVEV_B2_SB(...) ILVEV_B2(v16i8, __VA_ARGS__)
1240 #define ILVEV_B2_SH(...) ILVEV_B2(v8i16, __VA_ARGS__)
1241 #define ILVEV_B2_SD(...) ILVEV_B2(v2i64, __VA_ARGS__)
1242 
1243 /* Description : Interleave even halfword elements from vectors
1244  Arguments : Inputs - in0, in1, in2, in3
1245  Outputs - out0, out1
1246  Return Type - as per RTYPE
1247  Details : Even halfword elements of 'in0' and even halfword
1248  elements of 'in1' are interleaved and copied to 'out0'
1249  Even halfword elements of 'in2' and even halfword
1250  elements of 'in3' are interleaved and copied to 'out1'
1251 */
1252 #define ILVEV_H2(RTYPE, in0, in1, in2, in3, out0, out1) \
1253 { \
1254  out0 = (RTYPE) __msa_ilvev_h((v8i16) in1, (v8i16) in0); \
1255  out1 = (RTYPE) __msa_ilvev_h((v8i16) in3, (v8i16) in2); \
1256 }
1257 #define ILVEV_H2_UB(...) ILVEV_H2(v16u8, __VA_ARGS__)
1258 #define ILVEV_H2_SH(...) ILVEV_H2(v8i16, __VA_ARGS__)
1259 #define ILVEV_H2_SW(...) ILVEV_H2(v4i32, __VA_ARGS__)
1260 
1261 /* Description : Interleave even word elements from vectors
1262  Arguments : Inputs - in0, in1, in2, in3
1263  Outputs - out0, out1
1264  Return Type - as per RTYPE
1265  Details : Even word elements of 'in0' and even word
1266  elements of 'in1' are interleaved and copied to 'out0'
1267  Even word elements of 'in2' and even word
1268  elements of 'in3' are interleaved and copied to 'out1'
1269 */
1270 #define ILVEV_W2(RTYPE, in0, in1, in2, in3, out0, out1) \
1271 { \
1272  out0 = (RTYPE) __msa_ilvev_w((v4i32) in1, (v4i32) in0); \
1273  out1 = (RTYPE) __msa_ilvev_w((v4i32) in3, (v4i32) in2); \
1274 }
1275 #define ILVEV_W2_UB(...) ILVEV_W2(v16u8, __VA_ARGS__)
1276 #define ILVEV_W2_SB(...) ILVEV_W2(v16i8, __VA_ARGS__)
1277 #define ILVEV_W2_UH(...) ILVEV_W2(v8u16, __VA_ARGS__)
1278 #define ILVEV_W2_SD(...) ILVEV_W2(v2i64, __VA_ARGS__)
1279 
1280 /* Description : Interleave even double word elements from vectors
1281  Arguments : Inputs - in0, in1, in2, in3
1282  Outputs - out0, out1
1283  Return Type - as per RTYPE
1284  Details : Even double word elements of 'in0' and even double word
1285  elements of 'in1' are interleaved and copied to 'out0'
1286  Even double word elements of 'in2' and even double word
1287  elements of 'in3' are interleaved and copied to 'out1'
1288 */
1289 #define ILVEV_D2(RTYPE, in0, in1, in2, in3, out0, out1) \
1290 { \
1291  out0 = (RTYPE) __msa_ilvev_d((v2i64) in1, (v2i64) in0); \
1292  out1 = (RTYPE) __msa_ilvev_d((v2i64) in3, (v2i64) in2); \
1293 }
1294 #define ILVEV_D2_UB(...) ILVEV_D2(v16u8, __VA_ARGS__)
1295 #define ILVEV_D2_SB(...) ILVEV_D2(v16i8, __VA_ARGS__)
1296 #define ILVEV_D2_SW(...) ILVEV_D2(v4i32, __VA_ARGS__)
1297 
1298 /* Description : Interleave left half of byte elements from vectors
1299  Arguments : Inputs - in0, in1, in2, in3
1300  Outputs - out0, out1
1301  Return Type - as per RTYPE
1302  Details : Left half of byte elements of in0 and left half of byte
1303  elements of in1 are interleaved and copied to out0.
1304  Left half of byte elements of in2 and left half of byte
1305  elements of in3 are interleaved and copied to out1.
1306 */
1307 #define ILVL_B2(RTYPE, in0, in1, in2, in3, out0, out1) \
1308 { \
1309  out0 = (RTYPE) __msa_ilvl_b((v16i8) in0, (v16i8) in1); \
1310  out1 = (RTYPE) __msa_ilvl_b((v16i8) in2, (v16i8) in3); \
1311 }
1312 #define ILVL_B2_UB(...) ILVL_B2(v16u8, __VA_ARGS__)
1313 #define ILVL_B2_SB(...) ILVL_B2(v16i8, __VA_ARGS__)
1314 #define ILVL_B2_UH(...) ILVL_B2(v8u16, __VA_ARGS__)
1315 #define ILVL_B2_SH(...) ILVL_B2(v8i16, __VA_ARGS__)
1316 
1317 #define ILVL_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
1318  out0, out1, out2, out3) \
1319 { \
1320  ILVL_B2(RTYPE, in0, in1, in2, in3, out0, out1); \
1321  ILVL_B2(RTYPE, in4, in5, in6, in7, out2, out3); \
1322 }
1323 #define ILVL_B4_UB(...) ILVL_B4(v16u8, __VA_ARGS__)
1324 #define ILVL_B4_SB(...) ILVL_B4(v16i8, __VA_ARGS__)
1325 #define ILVL_B4_UH(...) ILVL_B4(v8u16, __VA_ARGS__)
1326 #define ILVL_B4_SH(...) ILVL_B4(v8i16, __VA_ARGS__)
1327 
1328 /* Description : Interleave left half of halfword elements from vectors
1329  Arguments : Inputs - in0, in1, in2, in3
1330  Outputs - out0, out1
1331  Return Type - as per RTYPE
1332  Details : Left half of halfword elements of in0 and left half of halfword
1333  elements of in1 are interleaved and copied to out0.
1334  Left half of halfword elements of in2 and left half of halfword
1335  elements of in3 are interleaved and copied to out1.
1336 */
1337 #define ILVL_H2(RTYPE, in0, in1, in2, in3, out0, out1) \
1338 { \
1339  out0 = (RTYPE) __msa_ilvl_h((v8i16) in0, (v8i16) in1); \
1340  out1 = (RTYPE) __msa_ilvl_h((v8i16) in2, (v8i16) in3); \
1341 }
1342 #define ILVL_H2_SH(...) ILVL_H2(v8i16, __VA_ARGS__)
1343 #define ILVL_H2_SW(...) ILVL_H2(v4i32, __VA_ARGS__)
1344 
1345 #define ILVL_H4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
1346  out0, out1, out2, out3) \
1347 { \
1348  ILVL_H2(RTYPE, in0, in1, in2, in3, out0, out1); \
1349  ILVL_H2(RTYPE, in4, in5, in6, in7, out2, out3); \
1350 }
1351 #define ILVL_H4_SH(...) ILVL_H4(v8i16, __VA_ARGS__)
1352 #define ILVL_H4_SW(...) ILVL_H4(v4i32, __VA_ARGS__)
1353 
1354 /* Description : Interleave left half of word elements from vectors
1355  Arguments : Inputs - in0, in1, in2, in3
1356  Outputs - out0, out1
1357  Return Type - as per RTYPE
1358  Details : Left half of word elements of in0 and left half of word
1359  elements of in1 are interleaved and copied to out0.
1360  Left half of word elements of in2 and left half of word
1361  elements of in3 are interleaved and copied to out1.
1362 */
1363 #define ILVL_W2(RTYPE, in0, in1, in2, in3, out0, out1) \
1364 { \
1365  out0 = (RTYPE) __msa_ilvl_w((v4i32) in0, (v4i32) in1); \
1366  out1 = (RTYPE) __msa_ilvl_w((v4i32) in2, (v4i32) in3); \
1367 }
1368 #define ILVL_W2_UB(...) ILVL_W2(v16u8, __VA_ARGS__)
1369 #define ILVL_W2_SB(...) ILVL_W2(v16i8, __VA_ARGS__)
1370 #define ILVL_W2_SH(...) ILVL_W2(v8i16, __VA_ARGS__)
1371 
1372 /* Description : Interleave right half of byte elements from vectors
1373  Arguments : Inputs - in0, in1, in2, in3, in4, in5, in6, in7
1374  Outputs - out0, out1, out2, out3
1375  Return Type - as per RTYPE
1376  Details : Right half of byte elements of in0 and right half of byte
1377  elements of in1 are interleaved and copied to out0.
1378  Right half of byte elements of in2 and right half of byte
1379  elements of in3 are interleaved and copied to out1.
1380  Similar for other pairs
1381 */
1382 #define ILVR_B2(RTYPE, in0, in1, in2, in3, out0, out1) \
1383 { \
1384  out0 = (RTYPE) __msa_ilvr_b((v16i8) in0, (v16i8) in1); \
1385  out1 = (RTYPE) __msa_ilvr_b((v16i8) in2, (v16i8) in3); \
1386 }
1387 #define ILVR_B2_UB(...) ILVR_B2(v16u8, __VA_ARGS__)
1388 #define ILVR_B2_SB(...) ILVR_B2(v16i8, __VA_ARGS__)
1389 #define ILVR_B2_UH(...) ILVR_B2(v8u16, __VA_ARGS__)
1390 #define ILVR_B2_SH(...) ILVR_B2(v8i16, __VA_ARGS__)
1391 #define ILVR_B2_SW(...) ILVR_B2(v4i32, __VA_ARGS__)
1392 
1393 #define ILVR_B3(RTYPE, in0, in1, in2, in3, in4, in5, out0, out1, out2) \
1394 { \
1395  ILVR_B2(RTYPE, in0, in1, in2, in3, out0, out1); \
1396  out2 = (RTYPE) __msa_ilvr_b((v16i8) in4, (v16i8) in5); \
1397 }
1398 #define ILVR_B3_UB(...) ILVR_B3(v16u8, __VA_ARGS__)
1399 #define ILVR_B3_SB(...) ILVR_B3(v16i8, __VA_ARGS__)
1400 #define ILVR_B3_UH(...) ILVR_B3(v8u16, __VA_ARGS__)
1401 #define ILVR_B3_SH(...) ILVR_B3(v8i16, __VA_ARGS__)
1402 
1403 #define ILVR_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
1404  out0, out1, out2, out3) \
1405 { \
1406  ILVR_B2(RTYPE, in0, in1, in2, in3, out0, out1); \
1407  ILVR_B2(RTYPE, in4, in5, in6, in7, out2, out3); \
1408 }
1409 #define ILVR_B4_UB(...) ILVR_B4(v16u8, __VA_ARGS__)
1410 #define ILVR_B4_SB(...) ILVR_B4(v16i8, __VA_ARGS__)
1411 #define ILVR_B4_UH(...) ILVR_B4(v8u16, __VA_ARGS__)
1412 #define ILVR_B4_SH(...) ILVR_B4(v8i16, __VA_ARGS__)
1413 #define ILVR_B4_SW(...) ILVR_B4(v4i32, __VA_ARGS__)
1414 
1415 #define ILVR_B8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
1416  in8, in9, in10, in11, in12, in13, in14, in15, \
1417  out0, out1, out2, out3, out4, out5, out6, out7) \
1418 { \
1419  ILVR_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
1420  out0, out1, out2, out3); \
1421  ILVR_B4(RTYPE, in8, in9, in10, in11, in12, in13, in14, in15, \
1422  out4, out5, out6, out7); \
1423 }
1424 #define ILVR_B8_UH(...) ILVR_B8(v8u16, __VA_ARGS__)
1425 
1426 /* Description : Interleave right half of halfword elements from vectors
1427  Arguments : Inputs - in0, in1, in2, in3, in4, in5, in6, in7
1428  Outputs - out0, out1, out2, out3
1429  Return Type - as per RTYPE
1430  Details : Right half of halfword elements of in0 and right half of
1431  halfword elements of in1 are interleaved and copied to out0.
1432  Right half of halfword elements of in2 and right half of
1433  halfword elements of in3 are interleaved and copied to out1.
1434  Similar for other pairs
1435 */
1436 #define ILVR_H2(RTYPE, in0, in1, in2, in3, out0, out1) \
1437 { \
1438  out0 = (RTYPE) __msa_ilvr_h((v8i16) in0, (v8i16) in1); \
1439  out1 = (RTYPE) __msa_ilvr_h((v8i16) in2, (v8i16) in3); \
1440 }
1441 #define ILVR_H2_SH(...) ILVR_H2(v8i16, __VA_ARGS__)
1442 #define ILVR_H2_SW(...) ILVR_H2(v4i32, __VA_ARGS__)
1443 
1444 #define ILVR_H3(RTYPE, in0, in1, in2, in3, in4, in5, out0, out1, out2) \
1445 { \
1446  ILVR_H2(RTYPE, in0, in1, in2, in3, out0, out1); \
1447  out2 = (RTYPE) __msa_ilvr_h((v8i16) in4, (v8i16) in5); \
1448 }
1449 #define ILVR_H3_SH(...) ILVR_H3(v8i16, __VA_ARGS__)
1450 
1451 #define ILVR_H4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
1452  out0, out1, out2, out3) \
1453 { \
1454  ILVR_H2(RTYPE, in0, in1, in2, in3, out0, out1); \
1455  ILVR_H2(RTYPE, in4, in5, in6, in7, out2, out3); \
1456 }
1457 #define ILVR_H4_SH(...) ILVR_H4(v8i16, __VA_ARGS__)
1458 #define ILVR_H4_SW(...) ILVR_H4(v4i32, __VA_ARGS__)
1459 
1460 #define ILVR_W2(RTYPE, in0, in1, in2, in3, out0, out1) \
1461 { \
1462  out0 = (RTYPE) __msa_ilvr_w((v4i32) in0, (v4i32) in1); \
1463  out1 = (RTYPE) __msa_ilvr_w((v4i32) in2, (v4i32) in3); \
1464 }
1465 #define ILVR_W2_UB(...) ILVR_W2(v16u8, __VA_ARGS__)
1466 #define ILVR_W2_SB(...) ILVR_W2(v16i8, __VA_ARGS__)
1467 #define ILVR_W2_SH(...) ILVR_W2(v8i16, __VA_ARGS__)
1468 
1469 #define ILVR_W4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
1470  out0, out1, out2, out3) \
1471 { \
1472  ILVR_W2(RTYPE, in0, in1, in2, in3, out0, out1); \
1473  ILVR_W2(RTYPE, in4, in5, in6, in7, out2, out3); \
1474 }
1475 #define ILVR_W4_SB(...) ILVR_W4(v16i8, __VA_ARGS__)
1476 #define ILVR_W4_UB(...) ILVR_W4(v16u8, __VA_ARGS__)
1477 
1478 /* Description : Interleave right half of double word elements from vectors
1479  Arguments : Inputs - in0, in1, in2, in3, in4, in5, in6, in7
1480  Outputs - out0, out1, out2, out3
1481  Return Type - as per RTYPE
1482  Details : Right half of double word elements of in0 and right half of
1483  double word elements of in1 are interleaved and copied to out0.
1484  Right half of double word elements of in2 and right half of
1485  double word elements of in3 are interleaved and copied to out1.
1486 */
1487 #define ILVR_D2(RTYPE, in0, in1, in2, in3, out0, out1) \
1488 { \
1489  out0 = (RTYPE) __msa_ilvr_d((v2i64) in0, (v2i64) in1); \
1490  out1 = (RTYPE) __msa_ilvr_d((v2i64) in2, (v2i64) in3); \
1491 }
1492 #define ILVR_D2_UB(...) ILVR_D2(v16u8, __VA_ARGS__)
1493 #define ILVR_D2_SB(...) ILVR_D2(v16i8, __VA_ARGS__)
1494 #define ILVR_D2_SH(...) ILVR_D2(v8i16, __VA_ARGS__)
1495 
1496 #define ILVR_D3(RTYPE, in0, in1, in2, in3, in4, in5, out0, out1, out2) \
1497 { \
1498  ILVR_D2(RTYPE, in0, in1, in2, in3, out0, out1); \
1499  out2 = (RTYPE) __msa_ilvr_d((v2i64) in4, (v2i64) in5); \
1500 }
1501 #define ILVR_D3_SB(...) ILVR_D3(v16i8, __VA_ARGS__)
1502 
1503 #define ILVR_D4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
1504  out0, out1, out2, out3) \
1505 { \
1506  ILVR_D2(RTYPE, in0, in1, in2, in3, out0, out1); \
1507  ILVR_D2(RTYPE, in4, in5, in6, in7, out2, out3); \
1508 }
1509 #define ILVR_D4_SB(...) ILVR_D4(v16i8, __VA_ARGS__)
1510 #define ILVR_D4_UB(...) ILVR_D4(v16u8, __VA_ARGS__)
1511 
1512 /* Description : Interleave left half of double word elements from vectors
1513  Arguments : Inputs - in0, in1, in2, in3
1514  Outputs - out0, out1
1515  Return Type - as per RTYPE
1516  Details : Left half of double word elements of in0 and left half of
1517  double word elements of in1 are interleaved and copied to out0.
1518  Left half of double word elements of in2 and left half of
1519  double word elements of in3 are interleaved and copied to out1.
1520 */
1521 #define ILVL_D2(RTYPE, in0, in1, in2, in3, out0, out1) \
1522 { \
1523  out0 = (RTYPE) __msa_ilvl_d((v2i64) in0, (v2i64) in1); \
1524  out1 = (RTYPE) __msa_ilvl_d((v2i64) in2, (v2i64) in3); \
1525 }
1526 #define ILVL_D2_UB(...) ILVL_D2(v16u8, __VA_ARGS__)
1527 #define ILVL_D2_SB(...) ILVL_D2(v16i8, __VA_ARGS__)
1528 #define ILVL_D2_SH(...) ILVL_D2(v8i16, __VA_ARGS__)
1529 
1530 /* Description : Interleave both left and right half of input vectors
1531  Arguments : Inputs - in0, in1
1532  Outputs - out0, out1
1533  Return Type - as per RTYPE
1534  Details : Right half of byte elements from 'in0' and 'in1' are
1535  interleaved and stored to 'out0'
1536  Left half of byte elements from 'in0' and 'in1' are
1537  interleaved and stored to 'out1'
1538 */
1539 #define ILVRL_B2(RTYPE, in0, in1, out0, out1) \
1540 { \
1541  out0 = (RTYPE) __msa_ilvr_b((v16i8) in0, (v16i8) in1); \
1542  out1 = (RTYPE) __msa_ilvl_b((v16i8) in0, (v16i8) in1); \
1543 }
1544 #define ILVRL_B2_UB(...) ILVRL_B2(v16u8, __VA_ARGS__)
1545 #define ILVRL_B2_SB(...) ILVRL_B2(v16i8, __VA_ARGS__)
1546 #define ILVRL_B2_UH(...) ILVRL_B2(v8u16, __VA_ARGS__)
1547 #define ILVRL_B2_SH(...) ILVRL_B2(v8i16, __VA_ARGS__)
1548 #define ILVRL_B2_SW(...) ILVRL_B2(v4i32, __VA_ARGS__)
1549 
1550 #define ILVRL_H2(RTYPE, in0, in1, out0, out1) \
1551 { \
1552  out0 = (RTYPE) __msa_ilvr_h((v8i16) in0, (v8i16) in1); \
1553  out1 = (RTYPE) __msa_ilvl_h((v8i16) in0, (v8i16) in1); \
1554 }
1555 #define ILVRL_H2_UB(...) ILVRL_H2(v16u8, __VA_ARGS__)
1556 #define ILVRL_H2_SB(...) ILVRL_H2(v16i8, __VA_ARGS__)
1557 #define ILVRL_H2_SH(...) ILVRL_H2(v8i16, __VA_ARGS__)
1558 #define ILVRL_H2_SW(...) ILVRL_H2(v4i32, __VA_ARGS__)
1559 
1560 #define ILVRL_W2(RTYPE, in0, in1, out0, out1) \
1561 { \
1562  out0 = (RTYPE) __msa_ilvr_w((v4i32) in0, (v4i32) in1); \
1563  out1 = (RTYPE) __msa_ilvl_w((v4i32) in0, (v4i32) in1); \
1564 }
1565 #define ILVRL_W2_UB(...) ILVRL_W2(v16u8, __VA_ARGS__)
1566 #define ILVRL_W2_SH(...) ILVRL_W2(v8i16, __VA_ARGS__)
1567 #define ILVRL_W2_SW(...) ILVRL_W2(v4i32, __VA_ARGS__)
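
/* Usage sketch (illustrative only; variables are assumed). The ILVRL pairs
 * produce both interleaves of one vector pair in a single macro, which is
 * the common way to widen a full register in two halves, e.g. for sign
 * extension of bytes to halfwords:
 *
 *     v16i8 src, sign;
 *     v8i16 lo, hi;
 *     sign = __msa_clti_s_b(src, 0);      // per-element sign mask
 *     ILVRL_B2_SH(sign, src, lo, hi);     // lo/hi: sign-extended halfwords
 */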
1568 
1569 /* Description : Maximum values between signed elements of vector and
1570  5-bit signed immediate value are copied to the output vector
1571  Arguments : Inputs - in0, in1, in2, in3, max_val
1572  Outputs - in0, in1, in2, in3 (in place)
1573  Return Type - as per RTYPE
1574  Details : The maximum of each signed halfword element of 'in0' and
1575  'max_val' is written back to 'in0'
1576 */
1577 #define MAXI_SH2(RTYPE, in0, in1, max_val) \
1578 { \
1579  in0 = (RTYPE) __msa_maxi_s_h((v8i16) in0, max_val); \
1580  in1 = (RTYPE) __msa_maxi_s_h((v8i16) in1, max_val); \
1581 }
1582 #define MAXI_SH2_UH(...) MAXI_SH2(v8u16, __VA_ARGS__)
1583 #define MAXI_SH2_SH(...) MAXI_SH2(v8i16, __VA_ARGS__)
1584 
1585 #define MAXI_SH4(RTYPE, in0, in1, in2, in3, max_val) \
1586 { \
1587  MAXI_SH2(RTYPE, in0, in1, max_val); \
1588  MAXI_SH2(RTYPE, in2, in3, max_val); \
1589 }
1590 #define MAXI_SH4_UH(...) MAXI_SH4(v8u16, __VA_ARGS__)
1591 #define MAXI_SH4_SH(...) MAXI_SH4(v8i16, __VA_ARGS__)
1592 
1593 #define MAXI_SH8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, max_val) \
1594 { \
1595  MAXI_SH4(RTYPE, in0, in1, in2, in3, max_val); \
1596  MAXI_SH4(RTYPE, in4, in5, in6, in7, max_val); \
1597 }
1598 #define MAXI_SH8_UH(...) MAXI_SH8(v8u16, __VA_ARGS__)
1599 #define MAXI_SH8_SH(...) MAXI_SH8(v8i16, __VA_ARGS__)
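
/* Usage sketch (illustrative only; 'res0' .. 'res3' are assumed). MAXI_SH*
 * is typically used to clamp intermediate results at a lower bound, e.g.
 * flooring four vectors at zero before an unsigned saturate:
 *
 *     MAXI_SH4_SH(res0, res1, res2, res3, 0);
 */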
1600 
1601 /* Description : Saturate the halfword element values to the max
1602  unsigned value of (sat_val+1) bits
1603  The element data width remains unchanged
1604  Arguments : Inputs - in0, in1, in2, in3, sat_val
1605  Outputs - in0, in1, in2, in3 (in place)
1606  Return Type - as per RTYPE
1607  Details : Each unsigned halfword element from 'in0' is saturated to the
1608  value generated with (sat_val+1) bit range
1609  Results are written in place to the original vectors
1610 */
1611 #define SAT_UH2(RTYPE, in0, in1, sat_val) \
1612 { \
1613  in0 = (RTYPE) __msa_sat_u_h((v8u16) in0, sat_val); \
1614  in1 = (RTYPE) __msa_sat_u_h((v8u16) in1, sat_val); \
1615 }
1616 #define SAT_UH2_UH(...) SAT_UH2(v8u16, __VA_ARGS__)
1617 #define SAT_UH2_SH(...) SAT_UH2(v8i16, __VA_ARGS__)
1618 
1619 #define SAT_UH4(RTYPE, in0, in1, in2, in3, sat_val) \
1620 { \
1621  SAT_UH2(RTYPE, in0, in1, sat_val); \
1622  SAT_UH2(RTYPE, in2, in3, sat_val); \
1623 }
1624 #define SAT_UH4_UH(...) SAT_UH4(v8u16, __VA_ARGS__)
1625 #define SAT_UH4_SH(...) SAT_UH4(v8i16, __VA_ARGS__)
1626 
1627 #define SAT_UH8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, sat_val) \
1628 { \
1629  SAT_UH4(RTYPE, in0, in1, in2, in3, sat_val); \
1630  SAT_UH4(RTYPE, in4, in5, in6, in7, sat_val); \
1631 }
1632 #define SAT_UH8_UH(...) SAT_UH8(v8u16, __VA_ARGS__)
1633 #define SAT_UH8_SH(...) SAT_UH8(v8i16, __VA_ARGS__)
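
/* Usage sketch (illustrative only; 'sum0'/'sum1' are assumed v8u16 values).
 * A sat_val of 7 keeps each unsigned halfword within the 8-bit range
 * 0..255, ready for packing down to bytes:
 *
 *     SAT_UH2_UH(sum0, sum1, 7);
 *     out = (v16u8) __msa_pckev_b((v16i8) sum1, (v16i8) sum0);
 */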
1634 
1635 /* Description : Saturate the halfword element values to the max
1636  signed value of (sat_val+1) bits
1637  The element data width remains unchanged
1638  Arguments : Inputs - in0, in1, in2, in3, sat_val
1639  Outputs - in0, in1, in2, in3 (in place)
1640  Return Type - as per RTYPE
1641  Details : Each signed halfword element from 'in0' is saturated to the
1642  value generated with (sat_val+1) bit range
1643  Results are written in place to the original vectors
1644 */
1645 #define SAT_SH2(RTYPE, in0, in1, sat_val) \
1646 { \
1647  in0 = (RTYPE) __msa_sat_s_h((v8i16) in0, sat_val); \
1648  in1 = (RTYPE) __msa_sat_s_h((v8i16) in1, sat_val); \
1649 }
1650 #define SAT_SH2_SH(...) SAT_SH2(v8i16, __VA_ARGS__)
1651 
1652 #define SAT_SH3(RTYPE, in0, in1, in2, sat_val) \
1653 { \
1654  SAT_SH2(RTYPE, in0, in1, sat_val); \
1655  in2 = (RTYPE) __msa_sat_s_h((v8i16) in2, sat_val); \
1656 }
1657 #define SAT_SH3_SH(...) SAT_SH3(v8i16, __VA_ARGS__)
1658 
1659 #define SAT_SH4(RTYPE, in0, in1, in2, in3, sat_val) \
1660 { \
1661  SAT_SH2(RTYPE, in0, in1, sat_val); \
1662  SAT_SH2(RTYPE, in2, in3, sat_val); \
1663 }
1664 #define SAT_SH4_SH(...) SAT_SH4(v8i16, __VA_ARGS__)
1665 
1666 /* Description : Saturate the word element values to the max
1667  signed value of (sat_val+1) bits
1668  The element data width remains unchanged
1669  Arguments : Inputs - in0, in1, in2, in3, sat_val
1670  Outputs - in0, in1, in2, in3 (in place)
1671  Return Type - as per RTYPE
1672  Details : Each signed word element from 'in0' is saturated to the
1673  value generated with (sat_val+1) bit range
1674  Results are written in place to the original vectors
1675 */
1676 #define SAT_SW2(RTYPE, in0, in1, sat_val) \
1677 { \
1678  in0 = (RTYPE) __msa_sat_s_w((v4i32) in0, sat_val); \
1679  in1 = (RTYPE) __msa_sat_s_w((v4i32) in1, sat_val); \
1680 }
1681 #define SAT_SW2_SW(...) SAT_SW2(v4i32, __VA_ARGS__)
1682 
1683 #define SAT_SW4(RTYPE, in0, in1, in2, in3, sat_val) \
1684 { \
1685  SAT_SW2(RTYPE, in0, in1, sat_val); \
1686  SAT_SW2(RTYPE, in2, in3, sat_val); \
1687 }
1688 #define SAT_SW4_SW(...) SAT_SW4(v4i32, __VA_ARGS__)
1689 
1690 /* Description : Indexed halfword element values are replicated to all
1691  elements in output vector
1692  Arguments : Inputs - in, idx0, idx1
1693  Outputs - out0, out1
1694  Return Type - as per RTYPE
1695  Details : 'idx0' element value from 'in' vector is replicated to all
1696  elements in 'out0' vector
1697  Valid index range for halfword operation is 0-7
1698 */
1699 #define SPLATI_H2(RTYPE, in, idx0, idx1, out0, out1) \
1700 { \
1701  out0 = (RTYPE) __msa_splati_h((v8i16) in, idx0); \
1702  out1 = (RTYPE) __msa_splati_h((v8i16) in, idx1); \
1703 }
1704 #define SPLATI_H2_SB(...) SPLATI_H2(v16i8, __VA_ARGS__)
1705 #define SPLATI_H2_SH(...) SPLATI_H2(v8i16, __VA_ARGS__)
1706 
1707 #define SPLATI_H3(RTYPE, in, idx0, idx1, idx2, \
1708  out0, out1, out2) \
1709 { \
1710  SPLATI_H2(RTYPE, in, idx0, idx1, out0, out1); \
1711  out2 = (RTYPE) __msa_splati_h((v8i16) in, idx2); \
1712 }
1713 #define SPLATI_H3_SB(...) SPLATI_H3(v16i8, __VA_ARGS__)
1714 #define SPLATI_H3_SH(...) SPLATI_H3(v8i16, __VA_ARGS__)
1715 
1716 #define SPLATI_H4(RTYPE, in, idx0, idx1, idx2, idx3, \
1717  out0, out1, out2, out3) \
1718 { \
1719  SPLATI_H2(RTYPE, in, idx0, idx1, out0, out1); \
1720  SPLATI_H2(RTYPE, in, idx2, idx3, out2, out3); \
1721 }
1722 #define SPLATI_H4_SB(...) SPLATI_H4(v16i8, __VA_ARGS__)
1723 #define SPLATI_H4_SH(...) SPLATI_H4(v8i16, __VA_ARGS__)
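
/* Usage sketch (illustrative only; 'filter' is an assumed pointer to the
 * coefficient array). Broadcasting filter taps that were loaded together
 * into one register, one coefficient per output vector:
 *
 *     v8i16 filt = LD_SH(filter);
 *     v8i16 f0, f1, f2, f3;
 *     SPLATI_H4_SH(filt, 0, 1, 2, 3, f0, f1, f2, f3);
 */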
1724 
1725 /* Description : Indexed word element values are replicated to all
1726  elements in output vector
1727  Arguments : Inputs - in, stidx
1728  Outputs - out0, out1
1729  Return Type - as per RTYPE
1730  Details : 'stidx' element value from 'in' vector is replicated to all
1731  elements in 'out0' vector
1732  'stidx + 1' element value from 'in' vector is replicated to all
1733  elements in 'out1' vector
1734  Valid index range for word operation is 0-3
1735 */
1736 #define SPLATI_W2(RTYPE, in, stidx, out0, out1) \
1737 { \
1738  out0 = (RTYPE) __msa_splati_w((v4i32) in, stidx); \
1739  out1 = (RTYPE) __msa_splati_w((v4i32) in, (stidx+1)); \
1740 }
1741 #define SPLATI_W2_SH(...) SPLATI_W2(v8i16, __VA_ARGS__)
1742 #define SPLATI_W2_SW(...) SPLATI_W2(v4i32, __VA_ARGS__)
1743 
1744 #define SPLATI_W4(RTYPE, in, out0, out1, out2, out3) \
1745 { \
1746  SPLATI_W2(RTYPE, in, 0, out0, out1); \
1747  SPLATI_W2(RTYPE, in, 2, out2, out3); \
1748 }
1749 #define SPLATI_W4_SH(...) SPLATI_W4(v8i16, __VA_ARGS__)
1750 #define SPLATI_W4_SW(...) SPLATI_W4(v4i32, __VA_ARGS__)
1751 
1752 /* Description : Pack even byte elements of vector pairs
1753  Arguments : Inputs - in0, in1, in2, in3
1754  Outputs - out0, out1
1755  Return Type - as per RTYPE
1756  Details : Even byte elements of in0 are copied to the left half of
1757  out0 & even byte elements of in1 are copied to the right
1758  half of out0.
1759  Even byte elements of in2 are copied to the left half of
1760  out1 & even byte elements of in3 are copied to the right
1761  half of out1.
1762 */
1763 #define PCKEV_B2(RTYPE, in0, in1, in2, in3, out0, out1) \
1764 { \
1765  out0 = (RTYPE) __msa_pckev_b((v16i8) in0, (v16i8) in1); \
1766  out1 = (RTYPE) __msa_pckev_b((v16i8) in2, (v16i8) in3); \
1767 }
1768 #define PCKEV_B2_SB(...) PCKEV_B2(v16i8, __VA_ARGS__)
1769 #define PCKEV_B2_UB(...) PCKEV_B2(v16u8, __VA_ARGS__)
1770 #define PCKEV_B2_SH(...) PCKEV_B2(v8i16, __VA_ARGS__)
1771 #define PCKEV_B2_SW(...) PCKEV_B2(v4i32, __VA_ARGS__)
1772 
1773 #define PCKEV_B3(RTYPE, in0, in1, in2, in3, in4, in5, out0, out1, out2) \
1774 { \
1775  PCKEV_B2(RTYPE, in0, in1, in2, in3, out0, out1); \
1776  out2 = (RTYPE) __msa_pckev_b((v16i8) in4, (v16i8) in5); \
1777 }
1778 #define PCKEV_B3_UB(...) PCKEV_B3(v16u8, __VA_ARGS__)
1779 #define PCKEV_B3_SB(...) PCKEV_B3(v16i8, __VA_ARGS__)
1780 
1781 #define PCKEV_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
1782  out0, out1, out2, out3) \
1783 { \
1784  PCKEV_B2(RTYPE, in0, in1, in2, in3, out0, out1); \
1785  PCKEV_B2(RTYPE, in4, in5, in6, in7, out2, out3); \
1786 }
1787 #define PCKEV_B4_SB(...) PCKEV_B4(v16i8, __VA_ARGS__)
1788 #define PCKEV_B4_UB(...) PCKEV_B4(v16u8, __VA_ARGS__)
1789 #define PCKEV_B4_SH(...) PCKEV_B4(v8i16, __VA_ARGS__)
1790 #define PCKEV_B4_SW(...) PCKEV_B4(v4i32, __VA_ARGS__)
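
/* Usage sketch (illustrative only; 'res0' .. 'res3' are assumed v8i16
 * results). Packing the even (low) bytes is the standard narrowing step
 * from halfword arithmetic back to 8-bit pixels:
 *
 *     v16u8 pix0, pix1;
 *     PCKEV_B2_UB(res1, res0, res3, res2, pix0, pix1);
 */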
1791 
1792 /* Description : Pack even halfword elements of vector pairs
1793  Arguments : Inputs - in0, in1, in2, in3
1794  Outputs - out0, out1
1795  Return Type - as per RTYPE
1796  Details : Even halfword elements of in0 are copied to the left half of
1797  out0 & even halfword elements of in1 are copied to the right
1798  half of out0.
1799  Even halfword elements of in2 are copied to the left half of
1800  out1 & even halfword elements of in3 are copied to the right
1801  half of out1.
1802 */
1803 #define PCKEV_H2(RTYPE, in0, in1, in2, in3, out0, out1) \
1804 { \
1805  out0 = (RTYPE) __msa_pckev_h((v8i16) in0, (v8i16) in1); \
1806  out1 = (RTYPE) __msa_pckev_h((v8i16) in2, (v8i16) in3); \
1807 }
1808 #define PCKEV_H2_SH(...) PCKEV_H2(v8i16, __VA_ARGS__)
1809 #define PCKEV_H2_SW(...) PCKEV_H2(v4i32, __VA_ARGS__)
1810 
1811 #define PCKEV_H4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
1812  out0, out1, out2, out3) \
1813 { \
1814  PCKEV_H2(RTYPE, in0, in1, in2, in3, out0, out1); \
1815  PCKEV_H2(RTYPE, in4, in5, in6, in7, out2, out3); \
1816 }
1817 #define PCKEV_H4_SH(...) PCKEV_H4(v8i16, __VA_ARGS__)
1818 #define PCKEV_H4_SW(...) PCKEV_H4(v4i32, __VA_ARGS__)
1819 
1820 /* Description : Pack even double word elements of vector pairs
1821  Arguments : Inputs - in0, in1, in2, in3
1822  Outputs - out0, out1
1823  Return Type - as per RTYPE
1824  Details : Even double elements of in0 are copied to the left half of
1825  out0 & even double elements of in1 are copied to the right
1826  half of out0.
1827  Even double elements of in2 are copied to the left half of
1828  out1 & even double elements of in3 are copied to the right
1829  half of out1.
1830 */
1831 #define PCKEV_D2(RTYPE, in0, in1, in2, in3, out0, out1) \
1832 { \
1833  out0 = (RTYPE) __msa_pckev_d((v2i64) in0, (v2i64) in1); \
1834  out1 = (RTYPE) __msa_pckev_d((v2i64) in2, (v2i64) in3); \
1835 }
1836 #define PCKEV_D2_UB(...) PCKEV_D2(v16u8, __VA_ARGS__)
1837 #define PCKEV_D2_SB(...) PCKEV_D2(v16i8, __VA_ARGS__)
1838 #define PCKEV_D2_SH(...) PCKEV_D2(v8i16, __VA_ARGS__)
1839 
1840 #define PCKEV_D4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
1841  out0, out1, out2, out3) \
1842 { \
1843  PCKEV_D2(RTYPE, in0, in1, in2, in3, out0, out1); \
1844  PCKEV_D2(RTYPE, in4, in5, in6, in7, out2, out3); \
1845 }
1846 #define PCKEV_D4_UB(...) PCKEV_D4(v16u8, __VA_ARGS__)
1847 
1848 /* Description : Pack odd double word elements of vector pairs
1849  Arguments : Inputs - in0, in1, in2, in3
1850  Outputs - out0, out1
1851  Return Type - as per RTYPE
1852  Details : Odd double word elements of 'in0' and 'in1' are packed
1853  together and the result is written to out0
1854  Odd double word elements of 'in2' and 'in3' are packed
1855  together and the result is written to out1
1856 */
1857 #define PCKOD_D2(RTYPE, in0, in1, in2, in3, out0, out1) \
1858 { \
1859  out0 = (RTYPE) __msa_pckod_d((v2i64) in0, (v2i64) in1); \
1860  out1 = (RTYPE) __msa_pckod_d((v2i64) in2, (v2i64) in3); \
1861 }
1862 #define PCKOD_D2_UB(...) PCKOD_D2(v16u8, __VA_ARGS__)
1863 #define PCKOD_D2_SH(...) PCKOD_D2(v8i16, __VA_ARGS__)
1864 #define PCKOD_D2_SD(...) PCKOD_D2(v2i64, __VA_ARGS__)
1865 
1866 /* Description : Each byte element is logically xor'ed with immediate 128
1867  Arguments : Inputs - in0, in1
1868  Outputs - in0, in1 (in-place)
1869  Return Type - as per RTYPE
1870  Details : Each unsigned byte element from input vector 'in0' is
1871  logically xor'ed with 128 and result is in-place stored in
1872  'in0' vector
1873  Each unsigned byte element from input vector 'in1' is
1874  logically xor'ed with 128 and result is in-place stored in
1875  'in1' vector
1876  Similar for other pairs
1877 */
1878 #define XORI_B2_128(RTYPE, in0, in1) \
1879 { \
1880  in0 = (RTYPE) __msa_xori_b((v16u8) in0, 128); \
1881  in1 = (RTYPE) __msa_xori_b((v16u8) in1, 128); \
1882 }
1883 #define XORI_B2_128_UB(...) XORI_B2_128(v16u8, __VA_ARGS__)
1884 #define XORI_B2_128_SB(...) XORI_B2_128(v16i8, __VA_ARGS__)
1885 #define XORI_B2_128_SH(...) XORI_B2_128(v8i16, __VA_ARGS__)
1886 
1887 #define XORI_B3_128(RTYPE, in0, in1, in2) \
1888 { \
1889  XORI_B2_128(RTYPE, in0, in1); \
1890  in2 = (RTYPE) __msa_xori_b((v16u8) in2, 128); \
1891 }
1892 #define XORI_B3_128_SB(...) XORI_B3_128(v16i8, __VA_ARGS__)
1893 
1894 #define XORI_B4_128(RTYPE, in0, in1, in2, in3) \
1895 { \
1896  XORI_B2_128(RTYPE, in0, in1); \
1897  XORI_B2_128(RTYPE, in2, in3); \
1898 }
1899 #define XORI_B4_128_UB(...) XORI_B4_128(v16u8, __VA_ARGS__)
1900 #define XORI_B4_128_SB(...) XORI_B4_128(v16i8, __VA_ARGS__)
1901 #define XORI_B4_128_SH(...) XORI_B4_128(v8i16, __VA_ARGS__)
1902 
1903 #define XORI_B5_128(RTYPE, in0, in1, in2, in3, in4) \
1904 { \
1905  XORI_B3_128(RTYPE, in0, in1, in2); \
1906  XORI_B2_128(RTYPE, in3, in4); \
1907 }
1908 #define XORI_B5_128_SB(...) XORI_B5_128(v16i8, __VA_ARGS__)
1909 
1910 #define XORI_B6_128(RTYPE, in0, in1, in2, in3, in4, in5) \
1911 { \
1912  XORI_B4_128(RTYPE, in0, in1, in2, in3); \
1913  XORI_B2_128(RTYPE, in4, in5); \
1914 }
1915 #define XORI_B6_128_SB(...) XORI_B6_128(v16i8, __VA_ARGS__)
1916 
1917 #define XORI_B7_128(RTYPE, in0, in1, in2, in3, in4, in5, in6) \
1918 { \
1919  XORI_B4_128(RTYPE, in0, in1, in2, in3); \
1920  XORI_B3_128(RTYPE, in4, in5, in6); \
1921 }
1922 #define XORI_B7_128_SB(...) XORI_B7_128(v16i8, __VA_ARGS__)
1923 
1924 #define XORI_B8_128(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7) \
1925 { \
1926  XORI_B4_128(RTYPE, in0, in1, in2, in3); \
1927  XORI_B4_128(RTYPE, in4, in5, in6, in7); \
1928 }
1929 #define XORI_B8_128_SB(...) XORI_B8_128(v16i8, __VA_ARGS__)
1930 #define XORI_B8_128_UB(...) XORI_B8_128(v16u8, __VA_ARGS__)
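
/* Usage sketch (illustrative only; 'src0'/'src1' are assumed). Flipping
 * bit 7 maps unsigned pixels 0..255 onto signed bytes -128..127, so signed
 * multiply and dot-product instructions can be used; applying the same
 * XORI to the result undoes the shift:
 *
 *     XORI_B2_128_SB(src0, src1);     // unsigned range -> signed range
 */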
1931 
1932 /* Description : Addition of signed halfword elements and signed saturation
1933  Arguments : Inputs - in0, in1, in2, in3
1934  Outputs - out0, out1
1935  Return Type - as per RTYPE
1936  Details : Signed halfword elements from 'in0' are added to signed
1937  halfword elements of 'in1'. The result is then signed saturated
1938  between -32768 and +32767 (as per halfword data type)
1939  Similar for other pairs
1940 */
1941 #define ADDS_SH2(RTYPE, in0, in1, in2, in3, out0, out1) \
1942 { \
1943  out0 = (RTYPE) __msa_adds_s_h((v8i16) in0, (v8i16) in1); \
1944  out1 = (RTYPE) __msa_adds_s_h((v8i16) in2, (v8i16) in3); \
1945 }
1946 #define ADDS_SH2_SH(...) ADDS_SH2(v8i16, __VA_ARGS__)
1947 
1948 #define ADDS_SH4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
1949  out0, out1, out2, out3) \
1950 { \
1951  ADDS_SH2(RTYPE, in0, in1, in2, in3, out0, out1); \
1952  ADDS_SH2(RTYPE, in4, in5, in6, in7, out2, out3); \
1953 }
1954 #define ADDS_SH4_UH(...) ADDS_SH4(v8u16, __VA_ARGS__)
1955 #define ADDS_SH4_SH(...) ADDS_SH4(v8i16, __VA_ARGS__)
1956 
1957 /* Description : Shift left all elements of vector (generic for all data types)
1958  Arguments : Inputs - in0, in1, in2, in3, shift
1959  Outputs - in0, in1, in2, in3 (in place)
1960  Return Type - as per input vector RTYPE
1961  Details : Each element of vector 'in0' is left shifted by 'shift' and
1962  result is in place written to 'in0'
1963  Similar for other pairs
1964 */
1965 #define SLLI_2V(in0, in1, shift) \
1966 { \
1967  in0 = in0 << shift; \
1968  in1 = in1 << shift; \
1969 }
1970 #define SLLI_4V(in0, in1, in2, in3, shift) \
1971 { \
1972  in0 = in0 << shift; \
1973  in1 = in1 << shift; \
1974  in2 = in2 << shift; \
1975  in3 = in3 << shift; \
1976 }
1977 
1978 /* Description : Arithmetic shift right all elements of vector
1979  (generic for all data types)
1980  Arguments : Inputs - in0, in1, in2, in3, shift
1981  Outputs - in0, in1, in2, in3 (in place)
1982  Return Type - as per input vector RTYPE
1983  Details : Each element of vector 'in0' is right shifted by 'shift' and
1984  result is in place written to 'in0'
1985  Here, 'shift' is GP variable passed in
1986  Similar for other pairs
1987 */
1988 #define SRA_4V(in0, in1, in2, in3, shift) \
1989 { \
1990  in0 = in0 >> shift; \
1991  in1 = in1 >> shift; \
1992  in2 = in2 >> shift; \
1993  in3 = in3 >> shift; \
1994 }
1995 
1996 /* Description : Shift right logical all halfword elements of vector
1997  Arguments : Inputs - in0, in1, in2, in3, shift
1998  Outputs - in0, in1, in2, in3 (in place)
1999  Return Type - as per RTYPE
2000  Details : Each element of vector 'in0' is shifted right logical by
2001  number of bits respective element holds in vector 'shift' and
2002  result is in place written to 'in0'
2003  Here, 'shift' is a vector passed in
2004  Similar for other pairs
2005 */
2006 #define SRL_H4(RTYPE, in0, in1, in2, in3, shift) \
2007 { \
2008  in0 = (RTYPE) __msa_srl_h((v8i16) in0, (v8i16) shift); \
2009  in1 = (RTYPE) __msa_srl_h((v8i16) in1, (v8i16) shift); \
2010  in2 = (RTYPE) __msa_srl_h((v8i16) in2, (v8i16) shift); \
2011  in3 = (RTYPE) __msa_srl_h((v8i16) in3, (v8i16) shift); \
2012 }
2013 #define SRL_H4_UH(...) SRL_H4(v8u16, __VA_ARGS__)
2014 
2015 #define SRLR_H4(RTYPE, in0, in1, in2, in3, shift) \
2016 { \
2017  in0 = (RTYPE) __msa_srlr_h((v8i16) in0, (v8i16) shift); \
2018  in1 = (RTYPE) __msa_srlr_h((v8i16) in1, (v8i16) shift); \
2019  in2 = (RTYPE) __msa_srlr_h((v8i16) in2, (v8i16) shift); \
2020  in3 = (RTYPE) __msa_srlr_h((v8i16) in3, (v8i16) shift); \
2021 }
2022 #define SRLR_H4_UH(...) SRLR_H4(v8u16, __VA_ARGS__)
2023 #define SRLR_H4_SH(...) SRLR_H4(v8i16, __VA_ARGS__)
2024 
2025 #define SRLR_H8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, shift) \
2026 { \
2027  SRLR_H4(RTYPE, in0, in1, in2, in3, shift); \
2028  SRLR_H4(RTYPE, in4, in5, in6, in7, shift); \
2029 }
2030 #define SRLR_H8_UH(...) SRLR_H8(v8u16, __VA_ARGS__)
2031 #define SRLR_H8_SH(...) SRLR_H8(v8i16, __VA_ARGS__)
2032 
2033 /* Description : Shift right arithmetic rounded halfwords
2034  Arguments : Inputs - in0, in1, shift
2035  Outputs - in0, in1, (in place)
2036  Return Type - as per RTYPE
2037  Details : Each element of vector 'in0' is shifted right arithmetic by
2038  number of bits respective element holds in vector 'shift'.
2039  The last discarded bit is added to shifted value for rounding
2040  and the result is in place written to 'in0'
2041  Here, 'shift' is a vector passed in
2042  Similar for other pairs
2043 */
2044 #define SRAR_H2(RTYPE, in0, in1, shift) \
2045 { \
2046  in0 = (RTYPE) __msa_srar_h((v8i16) in0, (v8i16) shift); \
2047  in1 = (RTYPE) __msa_srar_h((v8i16) in1, (v8i16) shift); \
2048 }
2049 #define SRAR_H2_UH(...) SRAR_H2(v8u16, __VA_ARGS__)
2050 #define SRAR_H2_SH(...) SRAR_H2(v8i16, __VA_ARGS__)
2051 
2052 #define SRAR_H3(RTYPE, in0, in1, in2, shift) \
2053 { \
2054  SRAR_H2(RTYPE, in0, in1, shift); \
2055  in2 = (RTYPE) __msa_srar_h((v8i16) in2, (v8i16) shift); \
2056 }
2057 #define SRAR_H3_SH(...) SRAR_H3(v8i16, __VA_ARGS__)
2058 
2059 #define SRAR_H4(RTYPE, in0, in1, in2, in3, shift) \
2060 { \
2061  SRAR_H2(RTYPE, in0, in1, shift); \
2062  SRAR_H2(RTYPE, in2, in3, shift); \
2063 }
2064 #define SRAR_H4_UH(...) SRAR_H4(v8u16, __VA_ARGS__)
2065 #define SRAR_H4_SH(...) SRAR_H4(v8i16, __VA_ARGS__)
2066 
2067 /* Description : Shift right arithmetic rounded words
2068  Arguments : Inputs - in0, in1, shift
2069  Outputs - in0, in1, (in place)
2070  Return Type - as per RTYPE
2071  Details : Each element of vector 'in0' is shifted right arithmetic by
2072  number of bits respective element holds in vector 'shift'.
2073  The last discarded bit is added to shifted value for rounding
2074  and the result is in place written to 'in0'
2075  Here, 'shift' is a vector passed in
2076  Similar for other pairs
2077 */
2078 #define SRAR_W2(RTYPE, in0, in1, shift) \
2079 { \
2080  in0 = (RTYPE) __msa_srar_w((v4i32) in0, (v4i32) shift); \
2081  in1 = (RTYPE) __msa_srar_w((v4i32) in1, (v4i32) shift); \
2082 }
2083 #define SRAR_W2_SW(...) SRAR_W2(v4i32, __VA_ARGS__)
2084 
2085 #define SRAR_W4(RTYPE, in0, in1, in2, in3, shift) \
2086 { \
2087  SRAR_W2(RTYPE, in0, in1, shift); \
2088  SRAR_W2(RTYPE, in2, in3, shift); \
2089 }
2090 #define SRAR_W4_SW(...) SRAR_W4(v4i32, __VA_ARGS__)
2091 
2092 /* Description : Shift right arithmetic rounded (immediate)
2093  Arguments : Inputs - in0, in1, in2, in3, shift
2094  Outputs - in0, in1, in2, in3 (in place)
2095  Return Type - as per RTYPE
2096  Details : Each element of vector 'in0' is shifted right arithmetic by
2097  value in 'shift'.
2098  The last discarded bit is added to shifted value for rounding
2099  and the result is in place written to 'in0'
2100  Similar for other pairs
2101 */
2102 #define SRARI_H2(RTYPE, in0, in1, shift) \
2103 { \
2104  in0 = (RTYPE) __msa_srari_h((v8i16) in0, shift); \
2105  in1 = (RTYPE) __msa_srari_h((v8i16) in1, shift); \
2106 }
2107 #define SRARI_H2_UH(...) SRARI_H2(v8u16, __VA_ARGS__)
2108 #define SRARI_H2_SH(...) SRARI_H2(v8i16, __VA_ARGS__)
2109 
2110 #define SRARI_H4(RTYPE, in0, in1, in2, in3, shift) \
2111 { \
2112  SRARI_H2(RTYPE, in0, in1, shift); \
2113  SRARI_H2(RTYPE, in2, in3, shift); \
2114 }
2115 #define SRARI_H4_UH(...) SRARI_H4(v8u16, __VA_ARGS__)
2116 #define SRARI_H4_SH(...) SRARI_H4(v8i16, __VA_ARGS__)
2117 
2118 /* Description : Shift right arithmetic rounded (immediate)
2119  Arguments : Inputs - in0, in1, shift
2120  Outputs - in0, in1 (in place)
2121  Return Type - as per RTYPE
2122  Details : Each element of vector 'in0' is shifted right arithmetic by
2123  value in 'shift'.
2124  The last discarded bit is added to shifted value for rounding
2125  and the result is in place written to 'in0'
2126  Similar for other pairs
2127 */
2128 #define SRARI_W2(RTYPE, in0, in1, shift) \
2129 { \
2130  in0 = (RTYPE) __msa_srari_w((v4i32) in0, shift); \
2131  in1 = (RTYPE) __msa_srari_w((v4i32) in1, shift); \
2132 }
2133 #define SRARI_W2_SW(...) SRARI_W2(v4i32, __VA_ARGS__)
2134 
2135 #define SRARI_W4(RTYPE, in0, in1, in2, in3, shift) \
2136 { \
2137  SRARI_W2(RTYPE, in0, in1, shift); \
2138  SRARI_W2(RTYPE, in2, in3, shift); \
2139 }
2140 #define SRARI_W4_SH(...) SRARI_W4(v8i16, __VA_ARGS__)
2141 #define SRARI_W4_SW(...) SRARI_W4(v4i32, __VA_ARGS__)
2142 
2143 /* Description : Multiplication of pairs of vectors
2144  Arguments : Inputs - in0, in1, in2, in3
2145  Outputs - out0, out1
2146  Details : Each element from 'in0' is multiplied with the corresponding
2147  element from 'in1' and the result is written to 'out0'
2148  Similar for other pairs
2149 */
2150 #define MUL2(in0, in1, in2, in3, out0, out1) \
2151 { \
2152  out0 = in0 * in1; \
2153  out1 = in2 * in3; \
2154 }
2155 #define MUL4(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, out3) \
2156 { \
2157  MUL2(in0, in1, in2, in3, out0, out1); \
2158  MUL2(in4, in5, in6, in7, out2, out3); \
2159 }
2160 
2161 /* Description : Addition of 2 pairs of vectors
2162  Arguments : Inputs - in0, in1, in2, in3
2163  Outputs - out0, out1
2164  Details : Each element from 2 pairs of vectors is added and 2 results are
2165  produced
2166 */
2167 #define ADD2(in0, in1, in2, in3, out0, out1) \
2168 { \
2169  out0 = in0 + in1; \
2170  out1 = in2 + in3; \
2171 }
2172 #define ADD4(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, out3) \
2173 { \
2174  ADD2(in0, in1, in2, in3, out0, out1); \
2175  ADD2(in4, in5, in6, in7, out2, out3); \
2176 }
2177 
2178 /* Description : Subtraction of 2 pairs of vectors
2179  Arguments : Inputs - in0, in1, in2, in3
2180  Outputs - out0, out1
2181  Details : Each element from 2 pairs of vectors is subtracted and 2 results
2182  are produced
2183 */
2184 #define SUB2(in0, in1, in2, in3, out0, out1) \
2185 { \
2186  out0 = in0 - in1; \
2187  out1 = in2 - in3; \
2188 }
2189 #define SUB4(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, out3) \
2190 { \
2191  out0 = in0 - in1; \
2192  out1 = in2 - in3; \
2193  out2 = in4 - in5; \
2194  out3 = in6 - in7; \
2195 }
2196 
2197 /* Description : Sign extend byte elements from right half of the vector
2198  Arguments : Input - in (byte vector)
2199  Output - out (sign extended halfword vector)
2200  Return Type - signed halfword
2201  Details : Sign bit of byte elements from input vector 'in' is
2202  extracted and interleaved with same vector 'in' to generate
2203  8 halfword elements keeping sign intact
2204 */
2205 #define UNPCK_R_SB_SH(in, out) \
2206 { \
2207  v16i8 sign_m; \
2208  \
2209  sign_m = __msa_clti_s_b((v16i8) in, 0); \
2210  out = (v8i16) __msa_ilvr_b(sign_m, (v16i8) in); \
2211 }
2212 
2213 /* Description : Sign extend halfword elements from right half of the vector
2214  Arguments : Inputs - in (input halfword vector)
2215  Outputs - out (sign extended word vectors)
2216  Return Type - signed word
2217  Details : Sign bit of halfword elements from input vector 'in' is
2218  extracted and interleaved with the same vector 'in' to generate
2219  4 word elements keeping sign intact
2220 */
2221 #if HAVE_MSA2
2222 #define UNPCK_R_SH_SW(in, out) \
2223 { \
2224  out = (v4i32) __builtin_msa2_w2x_lo_s_h((v8i16) in); \
2225 }
2226 #else
2227 #define UNPCK_R_SH_SW(in, out) \
2228 { \
2229  v8i16 sign_m; \
2230  \
2231  sign_m = __msa_clti_s_h((v8i16) in, 0); \
2232  out = (v4i32) __msa_ilvr_h(sign_m, (v8i16) in); \
2233 }
2234 #endif // #if HAVE_MSA2
2235 
2236 /* Description : Sign extend byte elements from input vector and return
2237  halfword results in pair of vectors
2238  Arguments : Inputs - in (1 input byte vector)
2239  Outputs - out0, out1 (2 sign-extended halfword vectors)
2240  Return Type - signed halfword
2241  Details : Sign bit of byte elements from input vector 'in' is
2242  extracted and interleaved right with the same vector 'in' to
2243  generate 8 signed halfword elements in 'out0'
2244  Then interleaved left with the same vector 'in' to
2245  generate 8 signed halfword elements in 'out1'
2246 */
2247 #if HAVE_MSA2
2248 #define UNPCK_SB_SH(in, out0, out1) \
2249 { \
2250  out0 = (v8i16) __builtin_msa2_w2x_lo_s_b((v16i8) in); \
2251  out1 = (v8i16) __builtin_msa2_w2x_hi_s_b((v16i8) in); \
2252 }
2253 #else
2254 #define UNPCK_SB_SH(in, out0, out1) \
2255 { \
2256  v16i8 tmp_m; \
2257  \
2258  tmp_m = __msa_clti_s_b((v16i8) in, 0); \
2259  ILVRL_B2_SH(tmp_m, in, out0, out1); \
2260 }
2261 #endif // #if HAVE_MSA2
2262 
2263 /* Description : Zero extend unsigned byte elements to halfword elements
2264  Arguments : Inputs - in (1 input unsigned byte vector)
2265  Outputs - out0, out1 (2 zero-extended halfword vectors)
2266  Return Type - signed halfword
2267  Details : Zero extended right half of vector is returned in 'out0'
2268  Zero extended left half of vector is returned in 'out1'
2269 */
2270 #define UNPCK_UB_SH(in, out0, out1) \
2271 { \
2272  v16i8 zero_m = { 0 }; \
2273  \
2274  ILVRL_B2_SH(zero_m, in, out0, out1); \
2275 }
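
/* Usage sketch (illustrative only; 'psrc' is an assumed source pointer).
 * Widening unsigned pixels to halfwords before arithmetic that could
 * overflow a byte:
 *
 *     v16u8 src = LD_UB(psrc);
 *     v8i16 lo, hi;
 *     UNPCK_UB_SH(src, lo, hi);       // zero-extended pixel halfwords
 */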
2276 
2277 /* Description : Sign extend halfword elements from input vector and return
2278  result in pair of vectors
2279  Arguments : Inputs - in (1 input halfword vector)
2280  Outputs - out0, out1 (2 sign-extended word vectors)
2281  Return Type - signed word
2282  Details : Sign bit of halfword elements from input vector 'in' is
2283  extracted and interleaved right with the same vector 'in' to
2284  generate 4 signed word elements in 'out0'
2285  Then interleaved left with the same vector 'in' to
2286  generate 4 signed word elements in 'out1'
2287 */
2288 #if HAVE_MSA2
2289 #define UNPCK_SH_SW(in, out0, out1) \
2290 { \
2291  out0 = (v4i32) __builtin_msa2_w2x_lo_s_h((v8i16) in); \
2292  out1 = (v4i32) __builtin_msa2_w2x_hi_s_h((v8i16) in); \
2293 }
2294 #else
2295 #define UNPCK_SH_SW(in, out0, out1) \
2296 { \
2297  v8i16 tmp_m; \
2298  \
2299  tmp_m = __msa_clti_s_h((v8i16) in, 0); \
2300  ILVRL_H2_SW(tmp_m, in, out0, out1); \
2301 }
2302 #endif // #if HAVE_MSA2
2303 
2304 /* Description : Swap two variables
2305  Arguments : Inputs - in0, in1
2306  Outputs - in0, in1 (in-place)
2307  Details : Swapping of two input variables using xor (the two
2308  arguments must be distinct variables; aliasing zeroes both)
2308 */
2309 #define SWAP(in0, in1) \
2310 { \
2311  in0 = in0 ^ in1; \
2312  in1 = in0 ^ in1; \
2313  in0 = in0 ^ in1; \
2314 }
2315 
2316 /* Description : Butterfly of 4 input vectors
2317  Arguments : Inputs - in0, in1, in2, in3
2318  Outputs - out0, out1, out2, out3
2319  Details : Butterfly operation
2320 */
2321 #define BUTTERFLY_4(in0, in1, in2, in3, out0, out1, out2, out3) \
2322 { \
2323  out0 = in0 + in3; \
2324  out1 = in1 + in2; \
2325  \
2326  out2 = in1 - in2; \
2327  out3 = in0 - in3; \
2328 }
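
/* Usage sketch (illustrative only; operands are assumed). The 4-point
 * butterfly is the add/subtract stage at the heart of small DCT/IDCT
 * kernels:
 *
 *     BUTTERFLY_4(in0, in1, in2, in3, sum0, sum1, dif1, dif0);
 *     // sum0 = in0 + in3, sum1 = in1 + in2
 *     // dif1 = in1 - in2, dif0 = in0 - in3
 */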
2329 
2330 /* Description : Butterfly of 8 input vectors
2331  Arguments : Inputs - in0 ... in7
2332  Outputs - out0 .. out7
2333  Details : Butterfly operation
2334 */
2335 #define BUTTERFLY_8(in0, in1, in2, in3, in4, in5, in6, in7, \
2336  out0, out1, out2, out3, out4, out5, out6, out7) \
2337 { \
2338  out0 = in0 + in7; \
2339  out1 = in1 + in6; \
2340  out2 = in2 + in5; \
2341  out3 = in3 + in4; \
2342  \
2343  out4 = in3 - in4; \
2344  out5 = in2 - in5; \
2345  out6 = in1 - in6; \
2346  out7 = in0 - in7; \
2347 }
2348 
2349 /* Description : Butterfly of 16 input vectors
2350  Arguments : Inputs - in0 ... in15
2351  Outputs - out0 .. out15
2352  Details : Butterfly operation
2353 */
2354 #define BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7, \
2355  in8, in9, in10, in11, in12, in13, in14, in15, \
2356  out0, out1, out2, out3, out4, out5, out6, out7, \
2357  out8, out9, out10, out11, out12, out13, out14, out15) \
2358 { \
2359  out0 = in0 + in15; \
2360  out1 = in1 + in14; \
2361  out2 = in2 + in13; \
2362  out3 = in3 + in12; \
2363  out4 = in4 + in11; \
2364  out5 = in5 + in10; \
2365  out6 = in6 + in9; \
2366  out7 = in7 + in8; \
2367  \
2368  out8 = in7 - in8; \
2369  out9 = in6 - in9; \
2370  out10 = in5 - in10; \
2371  out11 = in4 - in11; \
2372  out12 = in3 - in12; \
2373  out13 = in2 - in13; \
2374  out14 = in1 - in14; \
2375  out15 = in0 - in15; \
2376 }
2377 
2378 /* Description : Transposes input 4x4 byte block
2379  Arguments : Inputs - in0, in1, in2, in3 (input 4x4 byte block)
2380  Outputs - out0, out1, out2, out3 (output 4x4 byte block)
2381  Return Type - unsigned byte
2382  Details :
2383 */
2384 #define TRANSPOSE4x4_UB_UB(in0, in1, in2, in3, out0, out1, out2, out3) \
2385 { \
2386  v16i8 zero_m = { 0 }; \
2387  v16i8 s0_m, s1_m, s2_m, s3_m; \
2388  \
2389  ILVR_D2_SB(in1, in0, in3, in2, s0_m, s1_m); \
2390  ILVRL_B2_SB(s1_m, s0_m, s2_m, s3_m); \
2391  \
2392  out0 = (v16u8) __msa_ilvr_b(s3_m, s2_m); \
2393  out1 = (v16u8) __msa_sldi_b(zero_m, (v16i8) out0, 4); \
2394  out2 = (v16u8) __msa_sldi_b(zero_m, (v16i8) out1, 4); \
2395  out3 = (v16u8) __msa_sldi_b(zero_m, (v16i8) out2, 4); \
2396 }
2397 
2398 /* Description : Transposes input 8x4 byte block into 4x8
2399  Arguments : Inputs - in0 ... in7 (input 8x4 byte block)
2400  Outputs - out0, out1, out2, out3 (output 4x8 byte block)
2401  Return Type - as per RTYPE
2402  Details :
2403 */
2404 #define TRANSPOSE8x4_UB(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
2405  out0, out1, out2, out3) \
2406 { \
2407  v16i8 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \
2408  \
2409  ILVEV_W2_SB(in0, in4, in1, in5, tmp0_m, tmp1_m); \
2410  tmp2_m = __msa_ilvr_b(tmp1_m, tmp0_m); \
2411  ILVEV_W2_SB(in2, in6, in3, in7, tmp0_m, tmp1_m); \
2412  \
2413  tmp3_m = __msa_ilvr_b(tmp1_m, tmp0_m); \
2414  ILVRL_H2_SB(tmp3_m, tmp2_m, tmp0_m, tmp1_m); \
2415  \
2416  ILVRL_W2(RTYPE, tmp1_m, tmp0_m, out0, out2); \
2417  out1 = (RTYPE) __msa_ilvl_d((v2i64) out2, (v2i64) out0); \
2418  out3 = (RTYPE) __msa_ilvl_d((v2i64) out0, (v2i64) out2); \
2419 }
2420 #define TRANSPOSE8x4_UB_UB(...) TRANSPOSE8x4_UB(v16u8, __VA_ARGS__)
2421 #define TRANSPOSE8x4_UB_UH(...) TRANSPOSE8x4_UB(v8u16, __VA_ARGS__)
2422 
2423 /* Description : Transposes input 8x8 byte block
2424  Arguments : Inputs - in0, in1, in2, in3, in4, in5, in6, in7
2425  (input 8x8 byte block)
2426  Outputs - out0, out1, out2, out3, out4, out5, out6, out7
2427  (output 8x8 byte block)
2428  Return Type - as per RTYPE
2429  Details :
2430 */
2431 #define TRANSPOSE8x8_UB(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
2432  out0, out1, out2, out3, out4, out5, out6, out7) \
2433 { \
2434  v16i8 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \
2435  v16i8 tmp4_m, tmp5_m, tmp6_m, tmp7_m; \
2436  \
2437  ILVR_B4_SB(in2, in0, in3, in1, in6, in4, in7, in5, \
2438  tmp0_m, tmp1_m, tmp2_m, tmp3_m); \
2439  ILVRL_B2_SB(tmp1_m, tmp0_m, tmp4_m, tmp5_m); \
2440  ILVRL_B2_SB(tmp3_m, tmp2_m, tmp6_m, tmp7_m); \
2441  ILVRL_W2(RTYPE, tmp6_m, tmp4_m, out0, out2); \
2442  ILVRL_W2(RTYPE, tmp7_m, tmp5_m, out4, out6); \
2443  SLDI_B2_0(RTYPE, out0, out2, out1, out3, 8); \
2444  SLDI_B2_0(RTYPE, out4, out6, out5, out7, 8); \
2445 }
2446 #define TRANSPOSE8x8_UB_UB(...) TRANSPOSE8x8_UB(v16u8, __VA_ARGS__)
2447 #define TRANSPOSE8x8_UB_UH(...) TRANSPOSE8x8_UB(v8u16, __VA_ARGS__)
2448 
2449 /* Description : Transposes 16x4 block into 4x16 with byte elements in vectors
2450  Arguments : Inputs - in0, in1, in2, in3, in4, in5, in6, in7,
2451  in8, in9, in10, in11, in12, in13, in14, in15
2452  Outputs - out0, out1, out2, out3
2453  Return Type - unsigned byte
2454  Details :
2455 */
2456 #define TRANSPOSE16x4_UB_UB(in0, in1, in2, in3, in4, in5, in6, in7, \
2457  in8, in9, in10, in11, in12, in13, in14, in15, \
2458  out0, out1, out2, out3) \
2459 { \
2460  v2i64 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \
2461  \
2462  ILVEV_W2_SD(in0, in4, in8, in12, tmp0_m, tmp1_m); \
2463  out1 = (v16u8) __msa_ilvev_d(tmp1_m, tmp0_m); \
2464  \
2465  ILVEV_W2_SD(in1, in5, in9, in13, tmp0_m, tmp1_m); \
2466  out3 = (v16u8) __msa_ilvev_d(tmp1_m, tmp0_m); \
2467  \
2468  ILVEV_W2_SD(in2, in6, in10, in14, tmp0_m, tmp1_m); \
2469  \
2470  tmp2_m = __msa_ilvev_d(tmp1_m, tmp0_m); \
2471  ILVEV_W2_SD(in3, in7, in11, in15, tmp0_m, tmp1_m); \
2472  \
2473  tmp3_m = __msa_ilvev_d(tmp1_m, tmp0_m); \
2474  ILVEV_B2_SD(out1, out3, tmp2_m, tmp3_m, tmp0_m, tmp1_m); \
2475  out0 = (v16u8) __msa_ilvev_h((v8i16) tmp1_m, (v8i16) tmp0_m); \
2476  out2 = (v16u8) __msa_ilvod_h((v8i16) tmp1_m, (v8i16) tmp0_m); \
2477  \
2478  tmp0_m = (v2i64) __msa_ilvod_b((v16i8) out3, (v16i8) out1); \
2479  tmp1_m = (v2i64) __msa_ilvod_b((v16i8) tmp3_m, (v16i8) tmp2_m); \
2480  out1 = (v16u8) __msa_ilvev_h((v8i16) tmp1_m, (v8i16) tmp0_m); \
2481  out3 = (v16u8) __msa_ilvod_h((v8i16) tmp1_m, (v8i16) tmp0_m); \
2482 }
2483 
2484 /* Description : Transposes 16x8 block into 8x16 with byte elements in vectors
2485  Arguments : Inputs - in0, in1, in2, in3, in4, in5, in6, in7,
2486  in8, in9, in10, in11, in12, in13, in14, in15
2487  Outputs - out0, out1, out2, out3, out4, out5, out6, out7
2488  Return Type - unsigned byte
2489  Details :
2490 */
2491 #define TRANSPOSE16x8_UB_UB(in0, in1, in2, in3, in4, in5, in6, in7, \
2492  in8, in9, in10, in11, in12, in13, in14, in15, \
2493  out0, out1, out2, out3, out4, out5, out6, out7) \
2494 { \
2495  v16u8 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \
2496  v16u8 tmp4_m, tmp5_m, tmp6_m, tmp7_m; \
2497  \
2498  ILVEV_D2_UB(in0, in8, in1, in9, out7, out6); \
2499  ILVEV_D2_UB(in2, in10, in3, in11, out5, out4); \
2500  ILVEV_D2_UB(in4, in12, in5, in13, out3, out2); \
2501  ILVEV_D2_UB(in6, in14, in7, in15, out1, out0); \
2502  \
2503  tmp0_m = (v16u8) __msa_ilvev_b((v16i8) out6, (v16i8) out7); \
2504  tmp4_m = (v16u8) __msa_ilvod_b((v16i8) out6, (v16i8) out7); \
2505  tmp1_m = (v16u8) __msa_ilvev_b((v16i8) out4, (v16i8) out5); \
2506  tmp5_m = (v16u8) __msa_ilvod_b((v16i8) out4, (v16i8) out5); \
2507  out5 = (v16u8) __msa_ilvev_b((v16i8) out2, (v16i8) out3); \
2508  tmp6_m = (v16u8) __msa_ilvod_b((v16i8) out2, (v16i8) out3); \
2509  out7 = (v16u8) __msa_ilvev_b((v16i8) out0, (v16i8) out1); \
2510  tmp7_m = (v16u8) __msa_ilvod_b((v16i8) out0, (v16i8) out1); \
2511  \
2512  ILVEV_H2_UB(tmp0_m, tmp1_m, out5, out7, tmp2_m, tmp3_m); \
2513  out0 = (v16u8) __msa_ilvev_w((v4i32) tmp3_m, (v4i32) tmp2_m); \
2514  out4 = (v16u8) __msa_ilvod_w((v4i32) tmp3_m, (v4i32) tmp2_m); \
2515  \
2516  tmp2_m = (v16u8) __msa_ilvod_h((v8i16) tmp1_m, (v8i16) tmp0_m); \
2517  tmp3_m = (v16u8) __msa_ilvod_h((v8i16) out7, (v8i16) out5); \
2518  out2 = (v16u8) __msa_ilvev_w((v4i32) tmp3_m, (v4i32) tmp2_m); \
2519  out6 = (v16u8) __msa_ilvod_w((v4i32) tmp3_m, (v4i32) tmp2_m); \
2520  \
2521  ILVEV_H2_UB(tmp4_m, tmp5_m, tmp6_m, tmp7_m, tmp2_m, tmp3_m); \
2522  out1 = (v16u8) __msa_ilvev_w((v4i32) tmp3_m, (v4i32) tmp2_m); \
2523  out5 = (v16u8) __msa_ilvod_w((v4i32) tmp3_m, (v4i32) tmp2_m); \
2524  \
2525  tmp2_m = (v16u8) __msa_ilvod_h((v8i16) tmp5_m, (v8i16) tmp4_m); \
2526  tmp3_m = (v16u8) __msa_ilvod_h((v8i16) tmp7_m, (v8i16) tmp6_m); \
2529  out3 = (v16u8) __msa_ilvev_w((v4i32) tmp3_m, (v4i32) tmp2_m); \
2530  out7 = (v16u8) __msa_ilvod_w((v4i32) tmp3_m, (v4i32) tmp2_m); \
2531 }
2532 
2533 /* Description : Transposes 4x4 block with half word elements in vectors
2534  Arguments : Inputs - in0, in1, in2, in3
2535  Outputs - out0, out1, out2, out3
2536  Return Type - signed halfword
2537  Details :
2538 */
2539 #define TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, out0, out1, out2, out3) \
2540 { \
2541  v8i16 s0_m, s1_m; \
2542  \
2543  ILVR_H2_SH(in1, in0, in3, in2, s0_m, s1_m); \
2544  ILVRL_W2_SH(s1_m, s0_m, out0, out2); \
2545  out1 = (v8i16) __msa_ilvl_d((v2i64) out0, (v2i64) out0); \
2546  out3 = (v8i16) __msa_ilvl_d((v2i64) out0, (v2i64) out2); \
2547 }
2548 
2549 /* Description : Transposes 8x8 block with half word elements in vectors
2550  Arguments : Inputs - in0, in1, in2, in3, in4, in5, in6, in7
2551  Outputs - out0, out1, out2, out3, out4, out5, out6, out7
2552  Return Type - as per RTYPE
2553  Details :
2554 */
2555 #define TRANSPOSE8x8_H(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
2556  out0, out1, out2, out3, out4, out5, out6, out7) \
2557 { \
2558  v8i16 s0_m, s1_m; \
2559  v8i16 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \
2560  v8i16 tmp4_m, tmp5_m, tmp6_m, tmp7_m; \
2561  \
2562  ILVR_H2_SH(in6, in4, in7, in5, s0_m, s1_m); \
2563  ILVRL_H2_SH(s1_m, s0_m, tmp0_m, tmp1_m); \
2564  ILVL_H2_SH(in6, in4, in7, in5, s0_m, s1_m); \
2565  ILVRL_H2_SH(s1_m, s0_m, tmp2_m, tmp3_m); \
2566  ILVR_H2_SH(in2, in0, in3, in1, s0_m, s1_m); \
2567  ILVRL_H2_SH(s1_m, s0_m, tmp4_m, tmp5_m); \
2568  ILVL_H2_SH(in2, in0, in3, in1, s0_m, s1_m); \
2569  ILVRL_H2_SH(s1_m, s0_m, tmp6_m, tmp7_m); \
2570  PCKEV_D4(RTYPE, tmp0_m, tmp4_m, tmp1_m, tmp5_m, tmp2_m, tmp6_m, \
2571  tmp3_m, tmp7_m, out0, out2, out4, out6); \
2572  out1 = (RTYPE) __msa_pckod_d((v2i64) tmp0_m, (v2i64) tmp4_m); \
2573  out3 = (RTYPE) __msa_pckod_d((v2i64) tmp1_m, (v2i64) tmp5_m); \
2574  out5 = (RTYPE) __msa_pckod_d((v2i64) tmp2_m, (v2i64) tmp6_m); \
2575  out7 = (RTYPE) __msa_pckod_d((v2i64) tmp3_m, (v2i64) tmp7_m); \
2576 }
2577 #define TRANSPOSE8x8_UH_UH(...) TRANSPOSE8x8_H(v8u16, __VA_ARGS__)
2578 #define TRANSPOSE8x8_SH_SH(...) TRANSPOSE8x8_H(v8i16, __VA_ARGS__)
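
/* Usage sketch (illustrative only; 'r0' .. 'r7' / 'c0' .. 'c7' are
 * assumed). A row-column transform processes rows, transposes, then
 * reuses the same row kernel for the columns:
 *
 *     TRANSPOSE8x8_SH_SH(r0, r1, r2, r3, r4, r5, r6, r7,
 *                        c0, c1, c2, c3, c4, c5, c6, c7);
 */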
2579 
2580 /* Description : Transposes 4x4 block with word elements in vectors
2581  Arguments : Inputs - in0, in1, in2, in3
2582  Outputs - out0, out1, out2, out3
2583  Return Type - signed word
2584  Details :
2585 */
2586 #define TRANSPOSE4x4_SW_SW(in0, in1, in2, in3, out0, out1, out2, out3) \
2587 { \
2588  v4i32 s0_m, s1_m, s2_m, s3_m; \
2589  \
2590  ILVRL_W2_SW(in1, in0, s0_m, s1_m); \
2591  ILVRL_W2_SW(in3, in2, s2_m, s3_m); \
2592  \
2593  out0 = (v4i32) __msa_ilvr_d((v2i64) s2_m, (v2i64) s0_m); \
2594  out1 = (v4i32) __msa_ilvl_d((v2i64) s2_m, (v2i64) s0_m); \
2595  out2 = (v4i32) __msa_ilvr_d((v2i64) s3_m, (v2i64) s1_m); \
2596  out3 = (v4i32) __msa_ilvl_d((v2i64) s3_m, (v2i64) s1_m); \
2597 }
2598 
2599 /* Description : Average byte elements from pair of vectors and store 8x4 byte
2600  block in destination memory
2601  Arguments : Inputs - in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride
2602  Details : Byte elements from input vector pairs ('in0', 'in1'),
2603  ('in2', 'in3'), ('in4', 'in5') and ('in6', 'in7') are
2604  averaged as (a + b) / 2 and stored in 'tmp0_m' to 'tmp3_m'
2605  The lower half of each result vector is stored to
2606  destination memory as an 8x4 byte block
2612 */
2613 #define AVE_ST8x4_UB(in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride) \
2614 { \
2615  uint64_t out0_m, out1_m, out2_m, out3_m; \
2616  v16u8 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \
2617  \
2618  tmp0_m = __msa_ave_u_b((v16u8) in0, (v16u8) in1); \
2619  tmp1_m = __msa_ave_u_b((v16u8) in2, (v16u8) in3); \
2620  tmp2_m = __msa_ave_u_b((v16u8) in4, (v16u8) in5); \
2621  tmp3_m = __msa_ave_u_b((v16u8) in6, (v16u8) in7); \
2622  \
2623  out0_m = __msa_copy_u_d((v2i64) tmp0_m, 0); \
2624  out1_m = __msa_copy_u_d((v2i64) tmp1_m, 0); \
2625  out2_m = __msa_copy_u_d((v2i64) tmp2_m, 0); \
2626  out3_m = __msa_copy_u_d((v2i64) tmp3_m, 0); \
2627  SD4(out0_m, out1_m, out2_m, out3_m, pdst, stride); \
2628 }
2629 
2630 /* Description : Average byte elements from pair of vectors and store 16x4 byte
2631  block in destination memory
2632  Arguments : Inputs - in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride
2633  Details : Byte elements from input vector pairs ('in0', 'in1'),
2634  ('in2', 'in3'), ('in4', 'in5') and ('in6', 'in7') are
2635  averaged as (a + b) / 2 and stored in 'tmp0_m' to 'tmp3_m'
2636  The four result vectors are stored to destination memory
2637  as a 16x4 byte block
2643 */
2644 #define AVE_ST16x4_UB(in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride) \
2645 { \
2646  v16u8 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \
2647  \
2648  tmp0_m = __msa_ave_u_b((v16u8) in0, (v16u8) in1); \
2649  tmp1_m = __msa_ave_u_b((v16u8) in2, (v16u8) in3); \
2650  tmp2_m = __msa_ave_u_b((v16u8) in4, (v16u8) in5); \
2651  tmp3_m = __msa_ave_u_b((v16u8) in6, (v16u8) in7); \
2652  \
2653  ST_UB4(tmp0_m, tmp1_m, tmp2_m, tmp3_m, pdst, stride); \
2654 }
2655 
2656 /* Description : Average rounded byte elements from pair of vectors and store
2657  8x4 byte block in destination memory
2658  Arguments : Inputs - in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride
2659  Details : Byte elements from input vector pairs ('in0', 'in1'),
2660  ('in2', 'in3'), ('in4', 'in5') and ('in6', 'in7') are
2661  averaged with rounding as (a + b + 1) / 2
2662  The lower half of each result vector is stored to
2663  destination memory as an 8x4 byte block
2669 */
2670 #define AVER_ST8x4_UB(in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride) \
2671 { \
2672  uint64_t out0_m, out1_m, out2_m, out3_m; \
2673  v16u8 tp0_m, tp1_m, tp2_m, tp3_m; \
2674  \
2675  AVER_UB4_UB(in0, in1, in2, in3, in4, in5, in6, in7, \
2676  tp0_m, tp1_m, tp2_m, tp3_m); \
2677  \
2678  out0_m = __msa_copy_u_d((v2i64) tp0_m, 0); \
2679  out1_m = __msa_copy_u_d((v2i64) tp1_m, 0); \
2680  out2_m = __msa_copy_u_d((v2i64) tp2_m, 0); \
2681  out3_m = __msa_copy_u_d((v2i64) tp3_m, 0); \
2682  SD4(out0_m, out1_m, out2_m, out3_m, pdst, stride); \
2683 }
2684 
2685 /* Description : Average rounded byte elements from pair of vectors and store
2686  16x4 byte block in destination memory
2687  Arguments : Inputs - in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride
2688  Details : Byte elements from input vector pairs ('in0', 'in1'),
2689  ('in2', 'in3'), ('in4', 'in5') and ('in6', 'in7') are
2690  averaged with rounding as (a + b + 1) / 2
2691  The four result vectors are stored to destination memory
2692  as a 16x4 byte block
2698 */
2699 #define AVER_ST16x4_UB(in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride) \
2700 { \
2701  v16u8 t0_m, t1_m, t2_m, t3_m; \
2702  \
2703  AVER_UB4_UB(in0, in1, in2, in3, in4, in5, in6, in7, \
2704  t0_m, t1_m, t2_m, t3_m); \
2705  ST_UB4(t0_m, t1_m, t2_m, t3_m, pdst, stride); \
2706 }
2707 
2708 /* Description : Average rounded byte elements from pair of vectors,
2709  average rounded with destination and store 8x4 byte block
2710  in destination memory
2711  Arguments : Inputs - in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride
2712  Details : Byte elements from input vector pairs ('in0', 'in1'),
2713  ('in2', 'in3'), ('in4', 'in5') and ('in6', 'in7') are
2714  averaged with rounding as (a + b + 1) / 2, averaged again
2715  with the corresponding destination vectors, and the lower
2716  half of each result is stored to destination memory as an
2717  8x4 byte block
2722 */
2723 #define AVER_DST_ST8x4_UB(in0, in1, in2, in3, in4, in5, in6, in7, \
2724  pdst, stride) \
2725 { \
2726  v16u8 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \
2727  v16u8 dst0_m, dst1_m, dst2_m, dst3_m; \
2728  \
2729  LD_UB4(pdst, stride, dst0_m, dst1_m, dst2_m, dst3_m); \
2730  AVER_UB4_UB(in0, in1, in2, in3, in4, in5, in6, in7, \
2731  tmp0_m, tmp1_m, tmp2_m, tmp3_m); \
2732  AVER_ST8x4_UB(dst0_m, tmp0_m, dst1_m, tmp1_m, \
2733  dst2_m, tmp2_m, dst3_m, tmp3_m, pdst, stride); \
2734 }
2735 
2736 /* Description : Average rounded byte elements from pair of vectors,
2737  average rounded with destination and store 16x4 byte block
2738  in destination memory
2739  Arguments : Inputs - in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride
2740  Details : Byte elements from input vector pairs ('in0', 'in1'),
2741  ('in2', 'in3'), ('in4', 'in5') and ('in6', 'in7') are
2742  averaged with rounding as (a + b + 1) / 2, averaged again
2743  with the corresponding destination vectors, and the four
2744  results are stored to destination memory as a 16x4 byte
2745  block
2750 */
2751 #define AVER_DST_ST16x4_UB(in0, in1, in2, in3, in4, in5, in6, in7, \
2752  pdst, stride) \
2753 { \
2754  v16u8 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \
2755  v16u8 dst0_m, dst1_m, dst2_m, dst3_m; \
2756  \
2757  LD_UB4(pdst, stride, dst0_m, dst1_m, dst2_m, dst3_m); \
2758  AVER_UB4_UB(in0, in1, in2, in3, in4, in5, in6, in7, \
2759  tmp0_m, tmp1_m, tmp2_m, tmp3_m); \
2760  AVER_ST16x4_UB(dst0_m, tmp0_m, dst1_m, tmp1_m, \
2761  dst2_m, tmp2_m, dst3_m, tmp3_m, pdst, stride); \
2762 }
2763 
2764 /* Description : Add block 4x4
2765  Arguments : Inputs - in0, in1, in2, in3, pdst, stride
2766  Details : Least significant 4 bytes from each input vector are added to
2767  the destination bytes, clipped to the range 0-255 and then stored.
2768 */
2769 #define ADDBLK_ST4x4_UB(in0, in1, in2, in3, pdst, stride) \
2770 { \
2771  uint32_t src0_m, src1_m, src2_m, src3_m; \
2772  uint32_t out0_m, out1_m, out2_m, out3_m; \
2773  v8i16 inp0_m, inp1_m, res0_m, res1_m; \
2774  v16i8 dst0_m = { 0 }; \
2775  v16i8 dst1_m = { 0 }; \
2776  v16i8 zero_m = { 0 }; \
2777  \
2778  ILVR_D2_SH(in1, in0, in3, in2, inp0_m, inp1_m); \
2779  LW4(pdst, stride, src0_m, src1_m, src2_m, src3_m); \
2780  INSERT_W2_SB(src0_m, src1_m, dst0_m); \
2781  INSERT_W2_SB(src2_m, src3_m, dst1_m); \
2782  ILVR_B2_SH(zero_m, dst0_m, zero_m, dst1_m, res0_m, res1_m); \
2783  ADD2(res0_m, inp0_m, res1_m, inp1_m, res0_m, res1_m); \
2784  CLIP_SH2_0_255(res0_m, res1_m); \
2785  PCKEV_B2_SB(res0_m, res0_m, res1_m, res1_m, dst0_m, dst1_m); \
2786  \
2787  out0_m = __msa_copy_u_w((v4i32) dst0_m, 0); \
2788  out1_m = __msa_copy_u_w((v4i32) dst0_m, 1); \
2789  out2_m = __msa_copy_u_w((v4i32) dst1_m, 0); \
2790  out3_m = __msa_copy_u_w((v4i32) dst1_m, 1); \
2791  SW4(out0_m, out1_m, out2_m, out3_m, pdst, stride); \
2792 }
2793 
2794 /* Description : Dot product and addition of 3 signed halfword input vectors
2795  Arguments : Inputs - in0, in1, in2, coeff0, coeff1, coeff2
2796  Outputs - out0_m
2797  Return Type - signed halfword
2798  Details : Dot product of 'in0' with 'coeff0'
2799  Dot product of 'in1' with 'coeff1'
2800  Dot product of 'in2' with 'coeff2'
2801  Addition of all the 3 vector results
2802 
2803  out0_m = (in0 * coeff0) + (in1 * coeff1) + (in2 * coeff2)
2804 */
2805 #define DPADD_SH3_SH(in0, in1, in2, coeff0, coeff1, coeff2) \
2806 ( { \
2807  v8i16 out0_m; \
2808  \
2809  out0_m = __msa_dotp_s_h((v16i8) in0, (v16i8) coeff0); \
2810  out0_m = __msa_dpadd_s_h(out0_m, (v16i8) in1, (v16i8) coeff1); \
2811  out0_m = __msa_dpadd_s_h(out0_m, (v16i8) in2, (v16i8) coeff2); \
2812  \
2813  out0_m; \
2814 } )
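
/* Usage sketch (illustrative only; 'vec0' .. 'vec2' and 'filt0' .. 'filt2'
 * are assumed v16i8 operands). A 6-tap filter can be evaluated as three
 * byte-pair dot products accumulated into one halfword vector:
 *
 *     v8i16 res = DPADD_SH3_SH(vec0, vec1, vec2, filt0, filt1, filt2);
 */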
2815 
2816 /* Description : Pack even elements of input vectors & xor with 128
2817  Arguments : Inputs - in0, in1
2818  Outputs - out_m
2819  Return Type - unsigned byte
2820  Details : Signed byte even elements from 'in0' and 'in1' are packed
2821  together in one vector and the resulting vector is xor'ed with
2822  128 to shift the range from signed to unsigned byte
2823 */
2824 #define PCKEV_XORI128_UB(in0, in1) \
2825 ( { \
2826  v16u8 out_m; \
2827  out_m = (v16u8) __msa_pckev_b((v16i8) in1, (v16i8) in0); \
2828  out_m = (v16u8) __msa_xori_b((v16u8) out_m, 128); \
2829  out_m; \
2830 } )
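
/* Usage sketch (illustrative only; 'res0'/'res1' and 'pdst' are assumed).
 * The pack + xor combination converts signed filter output straight into
 * storable unsigned pixels:
 *
 *     v16u8 out = PCKEV_XORI128_UB(res0, res1);
 *     ST_UB(out, pdst);
 */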
2831 
2832 /* Description : Converts inputs to unsigned bytes, averages them with the
2833  destination vectors & stores as an 8x4 unsigned byte block
2834  Arguments : Inputs - in0, in1, in2, in3, dst0, dst1, pdst, stride
2835 */
2836 #define CONVERT_UB_AVG_ST8x4_UB(in0, in1, in2, in3, \
2837  dst0, dst1, pdst, stride) \
2838 { \
2839  v16u8 tmp0_m, tmp1_m; \
2840  uint8_t *pdst_m = (uint8_t *) (pdst); \
2841  \
2842  tmp0_m = PCKEV_XORI128_UB(in0, in1); \
2843  tmp1_m = PCKEV_XORI128_UB(in2, in3); \
2844  AVER_UB2_UB(tmp0_m, dst0, tmp1_m, dst1, tmp0_m, tmp1_m); \
2845  ST_D4(tmp0_m, tmp1_m, 0, 1, 0, 1, pdst_m, stride); \
2846 }
2847 
2848 /* Description : Pack even byte elements, extract 0 & 2 index words from pair
2849  of results and store 4 words in destination memory as per
2850  stride
2851  Arguments : Inputs - in0, in1, in2, in3, pdst, stride
2852 */
2853 #define PCKEV_ST4x4_UB(in0, in1, in2, in3, pdst, stride) \
2854 { \
2855  uint32_t out0_m, out1_m, out2_m, out3_m; \
2856  v16i8 tmp0_m, tmp1_m; \
2857  \
2858  PCKEV_B2_SB(in1, in0, in3, in2, tmp0_m, tmp1_m); \
2859  \
2860  out0_m = __msa_copy_u_w((v4i32) tmp0_m, 0); \
2861  out1_m = __msa_copy_u_w((v4i32) tmp0_m, 2); \
2862  out2_m = __msa_copy_u_w((v4i32) tmp1_m, 0); \
2863  out3_m = __msa_copy_u_w((v4i32) tmp1_m, 2); \
2864  \
2865  SW4(out0_m, out1_m, out2_m, out3_m, pdst, stride); \
2866 }
2867 
2868 /* Description : Pack even byte elements and store byte vector in destination
2869  memory
2870  Arguments : Inputs - in0, in1, pdst
2871 */
2872 #define PCKEV_ST_SB(in0, in1, pdst) \
2873 { \
2874  v16i8 tmp_m; \
2875  tmp_m = __msa_pckev_b((v16i8) in1, (v16i8) in0); \
2876  ST_SB(tmp_m, (pdst)); \
2877 }
2878 
2879 /* Description : Horizontal 2 tap filter kernel code
2880  Arguments : Inputs - in0, in1, mask, coeff, shift
2881 */
2882 #define HORIZ_2TAP_FILT_UH(in0, in1, mask, coeff, shift) \
2883 ( { \
2884  v16i8 tmp0_m; \
2885  v8u16 tmp1_m; \
2886  \
2887  tmp0_m = __msa_vshf_b((v16i8) mask, (v16i8) in1, (v16i8) in0); \
2888  tmp1_m = __msa_dotp_u_h((v16u8) tmp0_m, (v16u8) coeff); \
2889  tmp1_m = (v8u16) __msa_srari_h((v8i16) tmp1_m, shift); \
2890  tmp1_m = __msa_sat_u_h(tmp1_m, shift); \
2891  \
2892  tmp1_m; \
2893 } )
2894 #endif /* AVUTIL_MIPS_GENERIC_MACROS_MSA_H */