idct_altivec.c
/*
 * Copyright (c) 2001 Michel Lespinasse
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/* NOTE: This code is based on GPL code from the libmpeg2 project. The
 * author, Michel Lespinasse, has given explicit permission to release
 * under LGPL as part of FFmpeg.
 *
 * FFmpeg integration by Dieter Shirley
 *
 * This file is a direct copy of the AltiVec IDCT module from the libmpeg2
 * project. I've deleted all of the libmpeg2-specific code, renamed the
 * functions and reordered the function parameters. The only change to the
 * IDCT function itself was to factor out the partial transposition, and to
 * perform a full transpose at the end of the function. */

#include <stdlib.h>
#include <string.h>
#include "config.h"
#if HAVE_ALTIVEC_H
#include <altivec.h>
#endif

#include "types_altivec.h"   /* vec_s16, vec_u8, vec_u32 typedefs */
#include "dsputil_altivec.h"

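/* One 1-D 8-point IDCT pass, performed on eight vectors at once: each
 * vec_s16 holds eight 16-bit lanes, so the four butterfly stages below
 * compute eight independent 1-D transforms in parallel, one per lane.
 * vec_mradds(a, b, c) computes saturate(((a * b + 0x4000) >> 15) + c),
 * i.e. a rounding Q15 fixed-point multiply-accumulate, which is why the
 * constants are all stored scaled by 2^15. */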
#define IDCT_HALF                                       \
    /* 1st stage */                                     \
    t1 = vec_mradds(a1, vx7, vx1);                      \
    t8 = vec_mradds(a1, vx1, vec_subs(zero, vx7));      \
    t7 = vec_mradds(a2, vx5, vx3);                      \
    t3 = vec_mradds(ma2, vx3, vx5);                     \
                                                        \
    /* 2nd stage */                                     \
    t5 = vec_adds(vx0, vx4);                            \
    t0 = vec_subs(vx0, vx4);                            \
    t2 = vec_mradds(a0, vx6, vx2);                      \
    t4 = vec_mradds(a0, vx2, vec_subs(zero, vx6));      \
    t6 = vec_adds(t8, t3);                              \
    t3 = vec_subs(t8, t3);                              \
    t8 = vec_subs(t1, t7);                              \
    t1 = vec_adds(t1, t7);                              \
                                                        \
    /* 3rd stage */                                     \
    t7 = vec_adds(t5, t2);                              \
    t2 = vec_subs(t5, t2);                              \
    t5 = vec_adds(t0, t4);                              \
    t0 = vec_subs(t0, t4);                              \
    t4 = vec_subs(t8, t3);                              \
    t3 = vec_adds(t8, t3);                              \
                                                        \
    /* 4th stage */                                     \
    vy0 = vec_adds(t7, t1);                             \
    vy7 = vec_subs(t7, t1);                             \
    vy1 = vec_mradds(c4, t3, t5);                       \
    vy6 = vec_mradds(mc4, t3, t5);                      \
    vy2 = vec_mradds(c4, t4, t0);                       \
    vy5 = vec_mradds(mc4, t4, t0);                      \
    vy3 = vec_adds(t2, t6);                             \
    vy4 = vec_subs(t2, t6)

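/* Full 2-D 8x8 IDCT. The input coefficients are shifted left by 4 and
 * premultiplied by the prescale vectors constants[1..4], run through one
 * IDCT_HALF pass, transposed with three rounds of mergeh/mergel (the
 * rounding bias for the final shift is folded into the DC vector during
 * the last round), run through a second IDCT_HALF pass, and scaled back
 * down with an arithmetic right shift by 6. */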
#define IDCT                                                               \
    vec_s16 vy0, vy1, vy2, vy3, vy4, vy5, vy6, vy7;                        \
    vec_s16 t0, t1, t2, t3, t4, t5, t6, t7, t8;                            \
                                                                           \
    vec_s16 c4   = vec_splat(constants[0], 0);                             \
    vec_s16 a0   = vec_splat(constants[0], 1);                             \
    vec_s16 a1   = vec_splat(constants[0], 2);                             \
    vec_s16 a2   = vec_splat(constants[0], 3);                             \
    vec_s16 mc4  = vec_splat(constants[0], 4);                             \
    vec_s16 ma2  = vec_splat(constants[0], 5);                             \
    vec_s16 bias = (vec_s16) vec_splat((vec_s32) constants[0], 3);         \
                                                                           \
    vec_s16 zero  = vec_splat_s16(0);                                      \
    vec_u16 shift = vec_splat_u16(4);                                      \
                                                                           \
    vec_s16 vx0 = vec_mradds(vec_sl(block[0], shift), constants[1], zero); \
    vec_s16 vx1 = vec_mradds(vec_sl(block[1], shift), constants[2], zero); \
    vec_s16 vx2 = vec_mradds(vec_sl(block[2], shift), constants[3], zero); \
    vec_s16 vx3 = vec_mradds(vec_sl(block[3], shift), constants[4], zero); \
    vec_s16 vx4 = vec_mradds(vec_sl(block[4], shift), constants[1], zero); \
    vec_s16 vx5 = vec_mradds(vec_sl(block[5], shift), constants[4], zero); \
    vec_s16 vx6 = vec_mradds(vec_sl(block[6], shift), constants[3], zero); \
    vec_s16 vx7 = vec_mradds(vec_sl(block[7], shift), constants[2], zero); \
                                                                           \
    IDCT_HALF;                                                             \
                                                                           \
    vx0 = vec_mergeh(vy0, vy4);                                            \
    vx1 = vec_mergel(vy0, vy4);                                            \
    vx2 = vec_mergeh(vy1, vy5);                                            \
    vx3 = vec_mergel(vy1, vy5);                                            \
    vx4 = vec_mergeh(vy2, vy6);                                            \
    vx5 = vec_mergel(vy2, vy6);                                            \
    vx6 = vec_mergeh(vy3, vy7);                                            \
    vx7 = vec_mergel(vy3, vy7);                                            \
                                                                           \
    vy0 = vec_mergeh(vx0, vx4);                                            \
    vy1 = vec_mergel(vx0, vx4);                                            \
    vy2 = vec_mergeh(vx1, vx5);                                            \
    vy3 = vec_mergel(vx1, vx5);                                            \
    vy4 = vec_mergeh(vx2, vx6);                                            \
    vy5 = vec_mergel(vx2, vx6);                                            \
    vy6 = vec_mergeh(vx3, vx7);                                            \
    vy7 = vec_mergel(vx3, vx7);                                            \
                                                                           \
    vx0 = vec_adds(vec_mergeh(vy0, vy4), bias);                            \
    vx1 = vec_mergel(vy0, vy4);                                            \
    vx2 = vec_mergeh(vy1, vy5);                                            \
    vx3 = vec_mergel(vy1, vy5);                                            \
    vx4 = vec_mergeh(vy2, vy6);                                            \
    vx5 = vec_mergel(vy2, vy6);                                            \
    vx6 = vec_mergeh(vy3, vy7);                                            \
    vx7 = vec_mergel(vy3, vy7);                                            \
                                                                           \
    IDCT_HALF;                                                             \
                                                                           \
    shift = vec_splat_u16(6);                                              \
    vx0 = vec_sra(vy0, shift);                                             \
    vx1 = vec_sra(vy1, shift);                                             \
    vx2 = vec_sra(vy2, shift);                                             \
    vx3 = vec_sra(vy3, shift);                                             \
    vx4 = vec_sra(vy4, shift);                                             \
    vx5 = vec_sra(vy5, shift);                                             \
    vx6 = vec_sra(vy6, shift);                                             \
    vx7 = vec_sra(vy7, shift)

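/* constants[0] packs the butterfly coefficients in Q15 fixed point:
 * cos(pi/4) = 23170, tan(pi/8) = 13573, tan(pi/16) = 6518,
 * tan(3*pi/16) = 21895, the negations of cos(pi/4) and tan(3*pi/16),
 * and the {32, 31} rounding-bias pair. constants[1..4] are the
 * cosine-based input prescale vectors, also in Q15. */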
static const vec_s16 constants[5] = {
    { 23170, 13573,  6518, 21895, -23170, -21895,    32,    31 },
    { 16384, 22725, 21407, 19266,  16384,  19266, 21407, 22725 },
    { 22725, 31521, 29692, 26722,  22725,  26722, 29692, 31521 },
    { 21407, 29692, 27969, 25172,  21407,  25172, 27969, 29692 },
    { 19266, 26722, 25172, 22654,  19266,  22654, 25172, 26722 }
};

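/* In-place IDCT: blk points to 64 coefficients (eight vec_s16 rows) and
 * must be 16-byte aligned for the vector loads and stores. */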
void ff_idct_altivec(int16_t *blk)
{
    vec_s16 *block = (vec_s16 *) blk;

    IDCT;

    block[0] = vx0;
    block[1] = vx1;
    block[2] = vx2;
    block[3] = vx3;
    block[4] = vx4;
    block[5] = vx5;
    block[6] = vx6;
    block[7] = vx7;
}

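/* IDCT followed by a store: each result row is clamped to 0..255 with
 * vec_packsu and written to dest as two 32-bit element stores. */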
void ff_idct_put_altivec(uint8_t *dest, int stride, int16_t *blk)
{
    vec_s16 *block = (vec_s16 *) blk;
    vec_u8 tmp;

    IDCT;

#define COPY(dest, src)                                     \
    tmp = vec_packsu(src, src);                             \
    vec_ste((vec_u32) tmp, 0, (unsigned int *) dest);       \
    vec_ste((vec_u32) tmp, 4, (unsigned int *) dest)

    COPY(dest, vx0);
    dest += stride;
    COPY(dest, vx1);
    dest += stride;
    COPY(dest, vx2);
    dest += stride;
    COPY(dest, vx3);
    dest += stride;
    COPY(dest, vx4);
    dest += stride;
    COPY(dest, vx5);
    dest += stride;
    COPY(dest, vx6);
    dest += stride;
    COPY(dest, vx7);
}

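/* IDCT followed by an add: eight destination pixels are loaded per row,
 * zero-extended to 16 bits through a permute (perm0/perm1, built from
 * vec_lvsl, also compensate for any misalignment of dest), summed with
 * the IDCT output using saturating adds, clamped back to 0..255 and
 * stored. */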
void ff_idct_add_altivec(uint8_t *dest, int stride, int16_t *blk)
{
    vec_s16 *block = (vec_s16 *) blk;
    vec_u8 tmp;
    vec_s16 tmp2, tmp3;
    vec_u8 perm0;
    vec_u8 perm1;
    vec_u8 p0, p1, p;

    IDCT;

    p0    = vec_lvsl(0, dest);
    p1    = vec_lvsl(stride, dest);
    p     = vec_splat_u8(-1);
    perm0 = vec_mergeh(p, p0);
    perm1 = vec_mergeh(p, p1);

#define ADD(dest, src, perm)                                \
    /* *(uint64_t *) &tmp = *(uint64_t *) dest; */          \
    tmp  = vec_ld(0, dest);                                 \
    tmp2 = (vec_s16) vec_perm(tmp, (vec_u8) zero, perm);    \
    tmp3 = vec_adds(tmp2, src);                             \
    tmp  = vec_packsu(tmp3, tmp3);                          \
    vec_ste((vec_u32) tmp, 0, (unsigned int *) dest);       \
    vec_ste((vec_u32) tmp, 4, (unsigned int *) dest)

    ADD(dest, vx0, perm0);
    dest += stride;
    ADD(dest, vx1, perm1);
    dest += stride;
    ADD(dest, vx2, perm0);
    dest += stride;
    ADD(dest, vx3, perm1);
    dest += stride;
    ADD(dest, vx4, perm0);
    dest += stride;
    ADD(dest, vx5, perm1);
    dest += stride;
    ADD(dest, vx6, perm0);
    dest += stride;
    ADD(dest, vx7, perm1);
}
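
/* Usage sketch (hypothetical caller, not part of this file): every entry
 * point expects a 16-byte-aligned block of 64 coefficients, e.g.
 *
 *     DECLARE_ALIGNED(16, int16_t, block)[64];
 *
 *     ff_idct_altivec(block);                    // IDCT in place
 *     ff_idct_put_altivec(dest, stride, block);  // IDCT, overwrite dest
 *     ff_idct_add_altivec(dest, stride, block);  // IDCT, add into dest
 *
 * where dest/stride describe an 8x8 region of a uint8_t pixel plane. */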