FFmpeg
 All Data Structures Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
fmtconvert_altivec.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2006 Luca Barbato <lu_zero@gentoo.org>
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include "libavcodec/fmtconvert.h"
22 
24 #include "libavutil/attributes.h"
25 #include "libavutil/mem.h"
26 #include "dsputil_altivec.h"
27 
28 static void int32_to_float_fmul_scalar_altivec(float *dst, const int *src,
29  float mul, int len)
30 {
31  union {
32  vector float v;
33  float s[4];
34  } mul_u;
35  int i;
36  vector float src1, src2, dst1, dst2, mul_v, zero;
37 
38  zero = (vector float)vec_splat_u32(0);
39  mul_u.s[0] = mul;
40  mul_v = vec_splat(mul_u.v, 0);
41 
42  for (i = 0; i < len; i += 8) {
43  src1 = vec_ctf(vec_ld(0, src+i), 0);
44  src2 = vec_ctf(vec_ld(16, src+i), 0);
45  dst1 = vec_madd(src1, mul_v, zero);
46  dst2 = vec_madd(src2, mul_v, zero);
47  vec_st(dst1, 0, dst+i);
48  vec_st(dst2, 16, dst+i);
49  }
50 }
51 
52 
53 static vector signed short float_to_int16_one_altivec(const float *src)
54 {
55  vector float s0 = vec_ld(0, src);
56  vector float s1 = vec_ld(16, src);
57  vector signed int t0 = vec_cts(s0, 0);
58  vector signed int t1 = vec_cts(s1, 0);
59  return vec_packs(t0,t1);
60 }
61 
/**
 * Convert an array of floats to saturated int16_t samples.
 * Works in chunks of 8 samples; a tail of fewer than 8 samples is left
 * unconverted (callers are expected to pass a suitable len).
 * src must be 16-byte aligned (vec_ld in float_to_int16_one_altivec);
 * dst may be unaligned — handled by the permute path below.
 */
static void float_to_int16_altivec(int16_t *dst, const float *src, long len)
{
    int i;
    vector signed short d0, d1, d;
    vector unsigned char align;
    if (((long)dst) & 15) { //FIXME
        /* Unaligned destination: an unaligned 16-byte store is emulated by
         * loading the two aligned vectors the result straddles, merging the
         * new samples in with permutes, and storing both vectors back so
         * bytes outside the target range are preserved. */
        for (i = 0; i < len - 7; i += 8) {
            d0 = vec_ld(0, dst+i);                       /* aligned vector covering the start of dst+i */
            d = float_to_int16_one_altivec(src + i);     /* the 8 freshly converted samples */
            d1 = vec_ld(15, dst+i);                      /* aligned vector covering the end of dst+i+7 */
            d1 = vec_perm(d1, d0, vec_lvsl(0, dst + i)); /* gather the old bytes surrounding the target range */
            align = vec_lvsr(0, dst + i);                /* shift pattern for dst's misalignment */
            d0 = vec_perm(d1, d, align);                 /* first store: old head bytes + start of new data */
            d1 = vec_perm(d, d1, align);                 /* second store: end of new data + old tail bytes */
            vec_st(d0, 0, dst + i);
            vec_st(d1, 15, dst + i);
        }
    } else {
        /* Aligned destination: direct vector stores. */
        for (i = 0; i < len - 7; i += 8) {
            d = float_to_int16_one_altivec(src + i);
            vec_st(d, 0, dst + i);
        }
    }
}
86 
/* Store element `elem` of vector `v` through `dst` as a single int16_t,
 * then advance `dst` by `inc` elements.  vec_ste writes only the lane
 * selected by the address, so the wanted lane is splatted across the
 * whole vector first to guarantee it lands at *dst. */
#define VSTE_INC(dst, v, elem, inc) do { \
        vector signed short s = vec_splat(v, elem); \
        vec_ste(s, 0, dst); \
        dst += inc; \
    } while (0)
92 
93 static void float_to_int16_stride_altivec(int16_t *dst, const float *src,
94  long len, int stride)
95 {
96  int i;
97  vector signed short d;
98 
99  for (i = 0; i < len - 7; i += 8) {
100  d = float_to_int16_one_altivec(src + i);
101  VSTE_INC(dst, d, 0, stride);
102  VSTE_INC(dst, d, 1, stride);
103  VSTE_INC(dst, d, 2, stride);
104  VSTE_INC(dst, d, 3, stride);
105  VSTE_INC(dst, d, 4, stride);
106  VSTE_INC(dst, d, 5, stride);
107  VSTE_INC(dst, d, 6, stride);
108  VSTE_INC(dst, d, 7, stride);
109  }
110 }
111 
/**
 * Interleave per-channel float buffers into a single int16_t stream.
 * len is the per-channel sample count, processed in chunks of 8.
 *  - channels == 1: plain conversion via float_to_int16_altivec().
 *  - channels == 2: vec_mergeh/vec_mergel interleave the two channels'
 *    samples pairwise; an extra permute path handles unaligned dst.
 *  - otherwise: one strided scalar-store pass per channel.
 */
static void float_to_int16_interleave_altivec(int16_t *dst, const float **src,
                                              long len, int channels)
{
    int i;
    vector signed short d0, d1, d2, c0, c1, t0, t1;
    vector unsigned char align;

    if (channels == 1)
        float_to_int16_altivec(dst, src[0], len);
    else {
        if (channels == 2) {
            if (((long)dst) & 15) {
                /* Unaligned stereo output: each iteration writes 32 bytes of
                 * interleaved data.  Load the aligned vectors the output
                 * straddles, merge in the new samples with permutes, and
                 * store three vectors so surrounding bytes are preserved
                 * (same technique as float_to_int16_altivec above). */
                for (i = 0; i < len - 7; i += 8) {
                    d0 = vec_ld(0, dst + i);                     /* aligned vector at the start of the range */
                    t0 = float_to_int16_one_altivec(src[0] + i); /* left channel, 8 samples */
                    d1 = vec_ld(31, dst + i);                    /* aligned vector at the end of the range */
                    t1 = float_to_int16_one_altivec(src[1] + i); /* right channel, 8 samples */
                    c0 = vec_mergeh(t0, t1);                     /* L0 R0 L1 R1 L2 R2 L3 R3 */
                    c1 = vec_mergel(t0, t1);                     /* L4 R4 L5 R5 L6 R6 L7 R7 */
                    d2 = vec_perm(d1, d0, vec_lvsl(0, dst + i)); /* old bytes surrounding the target range */
                    align = vec_lvsr(0, dst + i);                /* shift pattern for dst's misalignment */
                    d0 = vec_perm(d2, c0, align);                /* old head bytes + start of new data */
                    d1 = vec_perm(c0, c1, align);                /* middle of new data */
                    vec_st(d0, 0, dst + i);
                    d0 = vec_perm(c1, d2, align);                /* end of new data + old tail bytes */
                    vec_st(d1, 15, dst + i);
                    vec_st(d0, 31, dst + i);
                    dst += 8;                                    /* advance past this channel-pair's half; i covers the rest */
                }
            } else {
                /* Aligned stereo output: merge and store directly. */
                for (i = 0; i < len - 7; i += 8) {
                    t0 = float_to_int16_one_altivec(src[0] + i);
                    t1 = float_to_int16_one_altivec(src[1] + i);
                    d0 = vec_mergeh(t0, t1);
                    d1 = vec_mergel(t0, t1);
                    vec_st(d0, 0, dst + i);
                    vec_st(d1, 16, dst + i);
                    dst += 8;
                }
            }
        } else {
            /* Generic channel count: scatter each channel with stride. */
            for (i = 0; i < channels; i++)
                float_to_int16_stride_altivec(dst + i, src[i], len, channels);
        }
    }
}
158 
160 {
162  if (!(avctx->flags & CODEC_FLAG_BITEXACT)) {
165  }
166 }