FFmpeg
int_altivec.c
/*
 * Copyright (c) 2007 Luca Barbato <lu_zero@gentoo.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * miscellaneous integer operations
 */

26 #include "config.h"
27 #if HAVE_ALTIVEC_H
28 #include <altivec.h>
29 #endif
30 
32 #include "libavcodec/dsputil.h"
33 
34 #include "dsputil_altivec.h"
35 
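/* Sum of squared differences between an int8_t block and an int16_t block:
 * returns the sum over i of (pix1[i] - pix2[i])^2. Sixteen elements are
 * processed per loop iteration with vec_msum; a scalar loop handles the
 * remaining size % 16 elements. */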
static int ssd_int8_vs_int16_altivec(const int8_t *pix1, const int16_t *pix2,
                                     int size) {
    int i, size16;
    vector signed char vpix1;
    vector signed short vpix2, vdiff, vpix1l, vpix1h;
    union {
        vector signed int vscore;
        int32_t score[4];
    } u;
    u.vscore = vec_splat_s32(0);

// XXX lazy way, fix it later

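/* Emulate an unaligned 16-byte load: the two aligned quadwords spanning b
 * are loaded with vec_ld() and merged with the permute vector returned by
 * vec_lvsl() for the misaligned address. */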
#define vec_unaligned_load(b) \
    vec_perm(vec_ld(0, b), vec_ld(15, b), vec_lvsl(0, b))

    size16 = size >> 4;
    while (size16) {
        // score += (pix1[i] - pix2[i]) * (pix1[i] - pix2[i]);
        // load pix1 and the first batch of pix2
        vpix1 = vec_unaligned_load(pix1);
        vpix2 = vec_unaligned_load(pix2);
        pix2 += 8;
        // unpack
        vpix1h = vec_unpackh(vpix1);
        vdiff  = vec_sub(vpix1h, vpix2);
        vpix1l = vec_unpackl(vpix1);
        // load another batch from pix2
        vpix2 = vec_unaligned_load(pix2);
        u.vscore = vec_msum(vdiff, vdiff, u.vscore);
        vdiff  = vec_sub(vpix1l, vpix2);
        u.vscore = vec_msum(vdiff, vdiff, u.vscore);
        pix1 += 16;
        pix2 += 8;
        size16--;
    }
    u.vscore = vec_sums(u.vscore, vec_splat_s32(0));

    size %= 16;
    for (i = 0; i < size; i++) {
        u.score[3] += (pix1[i] - pix2[i]) * (pix1[i] - pix2[i]);
    }
    return u.score[3];
}
80 
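/* Dot product of two int16_t vectors, returned as an int32_t.
 * order is assumed to be a multiple of 8; v2 is loaded with vec_ld(), so it
 * must be 16-byte aligned, while v1 may be unaligned (it is loaded through a
 * vec_lvsl() permute). */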
static int32_t scalarproduct_int16_altivec(const int16_t *v1, const int16_t *v2,
                                           int order)
{
    int i;
    LOAD_ZERO;
    const vec_s16 *pv;
    register vec_s16 vec1;
    register vec_s32 res = vec_splat_s32(0), t;
    int32_t ires;

    for (i = 0; i < order; i += 8) {
        pv   = (const vec_s16 *)v1;
        vec1 = vec_perm(pv[0], pv[1], vec_lvsl(0, v1));
        t    = vec_msum(vec1, vec_ld(0, v2), zero_s32v);
        res  = vec_sums(t, res);
        v1  += 8;
        v2  += 8;
    }
    res = vec_splat(res, 3);
    vec_ste(res, 0, &ires);
    return ires;
}
103 
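/* Computes the dot product of v1 and v2 while simultaneously doing
 * v1[i] += mul * v3[i]. order is assumed to be a multiple of 16; v1 must be
 * 16-byte aligned (it is written back through pv1 with vector stores), while
 * v2 and v3 may be unaligned but are assumed to share the same alignment
 * offset, since a single vec_lvsl() permute vector is used for both. */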
static int32_t scalarproduct_and_madd_int16_altivec(int16_t *v1, const int16_t *v2,
                                                    const int16_t *v3, int order,
                                                    int mul)
{
    LOAD_ZERO;
    vec_s16 *pv1 = (vec_s16 *)v1;
    register vec_s16 muls = {mul, mul, mul, mul, mul, mul, mul, mul};
    register vec_s16 t0, t1, i0, i1, i4;
    register vec_s16 i2 = vec_ld(0, v2), i3 = vec_ld(0, v3);
    register vec_s32 res = zero_s32v;
    register vec_u8 align = vec_lvsl(0, v2);
    int32_t ires;

    order >>= 4;
    do {
        i1 = vec_ld(16, v2);
        t0 = vec_perm(i2, i1, align);
        i2 = vec_ld(32, v2);
        t1 = vec_perm(i1, i2, align);
        i0 = pv1[0];
        i1 = pv1[1];
        res = vec_msum(t0, i0, res);
        res = vec_msum(t1, i1, res);
        i4 = vec_ld(16, v3);
        t0 = vec_perm(i3, i4, align);
        i3 = vec_ld(32, v3);
        t1 = vec_perm(i4, i3, align);
        pv1[0] = vec_mladd(t0, muls, i0);
        pv1[1] = vec_mladd(t1, muls, i1);
        pv1 += 2;
        v2  += 8;
        v3  += 8;
    } while (--order);
    res = vec_splat(vec_sums(res, zero_s32v), 3);
    vec_ste(res, 0, &ires);
    return ires;
}

/* Note: the listing elides this function's signature and body; the
 * registration below is a sketch of the usual dsputil init entry point,
 * reconstructed as an assumption from the functions defined above. */
void int_init_altivec(DSPContext *c, AVCodecContext *avctx)
{
    c->ssd_int8_vs_int16            = ssd_int8_vs_int16_altivec;
    c->scalarproduct_int16          = scalarproduct_int16_altivec;
    c->scalarproduct_and_madd_int16 = scalarproduct_and_madd_int16_altivec;
}