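/*
 * AltiVec-optimized integer utility functions for DSPContext:
 * int8/int16 sum of squared differences, int16 vector add/subtract,
 * and a shifted int16 scalar product.
 */
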
#include "libavcodec/dsputil.h"

#include "gcc_fixes.h"

#include "dsputil_altivec.h"

#include "types_altivec.h"

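/* Sum of squared differences between an int8_t and an int16_t array,
 * 16 elements per iteration: each int8 lane is sign-extended to int16,
 * differenced against pix2, and the squares are accumulated with
 * vec_msum. Returns sum((pix1[i] - pix2[i])^2) over `size` elements. */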
static int ssd_int8_vs_int16_altivec(const int8_t *pix1, const int16_t *pix2,
                                     int size)
{
    int i, size16;
    vector signed char vpix1;
    vector signed short vpix2, vdiff, vpix1l, vpix1h;
    union {
        vector signed int vscore;
        int32_t score[4];
    } u;

    u.vscore = vec_splat_s32(0);

/* Load 16 unaligned bytes starting at b by permuting the two aligned
 * vectors that straddle the address. */
#define vec_unaligned_load(b) \
    vec_perm(vec_ld(0, b), vec_ld(15, b), vec_lvsl(0, b))

    size16 = size >> 4;
    while (size16) {
        /* Load 16 int8 from pix1 and the first 8 int16 from pix2. */
        vpix1 = vec_unaligned_load(pix1);
        vpix2 = vec_unaligned_load(pix2);
        pix2 += 8;

        /* Sign-extend the high half of pix1 to int16 and diff it
         * against the first batch of pix2. */
        vpix1h = vec_unpackh(vpix1);
        vdiff  = vec_sub(vpix1h, vpix2);
        vpix1l = vec_unpackl(vpix1);

        /* Second batch of 8 int16 from pix2 for the low half. */
        vpix2 = vec_unaligned_load(pix2);
        u.vscore = vec_msum(vdiff, vdiff, u.vscore);
        vdiff = vec_sub(vpix1l, vpix2);
        u.vscore = vec_msum(vdiff, vdiff, u.vscore);
        pix1 += 16;
        pix2 += 8;
        size16--;
    }
    u.vscore = vec_sums(u.vscore, vec_splat_s32(0));

    /* Handle the remaining (size % 16) elements in scalar code,
     * accumulating into element 3 where vec_sums left the total. */
    size %= 16;
    for (i = 0; i < size; i++) {
        u.score[3] += (pix1[i] - pix2[i]) * (pix1[i] - pix2[i]);
    }
    return u.score[3];
}

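/* v1[i] += v2[i], 8 int16 at a time. v1 must be 16-byte aligned;
 * v2 may be unaligned and is loaded with a vec_perm of the two
 * aligned vectors around it. */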
static void add_int16_altivec(int16_t *v1, int16_t *v2, int order)
{
    int i;
    register vec_s16 vec, *pv;

    for (i = 0; i < order; i += 8) {
        pv = (vec_s16 *) v2;
        vec = vec_perm(pv[0], pv[1], vec_lvsl(0, v2));
        vec_st(vec_add(vec_ld(0, v1), vec), 0, v1);
        v1 += 8;
        v2 += 8;
    }
}

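/* v1[i] -= v2[i], 8 int16 at a time; same alignment contract as
 * add_int16_altivec. */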
static void sub_int16_altivec(int16_t *v1, int16_t *v2, int order)
{
    int i;
    register vec_s16 vec, *pv;

    for (i = 0; i < order; i += 8) {
        pv = (vec_s16 *) v2;
        vec = vec_perm(pv[0], pv[1], vec_lvsl(0, v2));
        vec_st(vec_sub(vec_ld(0, v1), vec), 0, v1);
        v1 += 8;
        v2 += 8;
    }
}

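/* Dot product of two int16 arrays: vec_msum produces 32-bit partial
 * sums of two products each, and each partial sum is shifted right by
 * `shift` before being accumulated. */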
static int32_t scalarproduct_int16_altivec(int16_t *v1, int16_t *v2, int order, const int shift)
{
    int i;
    LOAD_ZERO;
    register vec_s16 vec1, *pv;
    register vec_s32 res = vec_splat_s32(0), t;
    register vec_u32 shifts;
    DECLARE_ALIGNED_16(int32_t, ires);

    /* Build the shift-count vector. vec_splat_u32 only takes a 5-bit
     * signed immediate (-16..15), so 16 is synthesized as 8 << 1. */
    shifts = zero_u32v;
    if (shift & 0x10) shifts = vec_add(shifts, vec_sl(vec_splat_u32(0x08), vec_splat_u32(0x1)));
    if (shift & 0x08) shifts = vec_add(shifts, vec_splat_u32(0x08));
    if (shift & 0x04) shifts = vec_add(shifts, vec_splat_u32(0x04));
    if (shift & 0x02) shifts = vec_add(shifts, vec_splat_u32(0x02));
    if (shift & 0x01) shifts = vec_add(shifts, vec_splat_u32(0x01));

    for (i = 0; i < order; i += 8) {
        /* Unaligned load of v1; v2 is assumed 16-byte aligned. */
        pv = (vec_s16 *) v1;
        vec1 = vec_perm(pv[0], pv[1], vec_lvsl(0, v1));
        t = vec_msum(vec1, vec_ld(0, v2), zero_s32v);
        t = vec_sr(t, shifts);
        res = vec_sums(t, res);
        v1 += 8;
        v2 += 8;
    }
    /* vec_sums leaves the accumulated total in element 3. */
    res = vec_splat(res, 3);
    vec_ste(res, 0, &ires);
    return ires;
}

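/* Install the AltiVec implementations into the DSPContext. */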
void int_init_altivec(DSPContext *c, AVCodecContext *avctx)
{
    c->ssd_int8_vs_int16 = ssd_int8_vs_int16_altivec;
    c->add_int16 = add_int16_altivec;
    c->sub_int16 = sub_int16_altivec;
    c->scalarproduct_int16 = scalarproduct_int16_altivec;
}