FFmpeg
mpegvideodsp.c
/*
 * GMC (Global Motion Compensation), AltiVec-enabled
 *
 * Copyright (c) 2003 Romain Dolbeau <romain@dolbeau.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/cpu.h"
#include "libavutil/mem.h"
#include "libavutil/ppc/cpu.h"
#include "libavutil/ppc/util_altivec.h"

#include "libavcodec/mpegvideodsp.h"

#if HAVE_ALTIVEC
/* AltiVec-enhanced gmc1. At the moment this code assumes stride is a
 * multiple of 8 to preserve proper dst alignment. */
static void gmc1_altivec(uint8_t *dst /* align 8 */, uint8_t *src /* align 1 */,
                         int stride, int h, int x16, int y16, int rounder)
{
    int i;
    const DECLARE_ALIGNED(16, unsigned short, rounder_a) = rounder;
    const DECLARE_ALIGNED(16, unsigned short, ABCD)[8] = {
        (16 - x16) * (16 - y16), /* A */
        (x16)      * (16 - y16), /* B */
        (16 - x16) * (y16),      /* C */
        (x16)      * (y16),      /* D */
        0, 0, 0, 0               /* padding */
    };
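    /* The four weights form a 2x2 bilinear kernel; they always sum to
     * 16 * 16 = 256, which the final shift right by 8 normalizes away. */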
    register const vector unsigned char vczero =
        (const vector unsigned char) vec_splat_u8(0);
    register const vector unsigned short vcsr8 =
        (const vector unsigned short) vec_splat_u16(8);
    register vector unsigned char dstv, dstv2, srcvB, srcvC, srcvD;
    register vector unsigned short tempB, tempC, tempD;
    unsigned long dst_odd = (unsigned long) dst & 0x0000000F;
    unsigned long src_really_odd = (unsigned long) src & 0x0000000F;
    register vector unsigned short tempA =
        vec_ld(0, (const unsigned short *) ABCD);
    register vector unsigned short Av = vec_splat(tempA, 0);
    register vector unsigned short Bv = vec_splat(tempA, 1);
    register vector unsigned short Cv = vec_splat(tempA, 2);
    register vector unsigned short Dv = vec_splat(tempA, 3);
    register vector unsigned short rounderV =
        vec_splat((vec_u16) vec_lde(0, &rounder_a), 0);

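    /* At this point Av..Dv and rounderV each hold one 16-bit weight
     * broadcast across all eight lanes, ready for vec_mladd below. */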
    /* We will pick up our 9 char elements at src from those 32 bytes.
     * We load the first batch here; inside the loop we can reuse
     * 'src + stride' from one iteration as the 'src' of the next. */
    register vector unsigned char src_0 = vec_ld(0, src);
    register vector unsigned char src_1 = vec_ld(16, src);
    register vector unsigned char srcvA = vec_perm(src_0, src_1,
                                                   vec_lvsl(0, src));

    if (src_really_odd != 0x0000000F)
        /* If (src & 0xF) == 0xF, then (src + 1) is properly aligned
         * on the second vector. */
        srcvB = vec_perm(src_0, src_1, vec_lvsl(1, src));
    else
        srcvB = src_1;
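    /* Interleaving with the zero vector widens the first eight 8-bit
     * samples of each vector to 16-bit lanes for the multiplies. */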
    srcvA = vec_mergeh(vczero, srcvA);
    srcvB = vec_mergeh(vczero, srcvB);

    for (i = 0; i < h; i++) {
        dst_odd = (unsigned long) dst & 0x0000000F;
        src_really_odd = (((unsigned long) src) + stride) & 0x0000000F;

        dstv = vec_ld(0, dst);

        /* We will pick up our 9 char elements at src + stride from those
         * 32 bytes, then reuse the resulting two vectors srcvC and srcvD
         * as the next srcvA and srcvB. */
        src_0 = vec_ld(stride + 0, src);
        src_1 = vec_ld(stride + 16, src);
        srcvC = vec_perm(src_0, src_1, vec_lvsl(stride + 0, src));

        if (src_really_odd != 0x0000000F)
            /* If (src & 0xF) == 0xF, then (src + 1) is properly aligned
             * on the second vector. */
            srcvD = vec_perm(src_0, src_1, vec_lvsl(stride + 1, src));
        else
            srcvD = src_1;

        srcvC = vec_mergeh(vczero, srcvC);
        srcvD = vec_mergeh(vczero, srcvD);

        /* OK, now we (finally) do the math :-)
         * Those four instructions replace 32 int muls & 32 int adds.
         * Isn't AltiVec nice? */
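        /* Per output pixel this accumulates
         * A * s[x] + B * s[x+1] + C * s[x+stride] + D * s[x+stride+1]
         * on top of the rounder; the shift right by 8 below divides the
         * sum by 256, the total weight. */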
        tempA = vec_mladd((vector unsigned short) srcvA, Av, rounderV);
        tempB = vec_mladd((vector unsigned short) srcvB, Bv, tempA);
        tempC = vec_mladd((vector unsigned short) srcvC, Cv, tempB);
        tempD = vec_mladd((vector unsigned short) srcvD, Dv, tempC);

        srcvA = srcvC;
        srcvB = srcvD;

        tempD = vec_sr(tempD, vcsr8);

        dstv2 = vec_pack(tempD, (vector unsigned short) vczero);

        if (dst_odd)
            dstv2 = vec_perm(dstv, dstv2, vcprm(0, 1, s0, s1));
        else
            dstv2 = vec_perm(dstv, dstv2, vcprm(s0, s1, 2, 3));
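        /* Only eight result bytes were produced, but vec_st writes 16
         * aligned bytes; merging with the previously loaded dstv stores
         * the neighboring eight bytes of dst back unchanged. */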

        vec_st(dstv2, 0, dst);

        dst += stride;
        src += stride;
    }
}
#endif /* HAVE_ALTIVEC */
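
For reference, here is a minimal scalar sketch of the interpolation the AltiVec loop above vectorizes. It is illustrative only and is not part of this file (FFmpeg's portable fallback lives in libavcodec/mpegvideodsp.c); the function name is this sketch's own.

/* Scalar sketch: 2x2 bilinear blend with 1/16-pel weights over an
 * 8-pixel-wide block, mirroring what gmc1_altivec computes per row. */
static void gmc1_scalar_sketch(uint8_t *dst, uint8_t *src, int stride,
                               int h, int x16, int y16, int rounder)
{
    const int A = (16 - x16) * (16 - y16);
    const int B = (x16)      * (16 - y16);
    const int C = (16 - x16) * (y16);
    const int D = (x16)      * (y16); /* A + B + C + D == 256 */

    for (int i = 0; i < h; i++) {
        for (int x = 0; x < 8; x++)
            dst[x] = (A * src[x]          + B * src[x + 1] +
                      C * src[x + stride] + D * src[x + stride + 1] +
                      rounder) >> 8;
        dst += stride;
        src += stride;
    }
}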

av_cold void ff_mpegvideodsp_init_ppc(MpegVideoDSPContext *c)
{
#if HAVE_ALTIVEC
    if (!PPC_ALTIVEC(av_get_cpu_flags()))
        return;

    c->gmc1 = gmc1_altivec;
#endif /* HAVE_ALTIVEC */
}
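
To show how this initializer is typically consumed, here is a hypothetical caller, sketched under the assumption that the generic ff_mpegvideodsp_init() (in libavcodec/mpegvideodsp.c) fills in the C fallbacks and then lets per-architecture initializers such as ff_mpegvideodsp_init_ppc() override the function pointers. All variable names here are illustrative.

/* Hypothetical usage sketch; in real code the context would be
 * initialized once, not per block. */
static void blend_gmc1_block(uint8_t *dst, uint8_t *src, int stride,
                             int motion_x, int motion_y, int rounder)
{
    MpegVideoDSPContext dsp;

    ff_mpegvideodsp_init(&dsp);            /* installs gmc1_altivec on AltiVec CPUs */
    dsp.gmc1(dst, src, stride, 8,          /* 8 rows of one block */
             motion_x & 15, motion_y & 15, /* 1/16-pel fractional offsets */
             rounder);
}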