[FFmpeg-devel] [PATCH] Altivec split-radix FFT
Loren Merritt
lorenm
Thu Aug 27 14:44:28 CEST 2009
Now with MDCT, and tested on PPC64.
--Loren Merritt
-------------- next part --------------
From 8c0971cee38de3e989cd9543edf73e5b5e4fa824 Mon Sep 17 00:00:00 2001
From: Loren Merritt <pengvado@akuvian.org>
Date: Sat, 22 Aug 2009 11:22:09 +0100
Subject: [PATCH 1/3] altivec split-radix FFT
1.8x faster than altivec radix-2 on a G4
8% faster vorbis decoding
---
libavcodec/Makefile | 1 +
libavcodec/fft.c | 1 -
libavcodec/ppc/fft_altivec.c | 151 +++++++------------
libavcodec/ppc/fft_altivec_s.S | 320 ++++++++++++++++++++++++++++++++++++++++
libavcodec/ppc/types_altivec.h | 1 +
5 files changed, 374 insertions(+), 100 deletions(-)
create mode 100644 libavcodec/ppc/fft_altivec_s.S
diff --git a/libavcodec/Makefile b/libavcodec/Makefile
index 24e6af4..5a9113b 100644
--- a/libavcodec/Makefile
+++ b/libavcodec/Makefile
@@ -523,6 +523,7 @@ OBJS-$(HAVE_ALTIVEC) += ppc/check_altivec.o \
ppc/dsputil_altivec.o \
ppc/fdct_altivec.o \
ppc/fft_altivec.o \
+ ppc/fft_altivec_s.o \
ppc/float_altivec.o \
ppc/gmc_altivec.o \
ppc/idct_altivec.o \
diff --git a/libavcodec/fft.c b/libavcodec/fft.c
index a3f1151..d864f82 100644
--- a/libavcodec/fft.c
+++ b/libavcodec/fft.c
@@ -110,7 +110,6 @@ av_cold int ff_fft_init(FFTContext *s, int nbits, int inverse)
has_vectors = mm_support();
if (has_vectors & FF_MM_ALTIVEC) {
s->fft_calc = ff_fft_calc_altivec;
- split_radix = 0;
}
#endif
diff --git a/libavcodec/ppc/fft_altivec.c b/libavcodec/ppc/fft_altivec.c
index 7391131..1def80d 100644
--- a/libavcodec/ppc/fft_altivec.c
+++ b/libavcodec/ppc/fft_altivec.c
@@ -1,8 +1,7 @@
/*
* FFT/IFFT transforms
* AltiVec-enabled
- * Copyright (c) 2003 Romain Dolbeau <romain at dolbeau.org>
- * Based on code Copyright (c) 2002 Fabrice Bellard
+ * Copyright (c) 2009 Loren Merritt
*
* This file is part of FFmpeg.
*
@@ -23,6 +22,7 @@
#include "libavcodec/dsputil.h"
#include "dsputil_ppc.h"
#include "util_altivec.h"
+#include "types_altivec.h"
/**
* Do a complex FFT with the parameters defined in ff_fft_init(). The
* input data must be permuted before with s->revtab table. No
@@ -30,106 +30,59 @@
* AltiVec-enabled
* This code assumes that the 'z' pointer is 16-byte aligned
* It also assumes that each FFTComplex is an 8-byte-aligned pair of floats
- * The code is exactly the same as the SSE version, except
- * that successive MUL + ADD/SUB have been merged into
- * fused multiply-add ('vec_madd' in altivec)
*/
-void ff_fft_calc_altivec(FFTContext *s, FFTComplex *z)
-{
-POWERPC_PERF_DECLARE(altivec_fft_num, s->nbits >= 6);
- register const vector float vczero = (const vector float)vec_splat_u32(0.);
- int ln = s->nbits;
- int j, np, np2;
- int nblocks, nloops;
- register FFTComplex *p, *q;
- FFTComplex *cptr, *cptr1;
- int k;
-
-POWERPC_PERF_START_COUNT(altivec_fft_num, s->nbits >= 6);
-
- np = 1 << ln;
-
- {
- vector float *r, a, b, a1, c1, c2;
-
- r = (vector float *)&z[0];
-
- c1 = vcii(p,p,n,n);
-
- if (s->inverse) {
- c2 = vcii(p,p,n,p);
- } else {
- c2 = vcii(p,p,p,n);
- }
-
- j = (np >> 2);
- do {
- a = vec_ld(0, r);
- a1 = vec_ld(sizeof(vector float), r);
-
- b = vec_perm(a,a,vcprmle(1,0,3,2));
- a = vec_madd(a,c1,b);
- /* do the pass 0 butterfly */
-
- b = vec_perm(a1,a1,vcprmle(1,0,3,2));
- b = vec_madd(a1,c1,b);
- /* do the pass 0 butterfly */
-
- /* multiply third by -i */
- b = vec_perm(b,b,vcprmle(2,3,1,0));
-
- /* do the pass 1 butterfly */
- vec_st(vec_madd(b,c2,a), 0, r);
- vec_st(vec_nmsub(b,c2,a), sizeof(vector float), r);
-
- r += 2;
- } while (--j != 0);
- }
- /* pass 2 .. ln-1 */
-
- nblocks = np >> 3;
- nloops = 1 << 2;
- np2 = np >> 1;
-
- cptr1 = s->exptab1;
- do {
- p = z;
- q = z + nloops;
- j = nblocks;
- do {
- cptr = cptr1;
- k = nloops >> 1;
- do {
- vector float a,b,c,t1;
-
- a = vec_ld(0, (float*)p);
- b = vec_ld(0, (float*)q);
-
- /* complex mul */
- c = vec_ld(0, (float*)cptr);
- /* cre*re cim*re */
- t1 = vec_madd(c, vec_perm(b,b,vcprmle(2,2,0,0)),vczero);
- c = vec_ld(sizeof(vector float), (float*)cptr);
- /* -cim*im cre*im */
- b = vec_madd(c, vec_perm(b,b,vcprmle(3,3,1,1)),t1);
-
- /* butterfly */
- vec_st(vec_add(a,b), 0, (float*)p);
- vec_st(vec_sub(a,b), 0, (float*)q);
-
- p += 2;
- q += 2;
- cptr += 4;
- } while (--k);
-
- p += nloops;
- q += nloops;
- } while (--j);
- cptr1 += nloops * 2;
- nblocks = nblocks >> 1;
- nloops = nloops << 1;
- } while (nblocks != 0);
-
-POWERPC_PERF_STOP_COUNT(altivec_fft_num, s->nbits >= 6);
-}
+extern FFTSample ff_cos_16[];
+// Pointers to functions. Unlike function pointers on some PPC ABIs, these are not function descriptors.
+extern void *ff_fft_dispatch_altivec[2][15];
+
+// convert from simd order to C order
+static void swizzle(vec_f *z, int n)
+{
+ int i;
+ n >>= 1;
+ for(i=0; i<n; i+=2) {
+ vec_f re = z[i];
+ vec_f im = z[i+1];
+ z[i] = vec_mergeh(re, im);
+ z[i+1] = vec_mergel(re, im);
+ }
+}
+
+void ff_fft_calc_altivec(FFTContext *s, FFTComplex *z)
+{
+ register vec_f v14 __asm__("v14") = {0,0,0,0};
+ register vec_f v15 __asm__("v15") = *(const vec_f*)ff_cos_16;
+ register vec_f v16 __asm__("v16") = {0, 0.38268343, M_SQRT1_2, 0.92387953};
+ register vec_f v17 __asm__("v17") = {-M_SQRT1_2, M_SQRT1_2, M_SQRT1_2,-M_SQRT1_2};
+ register vec_f v18 __asm__("v18") = { M_SQRT1_2, M_SQRT1_2, M_SQRT1_2, M_SQRT1_2};
+ register vec_u8 v19 __asm__("v19") = vcprm(s0,3,2,1);
+ register vec_u8 v20 __asm__("v20") = vcprm(0,1,s2,s1);
+ register vec_u8 v21 __asm__("v21") = vcprm(2,3,s0,s3);
+ register vec_u8 v22 __asm__("v22") = vcprm(2,s3,3,s2);
+ register vec_u8 v23 __asm__("v23") = vcprm(0,1,s0,s1);
+ register vec_u8 v24 __asm__("v24") = vcprm(2,3,s2,s3);
+ register vec_u8 v25 __asm__("v25") = vcprm(2,3,0,1);
+ register vec_u8 v26 __asm__("v26") = vcprm(1,2,s3,s0);
+ register vec_u8 v27 __asm__("v27") = vcprm(0,3,s2,s1);
+ register vec_u8 v28 __asm__("v28") = vcprm(0,2,s1,s3);
+ register vec_u8 v29 __asm__("v29") = vcprm(1,3,s0,s2);
+ register FFTSample **cos_tabs __asm__("r12") = ff_cos_tabs;
+ register FFTComplex *zarg __asm__("r3") = z;
+ __asm__(
+ "mtctr %0 \n"
+ "li 9,16 \n"
+ "subi 1,1,%1 \n"
+ "bctrl \n"
+ "addi 1,1,%1 \n"
+ ::"r"(ff_fft_dispatch_altivec[1][s->nbits-2]), "i"(12*sizeof(void*)),
+ "r"(zarg), "r"(cos_tabs),
+ "v"(v14),"v"(v15),"v"(v16),"v"(v17),"v"(v18),"v"(v19),"v"(v20),"v"(v21),
+ "v"(v22),"v"(v23),"v"(v24),"v"(v25),"v"(v26),"v"(v27),"v"(v28),"v"(v29)
+ : "lr","ctr","r0","r1","r4","r5","r6","r7","r8","r9","r10","r11",
+ "v0","v1","v2","v3","v4","v5","v6","v7","v8","v9","v10","v11","v12","v13"
+ );
+ if(s->nbits <= 4)
+ swizzle((vec_f*)z, 1<<s->nbits);
+}
+
diff --git a/libavcodec/ppc/fft_altivec_s.S b/libavcodec/ppc/fft_altivec_s.S
new file mode 100644
index 0000000..048089e
--- /dev/null
+++ b/libavcodec/ppc/fft_altivec_s.S
@@ -0,0 +1,320 @@
+/*
+ * FFT transform with Altivec optimizations
+ * Copyright (c) 2009 Loren Merritt
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/*
+ * These functions are not individually interchangeable with the C versions.
+ * While C takes arrays of FFTComplex, Altivec leaves intermediate results
+ * in blocks as convenient to the vector size.
+ * i.e. {4x real, 4x imaginary, 4x real, ...}
+ *
+ * I ignore the standard calling convention.
+ * Instead, the following registers are treated as global constants:
+ * v14: zero
+ * v15..v18: cosines
+ * v19..v29: permutations
+ * r9: 16
+ * r12: ff_cos_tabs
+ * and the rest are free for local use.
+ */
+
+.text
+
+.macro slwi ra, rb, imm
+ rlwinm \ra, \rb, \imm, 0, 31-\imm
+.endm
+
+.macro addi2 ra, imm // add 32-bit immediate
+.if \imm & 0xffff
+ addi \ra, \ra, \imm@l
+.endif
+.if (\imm+0x8000)>>16
+ addis \ra, \ra, ((\imm+0x8000)>>16) + (\imm&0x800000000000)*0x1fffe
+.endif
+.endm
+
+#if _ARCH_PPC64
+#define PTR .quad
+.macro LOAD_PTR ra, rbase, offset
+ ld \ra,(\offset)*8(\rbase)
+.endm
+.macro STORE_PTR ra, rbase, offset
+ std \ra,(\offset)*8(\rbase)
+.endm
+#else
+#define PTR .int
+.macro LOAD_PTR ra, rbase, offset
+ lwz \ra,(\offset)*4(\rbase)
+.endm
+.macro STORE_PTR ra, rbase, offset
+ stw \ra,(\offset)*4(\rbase)
+.endm
+#endif
+
+.macro FFT4 a0, a1, a2, a3 // in:0-1 out:2-3
+ vperm \a2,\a0,\a1,20 // vcprm(0,1,s2,s1) // {r0,i0,r3,i2}
+ vperm \a3,\a0,\a1,21 // vcprm(2,3,s0,s3) // {r1,i1,r2,i3}
+ vaddfp \a0,\a2,\a3 // {t1,t2,t6,t5}
+ vsubfp \a1,\a2,\a3 // {t3,t4,t8,t7}
+ vmrghw \a2,\a0,\a1 // vcprm(0,s0,1,s1) // {t1,t3,t2,t4}
+ vperm \a3,\a0,\a1,22 // vcprm(2,s3,3,s2) // {t6,t7,t5,t8}
+ vaddfp \a0,\a2,\a3 // {r0,r1,i0,i1}
+ vsubfp \a1,\a2,\a3 // {r2,r3,i2,i3}
+ vperm \a2,\a0,\a1,23 // vcprm(0,1,s0,s1) // {r0,r1,r2,r3}
+ vperm \a3,\a0,\a1,24 // vcprm(2,3,s2,s3) // {i0,i1,i2,i3}
+.endm
+
+.macro FFT4x2 a0, a1, b0, b1, a2, a3, b2, b3
+ vperm \a2,\a0,\a1,20 // vcprm(0,1,s2,s1) // {r0,i0,r3,i2}
+ vperm \a3,\a0,\a1,21 // vcprm(2,3,s0,s3) // {r1,i1,r2,i3}
+ vperm \b2,\b0,\b1,20
+ vperm \b3,\b0,\b1,21
+ vaddfp \a0,\a2,\a3 // {t1,t2,t6,t5}
+ vsubfp \a1,\a2,\a3 // {t3,t4,t8,t7}
+ vaddfp \b0,\b2,\b3
+ vsubfp \b1,\b2,\b3
+ vmrghw \a2,\a0,\a1 // vcprm(0,s0,1,s1) // {t1,t3,t2,t4}
+ vperm \a3,\a0,\a1,22 // vcprm(2,s3,3,s2) // {t6,t7,t5,t8}
+ vmrghw \b2,\b0,\b1
+ vperm \b3,\b0,\b1,22
+ vaddfp \a0,\a2,\a3 // {r0,r1,i0,i1}
+ vsubfp \a1,\a2,\a3 // {r2,r3,i2,i3}
+ vaddfp \b0,\b2,\b3
+ vsubfp \b1,\b2,\b3
+ vperm \a2,\a0,\a1,23 // vcprm(0,1,s0,s1) // {r0,r1,r2,r3}
+ vperm \a3,\a0,\a1,24 // vcprm(2,3,s2,s3) // {i0,i1,i2,i3}
+ vperm \b2,\b0,\b1,23
+ vperm \b3,\b0,\b1,24
+.endm
+
+.macro FFT8 a0, a1, b0, b1, a2, a3, b2, b3, b4 // in,out:a0-b1
+ vmrghw \b2,\b0,\b1 // vcprm(0,s0,1,s1) // {r4,r6,i4,i6}
+ vmrglw \b3,\b0,\b1 // vcprm(2,s2,3,s3) // {r5,r7,i5,i7}
+ vperm \a2,\a0,\a1,20 // FFT4 ...
+ vperm \a3,\a0,\a1,21
+ vaddfp \b0,\b2,\b3 // {t1,t3,t2,t4}
+ vsubfp \b1,\b2,\b3 // {r5,r7,i5,i7}
+ vperm \b4,\b1,\b1,25 // vcprm(2,3,0,1) // {i5,i7,r5,r7}
+ vaddfp \a0,\a2,\a3
+ vsubfp \a1,\a2,\a3
+ vmaddfp \b1,\b1,17,14 // * {-1,1,1,-1}/sqrt(2)
+ vmaddfp \b1,\b4,18,\b1 // * { 1,1,1,1 }/sqrt(2) // {t8,ta,t7,t9}
+ vmrghw \a2,\a0,\a1
+ vperm \a3,\a0,\a1,22
+ vperm \b2,\b0,\b1,26 // vcprm(1,2,s3,s0) // {t3,t2,t9,t8}
+ vperm \b3,\b0,\b1,27 // vcprm(0,3,s2,s1) // {t1,t4,t7,ta}
+ vaddfp \a0,\a2,\a3
+ vsubfp \a1,\a2,\a3
+ vaddfp \b0,\b2,\b3 // {t1,t2,t9,ta}
+ vsubfp \b1,\b2,\b3 // {t6,t5,tc,tb}
+ vperm \a2,\a0,\a1,23
+ vperm \a3,\a0,\a1,24
+ vperm \b2,\b0,\b1,28 // vcprm(0,2,s1,s3) // {t1,t9,t5,tb}
+ vperm \b3,\b0,\b1,29 // vcprm(1,3,s0,s2) // {t2,ta,t6,tc}
+ vsubfp \b0,\a2,\b2 // {r4,r5,r6,r7}
+ vsubfp \b1,\a3,\b3 // {i4,i5,i6,i7}
+ vaddfp \a0,\a2,\b2 // {r0,r1,r2,r3}
+ vaddfp \a1,\a3,\b3 // {i0,i1,i2,i3}
+.endm
+
+.macro BF d0,d1,s0,s1
+ vsubfp \d1,\s0,\s1
+ vaddfp \d0,\s0,\s1
+.endm
+
+fft4_altivec:
+ lvx 0,0,3
+ lvx 1,9,3
+ FFT4 0,1,2,3
+ stvx 2,0,3
+ stvx 3,9,3
+ blr
+
+fft8_altivec:
+ addi 4,3,32
+ lvx 0,0,3
+ lvx 1,9,3
+ lvx 2,0,4
+ lvx 3,9,4
+ FFT8 0,1,2,3,4,5,6,7,8
+ stvx 0,0,3
+ stvx 1,9,3
+ stvx 2,0,4
+ stvx 3,9,4
+ blr
+
+fft16_altivec:
+ addi 5,3,64
+ addi 6,3,96
+ addi 4,3,32
+ lvx 0,0,5
+ lvx 1,9,5
+ lvx 2,0,6
+ lvx 3,9,6
+ FFT4x2 0,1,2,3,4,5,6,7
+ lvx 0,0,3
+ lvx 1,9,3
+ lvx 2,0,4
+ lvx 3,9,4
+ FFT8 0,1,2,3,8,9,10,11,12
+ vmaddfp 8,4,15,14 // r2*wre
+ vmaddfp 9,5,15,14 // i2*wre
+ vmaddfp 10,6,15,14 // r3*wre
+ vmaddfp 11,7,15,14 // i3*wre
+ vmaddfp 8,5,16,8 // i2*wim
+ vnmsubfp 9,4,16,9 // r2*wim
+ vnmsubfp 10,7,16,10 // i3*wim
+ vmaddfp 11,6,16,11 // r3*wim
+ BF 10,12,10,8
+ BF 11,13,9,11
+ BF 0,4,0,10
+ BF 3,7,3,12
+ stvx 0,0,3
+ stvx 4,0,5
+ stvx 3,9,4
+ stvx 7,9,6
+ BF 1,5,1,11
+ BF 2,6,2,13
+ stvx 1,9,3
+ stvx 5,9,5
+ stvx 2,0,4
+ stvx 6,0,6
+ blr
+
+// void pass(float *z, float *wre, int n)
+.macro PASS interleave
+fft_pass\interleave\()_altivec:
+ mtctr 5
+ slwi 0,5,4
+ slwi 7,5,6 // o2
+ slwi 5,5,5 // o1
+ add 10,5,7 // o3
+ add 0,4,0 // wim
+ addi 6,5,16 // o1+16
+ addi 8,7,16 // o2+16
+ addi 11,10,16 // o3+16
+1:
+ lvx 8,0,4 // wre
+ lvx 10,0,0 // wim
+ sub 0,0,9
+ lvx 9,0,0
+ vperm 9,9,10,19 // vcprm(s0,3,2,1) => wim[0 .. -3]
+ lvx 4,3,7 // r2 = z[o2]
+ lvx 5,3,8 // i2 = z[o2+16]
+ lvx 6,3,10 // r3 = z[o3]
+ lvx 7,3,11 // i3 = z[o3+16]
+ vmaddfp 10,4,8,14 // r2*wre
+ vmaddfp 11,5,8,14 // i2*wre
+ vmaddfp 12,6,8,14 // r3*wre
+ vmaddfp 13,7,8,14 // i3*wre
+ lvx 0,0,3 // r0 = z[0]
+ lvx 3,3,6 // i1 = z[o1+16]
+ vmaddfp 10,5,9,10 // i2*wim
+ vnmsubfp 11,4,9,11 // r2*wim
+ vnmsubfp 12,7,9,12 // i3*wim
+ vmaddfp 13,6,9,13 // r3*wim
+ lvx 1,3,9 // i0 = z[16]
+ lvx 2,3,5 // r1 = z[o1]
+ BF 12,8,12,10
+ BF 13,9,11,13
+ BF 0,4,0,12
+ BF 3,7,3,8
+.ifb \interleave
+ stvx 0,0,3
+ stvx 4,3,7
+ stvx 3,3,6
+ stvx 7,3,11
+.endif
+ BF 1,5,1,13
+ BF 2,6,2,9
+.ifb \interleave
+ stvx 1,3,9
+ stvx 2,3,5
+ stvx 5,3,8
+ stvx 6,3,10
+.else
+ vmrghw 8,0,1
+ vmrglw 9,0,1
+ stvx 8,0,3
+ stvx 9,3,9
+ vmrghw 8,2,3
+ vmrglw 9,2,3
+ stvx 8,3,5
+ stvx 9,3,6
+ vmrghw 8,4,5
+ vmrglw 9,4,5
+ stvx 8,3,7
+ stvx 9,3,8
+ vmrghw 8,6,7
+ vmrglw 9,6,7
+ stvx 8,3,10
+ stvx 9,3,11
+.endif
+ addi 3,3,32
+ addi 4,4,16
+ bdnz 1b
+ sub 3,3,5
+ blr
+.endm
+
+.macro DECL_FFT ver, bits, n, n2, n4
+fft\n\ver\()_altivec:
+ mflr 0
+ STORE_PTR 0,1,\bits-5
+ bl fft\n2\()_altivec
+ addi2 3,\n*4
+ bl fft\n4\()_altivec
+ addi2 3,\n*2
+ bl fft\n4\()_altivec
+ addi2 3,\n*-6
+ LOAD_PTR 0,1,\bits-5
+ LOAD_PTR 4,12,\bits-4
+ mtlr 0
+ li 5,\n/16
+ b fft_pass\ver\()_altivec
+.endm
+
+.macro DECL_FFTS ver
+PASS \ver
+DECL_FFT \ver,5,32,16,8
+DECL_FFT \ver,6,64,32,16
+DECL_FFT \ver,7,128,64,32
+DECL_FFT \ver,8,256,128,64
+DECL_FFT \ver,9,512,256,128
+DECL_FFT \ver,10,1024,512,256
+DECL_FFT \ver,11,2048,1024,512
+DECL_FFT \ver,12,4096,2048,1024
+DECL_FFT \ver,13,8192,4096,2048
+DECL_FFT \ver,14,16384,8192,4096
+DECL_FFT \ver,15,32768,16384,8192
+DECL_FFT \ver,16,65536,32768,16384
+.rodata
+.global ff_fft_dispatch\ver\()_altivec
+ff_fft_dispatch\ver\()_altivec:
+PTR fft4_altivec, fft8_altivec, fft16_altivec, fft32\ver\()_altivec,\
+ fft64\ver\()_altivec, fft128\ver\()_altivec, fft256\ver\()_altivec,\
+ fft512\ver\()_altivec, fft1024\ver\()_altivec, fft2048\ver\()_altivec,\
+ fft4096\ver\()_altivec, fft8192\ver\()_altivec, fft16384\ver\()_altivec,\
+ fft32768\ver\()_altivec, fft65536\ver\()_altivec
+.text
+.endm
+
+DECL_FFTS
+DECL_FFTS _interleave
diff --git a/libavcodec/ppc/types_altivec.h b/libavcodec/ppc/types_altivec.h
index 2870e83..36b6e1f 100644
--- a/libavcodec/ppc/types_altivec.h
+++ b/libavcodec/ppc/types_altivec.h
@@ -30,6 +30,7 @@
#define vec_s16 vector signed short
#define vec_u32 vector unsigned int
#define vec_s32 vector signed int
+#define vec_f vector float
/***********************************************************************
* Null vector
--
1.6.4
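
A note on the data layout in patch 1: the asm kernels keep intermediate
results in split order -- blocks of {4x real, 4x imaginary} -- and only the
interleaved dispatch (or swizzle(), for the small sizes that never reach a
PASS) converts back to the C FFTComplex layout. A scalar sketch of that
conversion, equivalent to the vec_mergeh/vec_mergel pair in swizzle()
(illustrative only, not part of the patch):

    /* Merge one block of 4 reals followed by 4 imaginaries into
     * 4 interleaved re/im pairs (C order). */
    static void deswizzle4(const float *blk, /* {r0,r1,r2,r3,i0,i1,i2,i3} */
                           FFTComplex *out)
    {
        int i;
        for (i = 0; i < 4; i++) {
            out[i].re = blk[i];
            out[i].im = blk[4+i];
        }
    }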
-------------- next part --------------
From 8456248f3c63ee27d903f33e41fa3058d4022430 Mon Sep 17 00:00:00 2001
From: Loren Merritt <pengvado@akuvian.org>
Date: Mon, 24 Aug 2009 01:59:29 +0100
Subject: [PATCH 2/3] remove vestiges of radix-2 FFT
---
libavcodec/dsputil.h | 2 -
libavcodec/fft.c | 107 ++++++++-----------------------------------------
2 files changed, 18 insertions(+), 91 deletions(-)
diff --git a/libavcodec/dsputil.h b/libavcodec/dsputil.h
index 2d15bd3..19c55a5 100644
--- a/libavcodec/dsputil.h
+++ b/libavcodec/dsputil.h
@@ -691,8 +691,6 @@ typedef struct FFTContext {
int nbits;
int inverse;
uint16_t *revtab;
- FFTComplex *exptab;
- FFTComplex *exptab1; /* only used by SSE code */
FFTComplex *tmp_buf;
void (*fft_permute)(struct FFTContext *s, FFTComplex *z);
void (*fft_calc)(struct FFTContext *s, FFTComplex *z);
diff --git a/libavcodec/fft.c b/libavcodec/fft.c
index d864f82..bf791c1 100644
--- a/libavcodec/fft.c
+++ b/libavcodec/fft.c
@@ -60,9 +60,7 @@ static int split_radix_permutation(int i, int n, int inverse)
av_cold int ff_fft_init(FFTContext *s, int nbits, int inverse)
{
- int i, j, m, n;
- float alpha, c1, s1, s2;
- int split_radix = 1;
+ int i, j, n;
int av_unused has_vectors;
if (nbits < 2 || nbits > 16)
@@ -70,22 +68,18 @@ av_cold int ff_fft_init(FFTContext *s, int nbits, int inverse)
s->nbits = nbits;
n = 1 << nbits;
- s->tmp_buf = NULL;
- s->exptab = av_malloc((n / 2) * sizeof(FFTComplex));
- if (!s->exptab)
- goto fail;
s->revtab = av_malloc(n * sizeof(uint16_t));
if (!s->revtab)
goto fail;
+ s->tmp_buf = av_malloc(n * sizeof(FFTComplex));
+ if (!s->tmp_buf)
+ goto fail;
s->inverse = inverse;
- s2 = inverse ? 1.0 : -1.0;
-
s->fft_permute = ff_fft_permute_c;
s->fft_calc = ff_fft_calc_c;
s->imdct_calc = ff_imdct_calc_c;
s->imdct_half = ff_imdct_half_c;
- s->exptab1 = NULL;
#if HAVE_MMX && HAVE_YASM
has_vectors = mm_support();
@@ -113,103 +107,38 @@ av_cold int ff_fft_init(FFTContext *s, int nbits, int inverse)
}
#endif
- if (split_radix) {
- for(j=4; j<=nbits; j++) {
- int m = 1<<j;
- double freq = 2*M_PI/m;
- FFTSample *tab = ff_cos_tabs[j-4];
- for(i=0; i<=m/4; i++)
- tab[i] = cos(i*freq);
- for(i=1; i<m/4; i++)
- tab[m/2-i] = tab[i];
- }
- for(i=0; i<n; i++)
- s->revtab[-split_radix_permutation(i, n, s->inverse) & (n-1)] = i;
- s->tmp_buf = av_malloc(n * sizeof(FFTComplex));
- } else {
- int np, nblocks, np2, l;
- FFTComplex *q;
-
- for(i=0; i<(n/2); i++) {
- alpha = 2 * M_PI * (float)i / (float)n;
- c1 = cos(alpha);
- s1 = sin(alpha) * s2;
- s->exptab[i].re = c1;
- s->exptab[i].im = s1;
- }
-
- np = 1 << nbits;
- nblocks = np >> 3;
- np2 = np >> 1;
- s->exptab1 = av_malloc(np * 2 * sizeof(FFTComplex));
- if (!s->exptab1)
- goto fail;
- q = s->exptab1;
- do {
- for(l = 0; l < np2; l += 2 * nblocks) {
- *q++ = s->exptab[l];
- *q++ = s->exptab[l + nblocks];
-
- q->re = -s->exptab[l].im;
- q->im = s->exptab[l].re;
- q++;
- q->re = -s->exptab[l + nblocks].im;
- q->im = s->exptab[l + nblocks].re;
- q++;
- }
- nblocks = nblocks >> 1;
- } while (nblocks != 0);
- av_freep(&s->exptab);
-
- /* compute bit reverse table */
- for(i=0;i<n;i++) {
- m=0;
- for(j=0;j<nbits;j++) {
- m |= ((i >> j) & 1) << (nbits-j-1);
- }
- s->revtab[i]=m;
- }
+ for(j=4; j<=nbits; j++) {
+ int m = 1<<j;
+ double freq = 2*M_PI/m;
+ FFTSample *tab = ff_cos_tabs[j-4];
+ for(i=0; i<=m/4; i++)
+ tab[i] = cos(i*freq);
+ for(i=1; i<m/4; i++)
+ tab[m/2-i] = tab[i];
}
+ for(i=0; i<n; i++)
+ s->revtab[-split_radix_permutation(i, n, s->inverse) & (n-1)] = i;
return 0;
fail:
av_freep(&s->revtab);
- av_freep(&s->exptab);
- av_freep(&s->exptab1);
av_freep(&s->tmp_buf);
return -1;
}
void ff_fft_permute_c(FFTContext *s, FFTComplex *z)
{
- int j, k, np;
- FFTComplex tmp;
+ int j, np;
const uint16_t *revtab = s->revtab;
np = 1 << s->nbits;
-
- if (s->tmp_buf) {
- /* TODO: handle split-radix permute in a more optimal way, probably in-place */
- for(j=0;j<np;j++) s->tmp_buf[revtab[j]] = z[j];
- memcpy(z, s->tmp_buf, np * sizeof(FFTComplex));
- return;
- }
-
- /* reverse */
- for(j=0;j<np;j++) {
- k = revtab[j];
- if (k < j) {
- tmp = z[k];
- z[k] = z[j];
- z[j] = tmp;
- }
- }
+ /* TODO: handle split-radix permute in a more optimal way, probably in-place */
+ for(j=0;j<np;j++) s->tmp_buf[revtab[j]] = z[j];
+ memcpy(z, s->tmp_buf, np * sizeof(FFTComplex));
}
av_cold void ff_fft_end(FFTContext *s)
{
av_freep(&s->revtab);
- av_freep(&s->exptab);
- av_freep(&s->exptab1);
av_freep(&s->tmp_buf);
}
--
1.6.4
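
With the radix-2 path gone, ff_fft_permute_c always does the split-radix
reordering through tmp_buf. The caller-side contract is unchanged; a minimal
usage sketch (per the FFTContext API in dsputil.h; error handling trimmed):

    void fft32_in_place(FFTComplex buf[32])
    {
        FFTContext ctx;
        if (ff_fft_init(&ctx, 5, 0) < 0)   /* 1<<5 = 32 points, forward */
            return;
        ctx.fft_permute(&ctx, buf);        /* reorder input via revtab */
        ctx.fft_calc(&ctx, buf);           /* in-place transform */
        ff_fft_end(&ctx);
    }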
-------------- next part --------------
From c523f5f4667e4d4efe11285be7c4325ff5534f94 Mon Sep 17 00:00:00 2001
From: Loren Merritt <pengvado@akuvian.org>
Date: Wed, 26 Aug 2009 10:08:06 +0100
Subject: [PATCH 3/3] altivec iMDCT
1.8x faster than C iMDCT (excluding the FFT part) on a G4
10% faster vorbis decoding
---
libavcodec/dsputil.h | 2 +
libavcodec/fft.c | 4 +-
libavcodec/ppc/fft_altivec.c | 111 ++++++++++++++++++++++++++++++++++++++++-
3 files changed, 113 insertions(+), 4 deletions(-)
diff --git a/libavcodec/dsputil.h b/libavcodec/dsputil.h
index 2d15bd3..4e7216e 100644
--- a/libavcodec/dsputil.h
+++ b/libavcodec/dsputil.h
@@ -784,6 +784,8 @@ void ff_imdct_calc_3dn2(MDCTContext *s, FFTSample *output, const FFTSample *inpu
void ff_imdct_half_3dn2(MDCTContext *s, FFTSample *output, const FFTSample *input);
void ff_imdct_calc_sse(MDCTContext *s, FFTSample *output, const FFTSample *input);
void ff_imdct_half_sse(MDCTContext *s, FFTSample *output, const FFTSample *input);
+void ff_imdct_calc_altivec(MDCTContext *s, FFTSample *output, const FFTSample *input);
+void ff_imdct_half_altivec(MDCTContext *s, FFTSample *output, const FFTSample *input);
void ff_mdct_calc(MDCTContext *s, FFTSample *out, const FFTSample *input);
void ff_mdct_end(MDCTContext *s);
diff --git a/libavcodec/fft.c b/libavcodec/fft.c
index bf791c1..d8e0e64 100644
--- a/libavcodec/fft.c
+++ b/libavcodec/fft.c
@@ -103,6 +103,8 @@ av_cold int ff_fft_init(FFTContext *s, int nbits, int inverse)
#elif HAVE_ALTIVEC
has_vectors = mm_support();
if (has_vectors & FF_MM_ALTIVEC) {
+ s->imdct_calc = ff_imdct_calc_altivec;
+ s->imdct_half = ff_imdct_half_altivec;
s->fft_calc = ff_fft_calc_altivec;
}
#endif
diff --git a/libavcodec/ppc/fft_altivec.c b/libavcodec/ppc/fft_altivec.c
index 1def80d..14a3144 100644
--- a/libavcodec/ppc/fft_altivec.c
+++ b/libavcodec/ppc/fft_altivec.c
@@ -49,7 +49,7 @@ static void swizzle(vec_f *z, int n)
}
}
-void ff_fft_calc_altivec(FFTContext *s, FFTComplex *z)
+static av_always_inline void fft_dispatch(FFTContext *s, FFTComplex *z, int do_swizzle)
{
register vec_f v14 __asm__("v14") = {0,0,0,0};
register vec_f v15 __asm__("v15") = *(const vec_f*)ff_cos_16;
@@ -75,14 +75,119 @@ void ff_fft_calc_altivec(FFTContext *s, FFTComplex *z)
"subi 1,1,%1 \n"
"bctrl \n"
"addi 1,1,%1 \n"
- ::"r"(ff_fft_dispatch_altivec[1][s->nbits-2]), "i"(12*sizeof(void*)),
+ ::"r"(ff_fft_dispatch_altivec[do_swizzle][s->nbits-2]), "i"(12*sizeof(void*)),
"r"(zarg), "r"(cos_tabs),
"v"(v14),"v"(v15),"v"(v16),"v"(v17),"v"(v18),"v"(v19),"v"(v20),"v"(v21),
"v"(v22),"v"(v23),"v"(v24),"v"(v25),"v"(v26),"v"(v27),"v"(v28),"v"(v29)
: "lr","ctr","r0","r1","r4","r5","r6","r7","r8","r9","r10","r11",
"v0","v1","v2","v3","v4","v5","v6","v7","v8","v9","v10","v11","v12","v13"
);
- if(s->nbits <= 4)
+ if(do_swizzle && s->nbits <= 4)
swizzle((vec_f*)z, 1<<s->nbits);
}
+void ff_fft_calc_altivec(FFTContext *s, FFTComplex *z)
+{
+ fft_dispatch(s, z, 1);
+}
+
+void ff_imdct_half_altivec(MDCTContext *s, FFTSample *output, const FFTSample *input)
+{
+ int j, k;
+ int n = 1 << s->nbits;
+ int n4 = n >> 2;
+ int n8 = n >> 3;
+ int n32 = n >> 5;
+ const uint16_t *revtabj = s->fft.revtab;
+ const uint16_t *revtabk = s->fft.revtab+n4;
+ const vec_f *tcos = (const vec_f*)(s->tcos+n8);
+ const vec_f *tsin = (const vec_f*)(s->tsin+n8);
+ const vec_f *pin = (const vec_f*)(input+n4);
+ vec_f *pout = (vec_f*)(output+n4);
+
+ /* pre rotation */
+ k = n32-1;
+ do {
+ vec_f cos,sin,cos0,sin0,cos1,sin1,re,im,r0,i0,r1,i1,a,b,c,d;
+#define CMULA(p,o0,o1,o2,o3)\
+ a = pin[ k*2+p]; /* { z[k].re, z[k].im, z[k+1].re, z[k+1].im } */\
+ b = pin[-k*2-p-1]; /* { z[-k-2].re, z[-k-2].im, z[-k-1].re, z[-k-1].im } */\
+ re = vec_perm(a, b, vcprm(0,2,s0,s2)); /* { z[k].re, z[k+1].re, z[-k-2].re, z[-k-1].re } */\
+ im = vec_perm(a, b, vcprm(s3,s1,3,1)); /* { z[-k-1].im, z[-k-2].im, z[k+1].im, z[k].im } */\
+ cos = vec_perm(cos0, cos1, vcprm(o0,o1,s##o2,s##o3)); /* { cos[k], cos[k+1], cos[-k-2], cos[-k-1] } */\
+ sin = vec_perm(sin0, sin1, vcprm(o0,o1,s##o2,s##o3));\
+ r##p = im*cos - re*sin;\
+ i##p = re*cos + im*sin;
+#define STORE2(v,dst)\
+ j = dst;\
+ vec_ste(v, 0, output+j*2);\
+ vec_ste(v, 4, output+j*2);
+#define STORE8(p)\
+ a = vec_perm(r##p, i##p, vcprm(0,s0,0,s0));\
+ b = vec_perm(r##p, i##p, vcprm(1,s1,1,s1));\
+ c = vec_perm(r##p, i##p, vcprm(2,s2,2,s2));\
+ d = vec_perm(r##p, i##p, vcprm(3,s3,3,s3));\
+ STORE2(a, revtabk[ p*2-4]);\
+ STORE2(b, revtabk[ p*2-3]);\
+ STORE2(c, revtabj[-p*2+2]);\
+ STORE2(d, revtabj[-p*2+3]);
+
+ cos0 = tcos[k];
+ sin0 = tsin[k];
+ cos1 = tcos[-k-1];
+ sin1 = tsin[-k-1];
+ CMULA(0, 0,1,2,3);
+ CMULA(1, 2,3,0,1);
+ STORE8(0);
+ STORE8(1);
+ revtabj += 4;
+ revtabk -= 4;
+ k--;
+ } while(k>=0);
+
+ fft_dispatch(&s->fft, (FFTComplex*)output, 0);
+
+ /* post rotation + reordering */
+ j = -n32;
+ k = n32-1;
+ do {
+ vec_f cos,sin,re,im,a,b,c,d;
+#define CMULB(d0,d1,o)\
+ re = pout[o*2];\
+ im = pout[o*2+1];\
+ cos = tcos[o];\
+ sin = tsin[o];\
+ d0 = im*sin - re*cos;\
+ d1 = re*sin + im*cos;
+
+ CMULB(a,b,j);
+ CMULB(c,d,k);
+ pout[2*j] = vec_perm(a, d, vcprm(0,s3,1,s2));
+ pout[2*j+1] = vec_perm(a, d, vcprm(2,s1,3,s0));
+ pout[2*k] = vec_perm(c, b, vcprm(0,s3,1,s2));
+ pout[2*k+1] = vec_perm(c, b, vcprm(2,s1,3,s0));
+ j++;
+ k--;
+ } while(k>=0);
+}
+
+void ff_imdct_calc_altivec(MDCTContext *s, FFTSample *output, const FFTSample *input)
+{
+ int k;
+ int n = 1 << s->nbits;
+ int n4 = n >> 2;
+ int n16 = n >> 4;
+ vec_u32 sign = {1<<31,1<<31,1<<31,1<<31};
+ vec_u32 *p0 = (vec_u32*)(output+n4);
+ vec_u32 *p1 = (vec_u32*)(output+n4*3);
+
+ ff_imdct_half_altivec(s, output+n4, input);
+
+ for(k=0; k<n16; k++) {
+ vec_u32 a = p0[k] ^ sign;
+ vec_u32 b = p1[-k-1];
+ p0[-k-1] = vec_perm(a, a, vcprm(3,2,1,0));
+ p1[k] = vec_perm(b, b, vcprm(3,2,1,0));
+ }
+}
+
--
1.6.4
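
For reference, the tail loop of ff_imdct_calc_altivec (sign-bit XOR plus a
vcprm(3,2,1,0) reversal) is the vector form of the scalar unfold below, which
extends the half transform to the full iMDCT by symmetry (a sketch of the
equivalent C logic, not part of the patch):

    /* output[n4..n4+n2) holds the half-iMDCT; extend to n samples:
     * 1st quarter = negated mirror of the 2nd,
     * 4th quarter = mirror of the 3rd. */
    static void imdct_unfold(float *output, int n)
    {
        int n2 = n >> 1, n4 = n >> 2, k;
        for (k = 0; k < n4; k++) {
            output[k]     = -output[n2-k-1];
            output[n-k-1] =  output[n2+k];
        }
    }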