[FFmpeg-devel] [PATCH] split-radix FFT

Loren Merritt lorenm
Tue Aug 5 02:04:57 CEST 2008


On Fri, 25 Jul 2008, Loren Merritt wrote:

> $subject, vaguely based on djbfft.
> Changed from djb:
> * added simd.
> * removed the hand-scheduled pentium-pro code. gcc's output from simple C is 
> better on all cpus I have access to.
> * removed the distinction between fft and ifft. they're just permutations of
> each other, so the difference belongs in revtab[] and not in the code.
> * removed the distinction between pass() and pass_big(). C can always use the 
> memory-efficient version, and simd never does because the shuffles are too 
> costly.
> * made an entirely different pass_big(), to avoid store->load aliasing.
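
As a C sketch of the fft/ifft point above: the butterflies are shared, and
only the permutation table changes with the inverse flag. This just mirrors
the revtab construction in the fft.c hunk below (build_revtab itself is not
part of the patch):

#include <stdint.h>

/* copied from the fft.c hunk below */
static int split_radix_permutation(int i, int n, int inverse)
{
    int m;
    if(n <= 2) return i;
    m = n >> 1;
    if(i < m) return split_radix_permutation(i, m, inverse) << 1;
    i -= m;
    m >>= 1;
    if(!inverse) i ^= m;
    if(i < m) return (split_radix_permutation(i, m, inverse) << 2) + 1;
    i -= m;
    return ((split_radix_permutation(i, m, inverse) << 2) - 1) & (n - 1);
}

/* forward and inverse differ only in which revtab[] this builds;
 * ff_fft_calc_c() itself never looks at s->inverse */
static void build_revtab(uint16_t *revtab, int nbits, int inverse)
{
    int n = 1 << nbits, i;
    for(i = 0; i < n; i++)
        revtab[(n - split_radix_permutation(i, n, inverse)) % n] = i;
}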

yasm version.

Not nasm compatible. In particular, I depend on the assembler to 
optimize away reg*0 in an address (e.g. the Z(x) addressing macro in 
fft_mmx.asm expands to terms like o1q*0 for small indices), which nasm 
does in some cases but not if there are 3 registers before the zero 
culling. I could work around this at a cost of about 10 lines of code.

Doesn't distinguish HAVE_YASM from HAVE_MMX.

Doesn't support mingw64. There's no barrier in principle; I just don't 
have a win64 box, so I couldn't write that version of the 
calling-convention macros.
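
For reference, ff_fft_permute() and ff_fft_calc() are now inline wrappers
around the fft_permute/fft_calc function pointers that ff_fft_init() fills
in, so the C-side calling sequence is unchanged. A minimal usage sketch
(transform_block is a made-up caller, not part of the patch):

#include "dsputil.h"

static void transform_block(FFTComplex *z, int nbits, int inverse)
{
    FFTContext fft;
    if (ff_fft_init(&fft, nbits, inverse) < 0)
        return;
    /* z must be 16-byte aligned for the SSE version */
    ff_fft_permute(&fft, z); /* reorder input via revtab (or tmp_buf) */
    ff_fft_calc(&fft, z);    /* in-place transform, no 1/sqrt(n) scaling */
    ff_fft_end(&fft);
}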

--Loren Merritt
-------------- next part --------------
From afb93d1dec538e4c886b48479339f3af0742818a Mon Sep 17 00:00:00 2001
From: Loren Merritt <pengvado at akuvian.org>
Date: Sat, 2 Aug 2008 02:13:09 -0600
Subject: [PATCH] yasm buildsystem

---
 common.mak |    7 +++++++
 configure  |   31 +++++++++++++++++++++++++++++++
 2 files changed, 38 insertions(+), 0 deletions(-)

diff --git a/common.mak b/common.mak
index 93176c5..17519d9 100644
--- a/common.mak
+++ b/common.mak
@@ -8,6 +8,7 @@ ifndef SUBDIR
 vpath %.c $(SRC_DIR)
 vpath %.h $(SRC_DIR)
 vpath %.S $(SRC_DIR)
+vpath %.asm $(SRC_DIR)
 
 ifeq ($(SRC_DIR),$(SRC_PATH_BARE))
 BUILD_ROOT_REL = .
@@ -26,6 +27,9 @@ CFLAGS := -DHAVE_AV_CONFIG_H -D_FILE_OFFSET_BITS=64 -D_LARGEFILE_SOURCE \
 %.o: %.S
 	$(CC) $(CFLAGS) $(LIBOBJFLAGS) -c -o $@ $<
 
+%.o: %.asm
+	$(YASM) $(YASMFLAGS) -I $(<D)/ -o $@ $<
+
 %.ho: %.h
 	$(CC) $(CFLAGS) $(LIBOBJFLAGS) -Wno-unused -c -o $@ -x c $<
 
@@ -38,6 +42,9 @@ CFLAGS := -DHAVE_AV_CONFIG_H -D_FILE_OFFSET_BITS=64 -D_LARGEFILE_SOURCE \
 %.d: %.cpp
 	$(DEPEND_CMD) > $@
 
+%.d: %.asm
+	$(YASM) $(YASMFLAGS) -I $(<D)/ -M -o $(@:%.d=%.o) $< > $@
+
 %.o: %.d
 
 %$(EXESUF): %.c
diff --git a/configure b/configure
index 564ff03..d11a331 100755
--- a/configure
+++ b/configure
@@ -444,6 +444,13 @@ int foo(void){ asm volatile($asm); }
 EOF
 }
 
+check_yasm(){
+    log check_yasm "$@"
+    echo "$1" > $TMPS
+    shift 1
+    check_cmd $yasm $YASMFLAGS "$@" -o $TMPO $TMPS
+}
+
 check_ld(){
     log check_ld "$@"
     check_cc || return
@@ -927,6 +934,7 @@ shlibdir_default="$libdir_default"
 
 # toolchain
 cc="gcc"
+yasm="yasm"
 ar="ar"
 nm="nm"
 ranlib="ranlib"
@@ -1089,6 +1097,7 @@ echo "# $0 $@" > $logfile
 set >> $logfile
 
 cc="${cross_prefix}${cc}"
+yasm="${cross_prefix}${yasm}"
 ar="${cross_prefix}${ar}"
 nm="${cross_prefix}${nm}"
 ranlib="${cross_prefix}${ranlib}"
@@ -1179,6 +1188,8 @@ enable $arch
 enabled_any x86_32 x86_64 && enable x86
 enabled     sparc64       && enable sparc
 
+objformat="elf"
+
 # OS specific
 case $target_os in
     beos|haiku|zeta)
@@ -1243,6 +1254,7 @@ case $target_os in
         SLIBNAME_WITH_VERSION='$(SLIBPREF)$(FULLNAME).$(LIBVERSION)$(SLIBSUF)'
         SLIBNAME_WITH_MAJOR='$(SLIBPREF)$(FULLNAME).$(LIBMAJOR)$(SLIBSUF)'
         FFSERVERLDFLAGS=-Wl,-bind_at_load
+        objformat="macho"
         ;;
     mingw32*)
         target_os=mingw32
@@ -1269,6 +1281,7 @@ case $target_os in
             install -m 644 $(SUBDIR)$(SLIBNAME_WITH_MAJOR:$(SLIBSUF)=.lib) "$(SHLIBDIR)/$(SLIBNAME_WITH_MAJOR:$(SLIBSUF)=.lib)"'
         SLIB_UNINSTALL_EXTRA_CMD='rm -f "$(SHLIBDIR)/$(SLIBNAME:$(SLIBSUF)=.lib)"'
         SHFLAGS='-shared -Wl,--output-def,$$(@:$(SLIBSUF)=.def) -Wl,--enable-runtime-pseudo-reloc -Wl,--enable-auto-image-base'
+        objformat="win32"
         ;;
     cygwin*)
         target_os=cygwin
@@ -1285,12 +1298,14 @@ case $target_os in
         SLIBNAME_WITH_VERSION='$(SLIBPREF)$(FULLNAME)-$(LIBVERSION)$(SLIBSUF)'
         SLIBNAME_WITH_MAJOR='$(SLIBPREF)$(FULLNAME)-$(LIBMAJOR)$(SLIBSUF)'
         SHFLAGS='-shared -Wl,--enable-auto-image-base'
+        objformat="win32"
         ;;
     *-dos|freedos|opendos)
         disable ffplay ffserver vhook
         disable $INDEV_LIST $OUTDEV_LIST
         network_extralibs="-lsocket"
         EXESUF=".exe"
+        objformat="win32"
         ;;
     linux)
         LDLATEFLAGS="-Wl,--as-needed $LDLATEFLAGS"
@@ -1534,6 +1549,20 @@ EOF
     enabled mmx2  && check_asm mmx2  '"movss %xmm0, %xmm0"'
 
     check_asm bswap '"bswap %%eax" ::: "%eax"'
+
+    if test $arch = x86_64; then
+        YASMFLAGS="-f ${objformat}64 -DARCH_X86_64"
+        enabled shared && YASMFLAGS="$YASMFLAGS -DPIC"
+    else
+        YASMFLAGS="-f $objformat -DARCH_X86_32"
+    fi
+    if test $objformat = elf; then
+        enabled debug && YASMFLAGS="$YASMFLAGS -g dwarf2"
+    else
+        YASMFLAGS="$YASMFLAGS -DPREFIX"
+    fi
+    # FIXME: just disable yasm? but both the exe name and the enablement flag are naturally $yasm ...
+    check_yasm "pabsw xmm0, xmm0" || disable mmx2
 fi
 
 # check for assembler specific support
@@ -2028,6 +2057,7 @@ echo "INCDIR=\$(DESTDIR)$incdir" >> config.mak
 echo "BINDIR=\$(DESTDIR)$bindir" >> config.mak
 echo "MANDIR=\$(DESTDIR)$mandir" >> config.mak
 echo "CC=$cc" >> config.mak
+echo "YASM=$yasm" >> config.mak
 echo "AR=$ar" >> config.mak
 echo "RANLIB=$ranlib" >> config.mak
 echo "LN_S=$ln_s" >> config.mak
@@ -2040,6 +2070,7 @@ echo "VHOOKCFLAGS=$VHOOKCFLAGS" >> config.mak
 echo "LDFLAGS=$LDFLAGS" >> config.mak
 echo "FFSERVERLDFLAGS=$FFSERVERLDFLAGS" >> config.mak
 echo "SHFLAGS=$SHFLAGS" >> config.mak
+echo "YASMFLAGS=$YASMFLAGS" >> config.mak
 echo "VHOOKSHFLAGS=$VHOOKSHFLAGS" >> config.mak
 echo "VHOOKLIBS=$VHOOKLIBS" >> config.mak
 echo "LIBOBJFLAGS=$LIBOBJFLAGS" >> config.mak
-- 
1.5.5.1

-------------- next part --------------
From 3711cff7d86f86e85be9805346b80ecaf0321172 Mon Sep 17 00:00:00 2001
From: Loren Merritt <pengvado at akuvian.org>
Date: Mon, 4 Aug 2008 15:06:12 -0600
Subject: [PATCH] import yasm macros from x264

---
 libavcodec/i386/x86inc.asm |  521 ++++++++++++++++++++++++++++++++++++++++++++
 1 files changed, 521 insertions(+), 0 deletions(-)
 create mode 100644 libavcodec/i386/x86inc.asm

diff --git a/libavcodec/i386/x86inc.asm b/libavcodec/i386/x86inc.asm
new file mode 100644
index 0000000..84787e1
--- /dev/null
+++ b/libavcodec/i386/x86inc.asm
@@ -0,0 +1,521 @@
+;*****************************************************************************
+;* x86inc.asm
+;*****************************************************************************
+;* Copyright (C) 2005-2008 Loren Merritt <lorenm at u.washington.edu>
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with FFmpeg; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;*****************************************************************************
+
+; FIXME: All of the 64bit asm functions that take a stride as an argument
+; via register, assume that the high dword of that register is filled with 0.
+; This is true in practice (since we never do any 64bit arithmetic on strides,
+; and x264's strides are all positive), but is not guaranteed by the ABI.
+
+; Name of the .rodata section.
+; Kludge: Something on OS X fails to align .rodata even given an align attribute,
+; so use a different read-only section.
+%macro SECTION_RODATA 0
+    %ifidn __OUTPUT_FORMAT__,macho64
+        SECTION .text align=16
+    %elifidn __OUTPUT_FORMAT__,macho
+        SECTION .text align=16
+        fakegot:
+    %else
+        SECTION .rodata align=16
+    %endif
+%endmacro
+
+; PIC support macros. All these macros are totally harmless when PIC is
+; not defined but can ruin everything if misused in PIC mode. On x86_32, shared
+; objects cannot directly access global variables by address, they need to
+; go through the GOT (global offset table). Most OSes do not care about it
+; and let you load non-shared .so objects (Linux, Win32...). However, OS X
+; requires PIC code in its .dylib objects.
+;
+; - GLOBAL should be used as a suffix for global addressing, eg.
+;     picgetgot ebx
+;     mov eax, [foo GLOBAL]
+;   instead of
+;     mov eax, [foo]
+;
+; - picgetgot computes the GOT address into the given register in PIC
+;   mode, otherwise does nothing. You need to do this before using GLOBAL.
+;   Before in both execution order and compiled code order (so GLOBAL knows
+;   which register the GOT is in).
+
+%ifndef PIC
+    %define GLOBAL
+    %macro picgetgot 1
+    %endmacro
+%elifdef ARCH_X86_64
+    %define PIC64
+    %define GLOBAL wrt rip
+    %macro picgetgot 1
+    %endmacro
+%else
+    %define PIC32
+    %ifidn __OUTPUT_FORMAT__,macho
+        ; There is no real global offset table on OS X, but we still
+        ; need to reference our variables by offset.
+        %macro picgetgot 1
+            call %%getgot
+          %%getgot:
+            pop %1
+            add %1, $$ - %%getgot
+            %undef GLOBAL
+            %define GLOBAL + %1 - fakegot
+        %endmacro
+    %else ; elf
+        extern _GLOBAL_OFFSET_TABLE_
+        %macro picgetgot 1
+            call %%getgot
+          %%getgot:
+            pop %1
+            add %1, _GLOBAL_OFFSET_TABLE_ + $$ - %%getgot wrt ..gotpc
+            %undef GLOBAL
+            %define GLOBAL + %1 wrt ..gotoff
+        %endmacro
+    %endif
+%endif
+
+; Macros to eliminate most code duplication between x86_32 and x86_64:
+; Currently this works only for leaf functions which load all their arguments
+; into registers at the start, and make no other use of the stack. Luckily that
+; covers most of x264's asm.
+
+; PROLOGUE:
+; %1 = number of arguments. loads them from stack if needed.
+; %2 = number of registers used, not including PIC. pushes callee-saved regs if needed.
+; %3 = whether global constants are used in this function. inits x86_32 PIC if needed.
+; %4 = list of names to define to registers
+; PROLOGUE can also be invoked by adding the same options to cglobal
+
+; e.g.
+; cglobal foo, 2,3,0, dst, src, tmp
+; declares a function (foo), taking two args (dst and src), one local variable (tmp), and not using globals
+
+; TODO Some functions can use some args directly from the stack. If they're the
+; last args then you can just not declare them, but if they're in the middle
+; we need a more flexible macro.
+
+; RET:
+; Pops anything that was pushed by PROLOGUE
+
+; REP_RET:
+; Same, but if it doesn't pop anything it becomes a 2-byte ret, for athlons
+; which are slow when a normal ret follows a branch.
+
+%macro DECLARE_REG 6
+    %define r%1q %2
+    %define r%1d %3
+    %define r%1w %4
+    %define r%1b %5
+    %define r%1m %6
+    %define r%1  %2
+%endmacro
+
+%macro DECLARE_REG_SIZE 2
+    %define r%1q r%1
+    %define e%1q r%1
+    %define r%1d e%1
+    %define e%1d e%1
+    %define r%1w %1
+    %define e%1w %1
+    %define r%1b %2
+    %define e%1b %2
+%ifndef ARCH_X86_64
+    %define r%1  e%1
+%endif
+%endmacro
+
+DECLARE_REG_SIZE ax, al
+DECLARE_REG_SIZE bx, bl
+DECLARE_REG_SIZE cx, cl
+DECLARE_REG_SIZE dx, dl
+DECLARE_REG_SIZE si, sil
+DECLARE_REG_SIZE di, dil
+DECLARE_REG_SIZE bp, bpl
+
+%ifdef ARCH_X86_64
+    %define gprsize 8
+%else
+    %define gprsize 4
+%endif
+
+%macro PUSH 1
+    push %1
+    %assign stack_offset stack_offset+gprsize
+%endmacro
+
+%macro POP 1
+    pop %1
+    %assign stack_offset stack_offset-gprsize
+%endmacro
+
+%macro SUB 2
+    sub %1, %2
+    %ifidn %1, rsp
+        %assign stack_offset stack_offset+(%2)
+    %endif
+%endmacro
+
+%macro ADD 2
+    add %1, %2
+    %ifidn %1, rsp
+        %assign stack_offset stack_offset-(%2)
+    %endif
+%endmacro
+
+%macro movifnidn 2
+    %ifnidn %1, %2
+        mov %1, %2
+    %endif
+%endmacro
+
+%macro movsxdifnidn 2
+    %ifnidn %1, %2
+        movsxd %1, %2
+    %endif
+%endmacro
+
+%macro ASSERT 1
+    %if (%1) == 0
+        %error assert failed
+    %endif
+%endmacro
+
+%macro DEFINE_ARGS 0-*
+    %ifdef n_arg_names
+        %assign %%i 0
+        %rep n_arg_names
+            CAT_UNDEF arg_name %+ %%i, q
+            CAT_UNDEF arg_name %+ %%i, d
+            CAT_UNDEF arg_name %+ %%i, w
+            CAT_UNDEF arg_name %+ %%i, b
+            CAT_UNDEF arg_name, %%i
+            %assign %%i %%i+1
+        %endrep
+    %endif
+
+    %assign %%i 0
+    %rep %0
+        %xdefine %1q r %+ %%i %+ q
+        %xdefine %1d r %+ %%i %+ d
+        %xdefine %1w r %+ %%i %+ w
+        %xdefine %1b r %+ %%i %+ b
+        CAT_XDEFINE arg_name, %%i, %1
+        %assign %%i %%i+1
+        %rotate 1
+    %endrep
+    %assign n_arg_names %%i
+%endmacro
+
+%ifdef ARCH_X86_64 ;========================================================
+
+DECLARE_REG 0, rdi, edi, di,  dil, edi
+DECLARE_REG 1, rsi, esi, si,  sil, esi
+DECLARE_REG 2, rdx, edx, dx,  dl,  edx
+DECLARE_REG 3, rcx, ecx, cx,  cl,  ecx
+DECLARE_REG 4, r8,  r8d, r8w, r8b, r8d
+DECLARE_REG 5, r9,  r9d, r9w, r9b, r9d
+DECLARE_REG 6, rax, eax, ax,  al,  [rsp + stack_offset + 8]
+%define r7m [rsp + stack_offset + 16]
+%define r8m [rsp + stack_offset + 24]
+
+%macro LOAD_IF_USED 2 ; reg_id, number_of_args
+    %if %1 < %2
+        mov r%1, [rsp - 40 + %1*8]
+    %endif
+%endmacro
+
+%macro PROLOGUE 2-4+ 0 ; #args, #regs, pic, arg_names...
+    ASSERT %2 >= %1
+    ASSERT %2 <= 7
+    %assign stack_offset 0
+    LOAD_IF_USED 6, %1
+    DEFINE_ARGS %4
+%endmacro
+
+%macro RET 0
+    ret
+%endmacro
+
+%macro REP_RET 0
+    rep ret
+%endmacro
+
+%else ; X86_32 ;==============================================================
+
+DECLARE_REG 0, eax, eax, ax, al,   [esp + stack_offset + 4]
+DECLARE_REG 1, ecx, ecx, cx, cl,   [esp + stack_offset + 8]
+DECLARE_REG 2, edx, edx, dx, dl,   [esp + stack_offset + 12]
+DECLARE_REG 3, ebx, ebx, bx, bl,   [esp + stack_offset + 16]
+DECLARE_REG 4, esi, esi, si, null, [esp + stack_offset + 20]
+DECLARE_REG 5, edi, edi, di, null, [esp + stack_offset + 24]
+DECLARE_REG 6, ebp, ebp, bp, null, [esp + stack_offset + 28]
+%define r7m [esp + stack_offset + 32]
+%define r8m [esp + stack_offset + 36]
+%define rsp esp
+
+%macro PUSH_IF_USED 1 ; reg_id
+    %if %1 < regs_used
+        push r%1
+        %assign stack_offset stack_offset+4
+    %endif
+%endmacro
+
+%macro POP_IF_USED 1 ; reg_id
+    %if %1 < regs_used
+        pop r%1
+    %endif
+%endmacro
+
+%macro LOAD_IF_USED 2 ; reg_id, number_of_args
+    %if %1 < %2
+        mov r%1, [esp + stack_offset + 4 + %1*4]
+    %endif
+%endmacro
+
+%macro PROLOGUE 2-4+ 0 ; #args, #regs, pic, arg_names...
+    ASSERT %2 >= %1
+    %assign stack_offset 0
+    %assign regs_used %2
+    %ifdef PIC
+    %if %3
+        %assign regs_used regs_used+1
+    %endif
+    %endif
+    ASSERT regs_used <= 7
+    PUSH_IF_USED 3
+    PUSH_IF_USED 4
+    PUSH_IF_USED 5
+    PUSH_IF_USED 6
+    LOAD_IF_USED 0, %1
+    LOAD_IF_USED 1, %1
+    LOAD_IF_USED 2, %1
+    LOAD_IF_USED 3, %1
+    LOAD_IF_USED 4, %1
+    LOAD_IF_USED 5, %1
+    LOAD_IF_USED 6, %1
+    %if %3
+        picgetgot r%2
+    %endif
+    DEFINE_ARGS %4
+%endmacro
+
+%macro RET 0
+    POP_IF_USED 6
+    POP_IF_USED 5
+    POP_IF_USED 4
+    POP_IF_USED 3
+    ret
+%endmacro
+
+%macro REP_RET 0
+    %if regs_used > 3
+        RET
+    %else
+        rep ret
+    %endif
+%endmacro
+
+%endif ;======================================================================
+
+
+
+;=============================================================================
+; arch-independent part
+;=============================================================================
+
+%assign function_align 16
+
+; Symbol prefix for C linkage
+%macro cglobal 1-2+
+    %ifidn __OUTPUT_FORMAT__,elf
+        %ifdef PREFIX
+            global _%1:function hidden
+            %define %1 _%1
+        %else
+            global %1:function hidden
+        %endif
+    %else
+        %ifdef PREFIX
+            global _%1
+            %define %1 _%1
+        %else
+            global %1
+        %endif
+    %endif
+    align function_align
+    %1:
+    RESET_MM_PERMUTATION ; not really needed, but makes disassembly somewhat nicer
+    %if %0 > 1
+        PROLOGUE %2
+    %endif
+%endmacro
+
+%macro cextern 1
+    %ifdef PREFIX
+        extern _%1
+        %define %1 _%1
+    %else
+        extern %1
+    %endif
+%endmacro
+
+; This is needed for ELF, otherwise the GNU linker assumes the stack is
+; executable by default.
+%ifidn __OUTPUT_FORMAT__,elf
+SECTION .note.GNU-stack noalloc noexec nowrite progbits
+%endif
+
+%assign FENC_STRIDE 16
+%assign FDEC_STRIDE 32
+
+; merge mmx and sse*
+
+%macro CAT_XDEFINE 3
+    %xdefine %1%2 %3
+%endmacro
+
+%macro CAT_UNDEF 2
+    %undef %1%2
+%endmacro
+
+%macro INIT_MMX 0
+    %define RESET_MM_PERMUTATION INIT_MMX
+    %define mmsize 8
+    %define num_mmregs 8
+    %define mova movq
+    %define movu movq
+    %define movh movd
+    %define movnt movntq
+    %assign %%i 0
+    %rep 8
+    CAT_XDEFINE m, %%i, mm %+ %%i
+    CAT_XDEFINE nmm, %%i, %%i
+    %assign %%i %%i+1
+    %endrep
+    %rep 8
+    CAT_UNDEF m, %%i
+    CAT_UNDEF nmm, %%i
+    %assign %%i %%i+1
+    %endrep
+%endmacro
+
+%macro INIT_XMM 0
+    %define RESET_MM_PERMUTATION INIT_XMM
+    %define mmsize 16
+    %define num_mmregs 8
+    %ifdef ARCH_X86_64
+    %define num_mmregs 16
+    %endif
+    %define mova movdqa
+    %define movu movdqu
+    %define movh movq
+    %define movnt movntdq
+    %assign %%i 0
+    %rep num_mmregs
+    CAT_XDEFINE m, %%i, xmm %+ %%i
+    CAT_XDEFINE nxmm, %%i, %%i
+    %assign %%i %%i+1
+    %endrep
+%endmacro
+
+INIT_MMX
+
+; I often want to use macros that permute their arguments. e.g. there's no
+; efficient way to implement butterfly or transpose or dct without swapping some
+; arguments.
+;
+; I would like to not have to manually keep track of the permutations:
+; If I insert a permutation in the middle of a function, it should automatically
+; change everything that follows. For more complex macros I may also have multiple
+; implementations, e.g. the SSE2 and SSSE3 versions may have different permutations.
+;
+; Hence these macros. Insert a PERMUTE or some SWAPs at the end of a macro that
+; permutes its arguments. It's equivalent to exchanging the contents of the
+; registers, except that this way you exchange the register names instead, so it
+; doesn't cost any cycles.
+
+%macro PERMUTE 2-* ; takes a list of pairs to swap
+%rep %0/2
+    %xdefine tmp%2 m%2
+    %xdefine ntmp%2 nm%2
+    %rotate 2
+%endrep
+%rep %0/2
+    %xdefine m%1 tmp%2
+    %xdefine nm%1 ntmp%2
+    %undef tmp%2
+    %undef ntmp%2
+    %rotate 2
+%endrep
+%endmacro
+
+%macro SWAP 2-* ; swaps a single chain (sometimes more concise than pairs)
+%rep %0-1
+%ifdef m%1
+    %xdefine tmp m%1
+    %xdefine m%1 m%2
+    %xdefine m%2 tmp
+    CAT_XDEFINE n, m%1, %1
+    CAT_XDEFINE n, m%2, %2
+%else
+    ; If we were called as "SWAP m0,m1" rather than "SWAP 0,1" infer the original numbers here.
+    ; Be careful using this mode in nested macros though, as in some cases there may be
+    ; other copies of m# that have already been dereferenced and don't get updated correctly.
+    %xdefine %%n1 n %+ %1
+    %xdefine %%n2 n %+ %2
+    %xdefine tmp m %+ %%n1
+    CAT_XDEFINE m, %%n1, m %+ %%n2
+    CAT_XDEFINE m, %%n2, tmp
+    CAT_XDEFINE n, m %+ %%n1, %%n1
+    CAT_XDEFINE n, m %+ %%n2, %%n2
+%endif
+    %undef tmp
+    %rotate 1
+%endrep
+%endmacro
+
+%macro SAVE_MM_PERMUTATION 1
+    %assign %%i 0
+    %rep num_mmregs
+    CAT_XDEFINE %1_m, %%i, m %+ %%i
+    %assign %%i %%i+1
+    %endrep
+%endmacro
+
+%macro LOAD_MM_PERMUTATION 1
+    %assign %%i 0
+    %rep num_mmregs
+    CAT_XDEFINE m, %%i, %1_m %+ %%i
+    %assign %%i %%i+1
+    %endrep
+%endmacro
+
+%macro call 1
+    call %1
+    %ifdef %1_m0
+        LOAD_MM_PERMUTATION %1
+    %endif
+%endmacro
+
+; substitutions which are functionally identical but reduce code size
+%define movdqa movaps
+%define movdqu movups
+
-- 
1.5.5.1

-------------- next part --------------
From 47a48d0df45cd98e63935b5e95a5d477475e2463 Mon Sep 17 00:00:00 2001
From: Loren Merritt <pengvado at akuvian.org>
Date: Wed, 23 Jul 2008 22:55:09 -0600
Subject: [PATCH] split-radix FFT

---
 libavcodec/Makefile         |    1 +
 libavcodec/dsputil.h        |    9 +-
 libavcodec/fft.c            |  367 +++++++++++++++++++++++------------
 libavcodec/i386/fft_3dn.c   |  111 +----------
 libavcodec/i386/fft_3dn2.c  |  108 ++---------
 libavcodec/i386/fft_mmx.asm |  456 +++++++++++++++++++++++++++++++++++++++++++
 libavcodec/i386/fft_sse.c   |  149 ++++----------
 7 files changed, 764 insertions(+), 437 deletions(-)
 create mode 100644 libavcodec/i386/fft_mmx.asm

diff --git a/libavcodec/Makefile b/libavcodec/Makefile
index 9b3a974..6b8e02c 100644
--- a/libavcodec/Makefile
+++ b/libavcodec/Makefile
@@ -387,6 +387,7 @@ OBJS += i386/fdct_mmx.o \
         i386/simple_idct_mmx.o \
         i386/idct_mmx_xvid.o \
         i386/idct_sse2_xvid.o \
+        i386/fft_mmx.o \
         i386/fft_sse.o \
         i386/fft_3dn.o \
         i386/fft_3dn2.o \
diff --git a/libavcodec/dsputil.h b/libavcodec/dsputil.h
index 7a47b87..29ab8b2 100644
--- a/libavcodec/dsputil.h
+++ b/libavcodec/dsputil.h
@@ -638,6 +638,8 @@ typedef struct FFTContext {
     uint16_t *revtab;
     FFTComplex *exptab;
     FFTComplex *exptab1; /* only used by SSE code */
+    FFTComplex *tmp_buf;
+    void (*fft_permute)(struct FFTContext *s, FFTComplex *z);
     void (*fft_calc)(struct FFTContext *s, FFTComplex *z);
     void (*imdct_calc)(struct MDCTContext *s, FFTSample *output,
                        const FFTSample *input, FFTSample *tmp);
@@ -646,13 +648,18 @@ typedef struct FFTContext {
 } FFTContext;
 
 int ff_fft_init(FFTContext *s, int nbits, int inverse);
-void ff_fft_permute(FFTContext *s, FFTComplex *z);
+void ff_fft_permute_c(FFTContext *s, FFTComplex *z);
+void ff_fft_permute_sse(FFTContext *s, FFTComplex *z);
 void ff_fft_calc_c(FFTContext *s, FFTComplex *z);
 void ff_fft_calc_sse(FFTContext *s, FFTComplex *z);
 void ff_fft_calc_3dn(FFTContext *s, FFTComplex *z);
 void ff_fft_calc_3dn2(FFTContext *s, FFTComplex *z);
 void ff_fft_calc_altivec(FFTContext *s, FFTComplex *z);
 
+static inline void ff_fft_permute(FFTContext *s, FFTComplex *z)
+{
+    s->fft_permute(s, z);
+}
 static inline void ff_fft_calc(FFTContext *s, FFTComplex *z)
 {
     s->fft_calc(s, z);
diff --git a/libavcodec/fft.c b/libavcodec/fft.c
index 47e9e06..43ed6e8 100644
--- a/libavcodec/fft.c
+++ b/libavcodec/fft.c
@@ -1,6 +1,8 @@
 /*
  * FFT/IFFT transforms
+ * Copyright (c) 2008 Loren Merritt
  * Copyright (c) 2002 Fabrice Bellard.
+ * Partly based on libdjbfft by D. J. Bernstein
  *
  * This file is part of FFmpeg.
  *
@@ -26,6 +28,39 @@
 
 #include "dsputil.h"
 
+/* cos(2*pi*x/n) for 0<=x<=n/4, followed by its reverse */
+DECLARE_ALIGNED_16(FFTSample, ff_cos_16[8]);
+DECLARE_ALIGNED_16(FFTSample, ff_cos_32[16]);
+DECLARE_ALIGNED_16(FFTSample, ff_cos_64[32]);
+DECLARE_ALIGNED_16(FFTSample, ff_cos_128[64]);
+DECLARE_ALIGNED_16(FFTSample, ff_cos_256[128]);
+DECLARE_ALIGNED_16(FFTSample, ff_cos_512[256]);
+DECLARE_ALIGNED_16(FFTSample, ff_cos_1024[512]);
+DECLARE_ALIGNED_16(FFTSample, ff_cos_2048[1024]);
+DECLARE_ALIGNED_16(FFTSample, ff_cos_4096[2048]);
+DECLARE_ALIGNED_16(FFTSample, ff_cos_8192[4096]);
+DECLARE_ALIGNED_16(FFTSample, ff_cos_16384[8192]);
+DECLARE_ALIGNED_16(FFTSample, ff_cos_32768[16384]);
+DECLARE_ALIGNED_16(FFTSample, ff_cos_65536[32768]);
+static FFTSample *ff_cos_tabs[] = {
+    ff_cos_16, ff_cos_32, ff_cos_64, ff_cos_128, ff_cos_256, ff_cos_512, ff_cos_1024,
+    ff_cos_2048, ff_cos_4096, ff_cos_8192, ff_cos_16384, ff_cos_32768, ff_cos_65536,
+};
+
+static int split_radix_permutation(int i, int n, int inverse)
+{
+    int m;
+    if(n <= 2) return i;
+    m = n >> 1;
+    if(i < m) return split_radix_permutation(i,m,inverse) << 1;
+    i -= m;
+    m >>= 1;
+    if(!inverse) i ^= m;
+    if(i < m) return (split_radix_permutation(i,m,inverse) << 2) + 1;
+    i -= m;
+    return ((split_radix_permutation(i,m,inverse) << 2) - 1) & (n - 1);
+}
+
 /**
  * The size of the FFT is 2^nbits. If inverse is TRUE, inverse FFT is
  * done
@@ -34,12 +69,15 @@ int ff_fft_init(FFTContext *s, int nbits, int inverse)
 {
     int i, j, m, n;
     float alpha, c1, s1, s2;
-    int shuffle = 0;
+    int split_radix = 1;
     int av_unused has_vectors;
 
+    if (nbits < 2 || nbits > 16)
+        goto fail;
     s->nbits = nbits;
     n = 1 << nbits;
 
+    s->tmp_buf = NULL;
     s->exptab = av_malloc((n / 2) * sizeof(FFTComplex));
     if (!s->exptab)
         goto fail;
@@ -50,13 +88,7 @@ int ff_fft_init(FFTContext *s, int nbits, int inverse)
 
     s2 = inverse ? 1.0 : -1.0;
 
-    for(i=0;i<(n/2);i++) {
-        alpha = 2 * M_PI * (float)i / (float)n;
-        c1 = cos(alpha);
-        s1 = sin(alpha) * s2;
-        s->exptab[i].re = c1;
-        s->exptab[i].im = s1;
-    }
+    s->fft_permute = ff_fft_permute_c;
     s->fft_calc = ff_fft_calc_c;
     s->imdct_calc = ff_imdct_calc;
     s->imdct_half = ff_imdct_half;
@@ -64,36 +96,55 @@ int ff_fft_init(FFTContext *s, int nbits, int inverse)
 
 #ifdef HAVE_MMX
     has_vectors = mm_support();
-    shuffle = 1;
-    if (has_vectors & MM_3DNOWEXT) {
-        /* 3DNowEx for K7/K8 */
+    if (has_vectors & MM_SSE) {
+        /* SSE for P3/P4/K8 */
+        s->imdct_calc = ff_imdct_calc_sse;
+        s->imdct_half = ff_imdct_half_sse;
+        s->fft_permute = ff_fft_permute_sse;
+        s->fft_calc = ff_fft_calc_sse;
+    } else if (has_vectors & MM_3DNOWEXT) {
+        /* 3DNowEx for K7 */
         s->imdct_calc = ff_imdct_calc_3dn2;
         s->imdct_half = ff_imdct_half_3dn2;
         s->fft_calc = ff_fft_calc_3dn2;
     } else if (has_vectors & MM_3DNOW) {
         /* 3DNow! for K6-2/3 */
         s->fft_calc = ff_fft_calc_3dn;
-    } else if (has_vectors & MM_SSE) {
-        /* SSE for P3/P4 */
-        s->imdct_calc = ff_imdct_calc_sse;
-        s->imdct_half = ff_imdct_half_sse;
-        s->fft_calc = ff_fft_calc_sse;
-    } else {
-        shuffle = 0;
     }
 #elif defined HAVE_ALTIVEC && !defined ALTIVEC_USE_REFERENCE_C_CODE
     has_vectors = mm_support();
     if (has_vectors & MM_ALTIVEC) {
         s->fft_calc = ff_fft_calc_altivec;
-        shuffle = 1;
+        split_radix = 0;
     }
 #endif
 
     /* compute constant table for HAVE_SSE version */
-    if (shuffle) {
+    if (split_radix) {
+        for(j=4; j<=nbits; j++) {
+            int m = 1<<j;
+            double freq = 2*M_PI/m;
+            FFTSample *tab = ff_cos_tabs[j-4];
+            for(i=0; i<=m/4; i++)
+                tab[i] = cos(i*freq);
+            for(i=1; i<m/4; i++)
+                tab[m/2-i] = tab[i];
+        }
+        for(i=0; i<n; i++)
+            s->revtab[(n - split_radix_permutation(i, n, s->inverse)) % n] = i;
+        s->tmp_buf = av_malloc(n * sizeof(FFTComplex));
+    } else {
         int np, nblocks, np2, l;
         FFTComplex *q;
 
+        for(i=0; i<(n/2); i++) {
+            alpha = 2 * M_PI * (float)i / (float)n;
+            c1 = cos(alpha);
+            s1 = sin(alpha) * s2;
+            s->exptab[i].re = c1;
+            s->exptab[i].im = s1;
+        }
+
         np = 1 << nbits;
         nblocks = np >> 3;
         np2 = np >> 1;
@@ -116,7 +167,6 @@ int ff_fft_init(FFTContext *s, int nbits, int inverse)
             nblocks = nblocks >> 1;
         } while (nblocks != 0);
         av_freep(&s->exptab);
-    }
 
     /* compute bit reverse table */
 
@@ -127,126 +177,35 @@ int ff_fft_init(FFTContext *s, int nbits, int inverse)
         }
         s->revtab[i]=m;
     }
+    }
+
     return 0;
  fail:
     av_freep(&s->revtab);
     av_freep(&s->exptab);
     av_freep(&s->exptab1);
+    av_freep(&s->tmp_buf);
     return -1;
 }
 
-/* butter fly op */
-#define BF(pre, pim, qre, qim, pre1, pim1, qre1, qim1) \
-{\
-  FFTSample ax, ay, bx, by;\
-  bx=pre1;\
-  by=pim1;\
-  ax=qre1;\
-  ay=qim1;\
-  pre = (bx + ax);\
-  pim = (by + ay);\
-  qre = (bx - ax);\
-  qim = (by - ay);\
-}
-
-#define MUL16(a,b) ((a) * (b))
-
-#define CMUL(pre, pim, are, aim, bre, bim) \
-{\
-   pre = (MUL16(are, bre) - MUL16(aim, bim));\
-   pim = (MUL16(are, bim) + MUL16(bre, aim));\
-}
-
-/**
- * Do a complex FFT with the parameters defined in ff_fft_init(). The
- * input data must be permuted before with s->revtab table. No
- * 1.0/sqrt(n) normalization is done.
- */
-void ff_fft_calc_c(FFTContext *s, FFTComplex *z)
-{
-    int ln = s->nbits;
-    int j, np, np2;
-    int nblocks, nloops;
-    register FFTComplex *p, *q;
-    FFTComplex *exptab = s->exptab;
-    int l;
-    FFTSample tmp_re, tmp_im;
-
-    np = 1 << ln;
-
-    /* pass 0 */
-
-    p=&z[0];
-    j=(np >> 1);
-    do {
-        BF(p[0].re, p[0].im, p[1].re, p[1].im,
-           p[0].re, p[0].im, p[1].re, p[1].im);
-        p+=2;
-    } while (--j != 0);
-
-    /* pass 1 */
-
-
-    p=&z[0];
-    j=np >> 2;
-    if (s->inverse) {
-        do {
-            BF(p[0].re, p[0].im, p[2].re, p[2].im,
-               p[0].re, p[0].im, p[2].re, p[2].im);
-            BF(p[1].re, p[1].im, p[3].re, p[3].im,
-               p[1].re, p[1].im, -p[3].im, p[3].re);
-            p+=4;
-        } while (--j != 0);
-    } else {
-        do {
-            BF(p[0].re, p[0].im, p[2].re, p[2].im,
-               p[0].re, p[0].im, p[2].re, p[2].im);
-            BF(p[1].re, p[1].im, p[3].re, p[3].im,
-               p[1].re, p[1].im, p[3].im, -p[3].re);
-            p+=4;
-        } while (--j != 0);
-    }
-    /* pass 2 .. ln-1 */
-
-    nblocks = np >> 3;
-    nloops = 1 << 2;
-    np2 = np >> 1;
-    do {
-        p = z;
-        q = z + nloops;
-        for (j = 0; j < nblocks; ++j) {
-            BF(p->re, p->im, q->re, q->im,
-               p->re, p->im, q->re, q->im);
-
-            p++;
-            q++;
-            for(l = nblocks; l < np2; l += nblocks) {
-                CMUL(tmp_re, tmp_im, exptab[l].re, exptab[l].im, q->re, q->im);
-                BF(p->re, p->im, q->re, q->im,
-                   p->re, p->im, tmp_re, tmp_im);
-                p++;
-                q++;
-            }
-
-            p += nloops;
-            q += nloops;
-        }
-        nblocks = nblocks >> 1;
-        nloops = nloops << 1;
-    } while (nblocks != 0);
-}
-
 /**
  * Do the permutation needed BEFORE calling ff_fft_calc()
  */
-void ff_fft_permute(FFTContext *s, FFTComplex *z)
+void ff_fft_permute_c(FFTContext *s, FFTComplex *z)
 {
     int j, k, np;
     FFTComplex tmp;
     const uint16_t *revtab = s->revtab;
+    np = 1 << s->nbits;
+
+    if (s->tmp_buf) {
+        /* TODO: handle split-radix permute in a more optimal way, probably in-place */
+        for(j=0;j<np;j++) s->tmp_buf[revtab[j]] = z[j];
+        memcpy(z, s->tmp_buf, np * sizeof(FFTComplex));
+        return;
+    }
 
     /* reverse */
-    np = 1 << s->nbits;
     for(j=0;j<np;j++) {
         k = revtab[j];
         if (k < j) {
@@ -262,5 +221,165 @@ void ff_fft_end(FFTContext *s)
     av_freep(&s->revtab);
     av_freep(&s->exptab);
     av_freep(&s->exptab1);
+    av_freep(&s->tmp_buf);
+}
+
+#define sqrthalf (float)M_SQRT1_2
+
+#define BF(x,y,a,b) {\
+    x = a - b;\
+    y = a + b;\
+}
+
+#define BUTTERFLIES(a0,a1,a2,a3) {\
+    BF(t3, t5, t5, t1);\
+    BF(a2.re, a0.re, a0.re, t5);\
+    BF(a3.im, a1.im, a1.im, t3);\
+    BF(t4, t6, t2, t6);\
+    BF(a3.re, a1.re, a1.re, t4);\
+    BF(a2.im, a0.im, a0.im, t6);\
+}
+
+// force loading all the inputs before storing any.
+// this is slightly slower for small data, but avoids store->load aliasing
+// for addresses separated by large powers of 2.
+#define BUTTERFLIES_BIG(a0,a1,a2,a3) {\
+    FFTSample r0=a0.re, i0=a0.im, r1=a1.re, i1=a1.im;\
+    BF(t3, t5, t5, t1);\
+    BF(a2.re, a0.re, r0, t5);\
+    BF(a3.im, a1.im, i1, t3);\
+    BF(t4, t6, t2, t6);\
+    BF(a3.re, a1.re, r1, t4);\
+    BF(a2.im, a0.im, i0, t6);\
+}
+
+#define TRANSFORM(a0,a1,a2,a3,wre,wim) {\
+    t1 = a2.re * wre + a2.im * wim;\
+    t2 = a2.im * wre - a2.re * wim;\
+    t5 = a3.re * wre - a3.im * wim;\
+    t6 = a3.im * wre + a3.re * wim;\
+    BUTTERFLIES(a0,a1,a2,a3)\
+}
+
+#define TRANSFORM_ZERO(a0,a1,a2,a3) {\
+    t1 = a2.re;\
+    t2 = a2.im;\
+    t5 = a3.re;\
+    t6 = a3.im;\
+    BUTTERFLIES(a0,a1,a2,a3)\
+}
+
+static void fft4(FFTComplex *z)
+{
+    FFTSample t1, t2, t3, t4, t5, t6, t7, t8;
+
+    BF(t3, t1, z[0].re, z[1].re);
+    BF(t8, t6, z[3].re, z[2].re);
+    BF(z[2].re, z[0].re, t1, t6);
+    BF(t4, t2, z[0].im, z[1].im);
+    BF(t7, t5, z[2].im, z[3].im);
+    BF(z[3].im, z[1].im, t4, t8);
+    BF(z[3].re, z[1].re, t3, t7);
+    BF(z[2].im, z[0].im, t2, t5);
+}
+
+static void fft8(FFTComplex *z)
+{
+    FFTSample t1, t2, t3, t4, t5, t6, t7, t8;
+
+    fft4(z);
+
+    BF(t1, z[5].re, z[4].re, -z[5].re);
+    BF(t2, z[5].im, z[4].im, -z[5].im);
+    BF(t3, z[7].re, z[6].re, -z[7].re);
+    BF(t4, z[7].im, z[6].im, -z[7].im);
+    BF(t8, t1, t3, t1);
+    BF(t7, t2, t2, t4);
+    BF(z[4].re, z[0].re, z[0].re, t1);
+    BF(z[4].im, z[0].im, z[0].im, t2);
+    BF(z[6].re, z[2].re, z[2].re, t7);
+    BF(z[6].im, z[2].im, z[2].im, t8);
+
+    TRANSFORM(z[1],z[3],z[5],z[7],sqrthalf,sqrthalf);
+}
+
+static void fft16(FFTComplex *z)
+{
+    FFTSample t1, t2, t3, t4, t5, t6;
+
+    fft8(z);
+    fft4(z+8);
+    fft4(z+12);
+
+    TRANSFORM_ZERO(z[0],z[4],z[8],z[12]);
+    TRANSFORM(z[2],z[6],z[10],z[14],sqrthalf,sqrthalf);
+    TRANSFORM(z[1],z[5],z[9],z[13],ff_cos_16[1],ff_cos_16[3]);
+    TRANSFORM(z[3],z[7],z[11],z[15],ff_cos_16[3],ff_cos_16[1]);
+}
+
+/* z[0...8n-1], w[1...2n-1] */
+#define PASS(name)\
+static void name(FFTComplex *z, const FFTSample *wre, unsigned int n)\
+{\
+    FFTSample t1, t2, t3, t4, t5, t6;\
+    int o1 = 2*n;\
+    int o2 = 4*n;\
+    int o3 = 6*n;\
+    const FFTSample *wim = wre+o1;\
+    n--;\
+\
+    TRANSFORM_ZERO(z[0],z[o1],z[o2],z[o3]);\
+    TRANSFORM(z[1],z[o1+1],z[o2+1],z[o3+1],wre[1],wim[-1]);\
+    do {\
+        z += 2;\
+        wre += 2;\
+        wim -= 2;\
+        TRANSFORM(z[0],z[o1],z[o2],z[o3],wre[0],wim[0]);\
+        TRANSFORM(z[1],z[o1+1],z[o2+1],z[o3+1],wre[1],wim[-1]);\
+    } while(--n);\
+}
+
+PASS(pass)
+#undef BUTTERFLIES
+#define BUTTERFLIES BUTTERFLIES_BIG
+PASS(pass_big)
+
+#define DECL_FFT(n,n2,n4)\
+static void fft##n(FFTComplex *z)\
+{\
+    fft##n2(z);\
+    fft##n4(z+n4*2);\
+    fft##n4(z+n4*3);\
+    pass(z,ff_cos_##n,n4/2);\
+}
+
+DECL_FFT(32,16,8)
+DECL_FFT(64,32,16)
+DECL_FFT(128,64,32)
+DECL_FFT(256,128,64)
+DECL_FFT(512,256,128)
+#define pass pass_big
+DECL_FFT(1024,512,256)
+DECL_FFT(2048,1024,512)
+DECL_FFT(4096,2048,1024)
+DECL_FFT(8192,4096,2048)
+DECL_FFT(16384,8192,4096)
+DECL_FFT(32768,16384,8192)
+DECL_FFT(65536,32768,16384)
+#undef pass
+
+static void (*fft_dispatch[])(FFTComplex*) = {
+    fft4, fft8, fft16, fft32, fft64, fft128, fft256, fft512, fft1024,
+    fft2048, fft4096, fft8192, fft16384, fft32768, fft65536,
+};
+
+/**
+ * Do a complex FFT with the parameters defined in ff_fft_init(). The
+ * input data must be permuted before with s->revtab table. No
+ * 1.0/sqrt(n) normalization is done.
+ */
+void ff_fft_calc_c(FFTContext *s, FFTComplex *z)
+{
+    fft_dispatch[s->nbits-2](z);
 }
 
diff --git a/libavcodec/i386/fft_3dn.c b/libavcodec/i386/fft_3dn.c
index 8bd7b89..6f2e2e8 100644
--- a/libavcodec/i386/fft_3dn.c
+++ b/libavcodec/i386/fft_3dn.c
@@ -1,7 +1,6 @@
 /*
  * FFT/MDCT transform with 3DNow! optimizations
- * Copyright (c) 2006 Zuxy MENG Jie, Loren Merritt
- * Based on fft_sse.c copyright (c) 2002 Fabrice Bellard.
+ * Copyright (c) 2008 Loren Merritt
  *
  * This file is part of FFmpeg.
  *
@@ -20,109 +19,5 @@
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  */
 
-#include "libavutil/x86_cpu.h"
-#include "libavcodec/dsputil.h"
-
-static const int p1m1[2] __attribute__((aligned(8))) =
-    { 0, 1 << 31 };
-
-static const int m1p1[2] __attribute__((aligned(8))) =
-    { 1 << 31, 0 };
-
-void ff_fft_calc_3dn(FFTContext *s, FFTComplex *z)
-{
-    int ln = s->nbits;
-    long j;
-    x86_reg i;
-    long nblocks, nloops;
-    FFTComplex *p, *cptr;
-
-    asm volatile(
-        /* FEMMS is not a must here but recommended by AMD */
-        "femms \n\t"
-        "movq %0, %%mm7 \n\t"
-        ::"m"(*(s->inverse ? m1p1 : p1m1))
-    );
-
-    i = 8 << ln;
-    asm volatile(
-        "1: \n\t"
-        "sub $32, %0 \n\t"
-        "movq    (%0,%1), %%mm0 \n\t"
-        "movq  16(%0,%1), %%mm1 \n\t"
-        "movq   8(%0,%1), %%mm2 \n\t"
-        "movq  24(%0,%1), %%mm3 \n\t"
-        "movq      %%mm0, %%mm4 \n\t"
-        "movq      %%mm1, %%mm5 \n\t"
-        "pfadd     %%mm2, %%mm0 \n\t"
-        "pfadd     %%mm3, %%mm1 \n\t"
-        "pfsub     %%mm2, %%mm4 \n\t"
-        "pfsub     %%mm3, %%mm5 \n\t"
-        "movq      %%mm0, %%mm2 \n\t"
-        "punpckldq %%mm5, %%mm6 \n\t"
-        "punpckhdq %%mm6, %%mm5 \n\t"
-        "movq      %%mm4, %%mm3 \n\t"
-        "pxor      %%mm7, %%mm5 \n\t"
-        "pfadd     %%mm1, %%mm0 \n\t"
-        "pfadd     %%mm5, %%mm4 \n\t"
-        "pfsub     %%mm1, %%mm2 \n\t"
-        "pfsub     %%mm5, %%mm3 \n\t"
-        "movq      %%mm0,   (%0,%1) \n\t"
-        "movq      %%mm4,  8(%0,%1) \n\t"
-        "movq      %%mm2, 16(%0,%1) \n\t"
-        "movq      %%mm3, 24(%0,%1) \n\t"
-        "jg 1b \n\t"
-        :"+r"(i)
-        :"r"(z)
-    );
-    /* pass 2 .. ln-1 */
-
-    nblocks = 1 << (ln-3);
-    nloops = 1 << 2;
-    cptr = s->exptab1;
-    do {
-        p = z;
-        j = nblocks;
-        do {
-            i = nloops*8;
-            asm volatile(
-                "1: \n\t"
-                "sub $16, %0 \n\t"
-                "movq    (%1,%0), %%mm0 \n\t"
-                "movq   8(%1,%0), %%mm1 \n\t"
-                "movq    (%2,%0), %%mm2 \n\t"
-                "movq   8(%2,%0), %%mm3 \n\t"
-                "movq      %%mm2, %%mm4 \n\t"
-                "movq      %%mm3, %%mm5 \n\t"
-                "punpckldq %%mm2, %%mm2 \n\t"
-                "punpckldq %%mm3, %%mm3 \n\t"
-                "punpckhdq %%mm4, %%mm4 \n\t"
-                "punpckhdq %%mm5, %%mm5 \n\t"
-                "pfmul   (%3,%0,2), %%mm2 \n\t" //  cre*re cim*re
-                "pfmul  8(%3,%0,2), %%mm3 \n\t"
-                "pfmul 16(%3,%0,2), %%mm4 \n\t" // -cim*im cre*im
-                "pfmul 24(%3,%0,2), %%mm5 \n\t"
-                "pfadd     %%mm2, %%mm4 \n\t" // cre*re-cim*im cim*re+cre*im
-                "pfadd     %%mm3, %%mm5 \n\t"
-                "movq      %%mm0, %%mm2 \n\t"
-                "movq      %%mm1, %%mm3 \n\t"
-                "pfadd     %%mm4, %%mm0 \n\t"
-                "pfadd     %%mm5, %%mm1 \n\t"
-                "pfsub     %%mm4, %%mm2 \n\t"
-                "pfsub     %%mm5, %%mm3 \n\t"
-                "movq      %%mm0,  (%1,%0) \n\t"
-                "movq      %%mm1, 8(%1,%0) \n\t"
-                "movq      %%mm2,  (%2,%0) \n\t"
-                "movq      %%mm3, 8(%2,%0) \n\t"
-                "jg 1b \n\t"
-                :"+r"(i)
-                :"r"(p), "r"(p + nloops), "r"(cptr)
-            );
-            p += nloops*2;
-        } while (--j);
-        cptr += nloops*2;
-        nblocks >>= 1;
-        nloops <<= 1;
-    } while (nblocks != 0);
-    asm volatile("femms");
-}
+#define EMULATE_3DNOWEXT
+#include "fft_3dn2.c"
diff --git a/libavcodec/i386/fft_3dn2.c b/libavcodec/i386/fft_3dn2.c
index 9068dff..60759f1 100644
--- a/libavcodec/i386/fft_3dn2.c
+++ b/libavcodec/i386/fft_3dn2.c
@@ -23,105 +23,23 @@
 #include "libavutil/x86_cpu.h"
 #include "libavcodec/dsputil.h"
 
-static const int p1m1[2] __attribute__((aligned(8))) =
-    { 0, 1 << 31 };
+#ifdef EMULATE_3DNOWEXT
+#define ff_fft_calc_3dn2 ff_fft_calc_3dn
+#define ff_fft_dispatch_3dn2 ff_fft_dispatch_3dn
+#define ff_imdct_calc_3dn2 ff_imdct_calc_3dn
+#define ff_imdct_half_3dn2 ff_imdct_half_3dn
+#endif
 
-static const int m1p1[2] __attribute__((aligned(8))) =
-    { 1 << 31, 0 };
+void ff_fft_dispatch_3dn2(FFTComplex *z, int nbits);
 
 void ff_fft_calc_3dn2(FFTContext *s, FFTComplex *z)
 {
-    int ln = s->nbits;
-    long j;
-    x86_reg i;
-    long nblocks, nloops;
-    FFTComplex *p, *cptr;
-
-    asm volatile(
-        /* FEMMS is not a must here but recommended by AMD */
-        "femms \n\t"
-        "movq %0, %%mm7 \n\t"
-        ::"m"(*(s->inverse ? m1p1 : p1m1))
-    );
-
-    i = 8 << ln;
-    asm volatile(
-        "1: \n\t"
-        "sub $32, %0 \n\t"
-        "movq    (%0,%1), %%mm0 \n\t"
-        "movq  16(%0,%1), %%mm1 \n\t"
-        "movq   8(%0,%1), %%mm2 \n\t"
-        "movq  24(%0,%1), %%mm3 \n\t"
-        "movq      %%mm0, %%mm4 \n\t"
-        "movq      %%mm1, %%mm5 \n\t"
-        "pfadd     %%mm2, %%mm0 \n\t"
-        "pfadd     %%mm3, %%mm1 \n\t"
-        "pfsub     %%mm2, %%mm4 \n\t"
-        "pfsub     %%mm3, %%mm5 \n\t"
-        "movq      %%mm0, %%mm2 \n\t"
-        "pswapd    %%mm5, %%mm5 \n\t"
-        "movq      %%mm4, %%mm3 \n\t"
-        "pxor      %%mm7, %%mm5 \n\t"
-        "pfadd     %%mm1, %%mm0 \n\t"
-        "pfadd     %%mm5, %%mm4 \n\t"
-        "pfsub     %%mm1, %%mm2 \n\t"
-        "pfsub     %%mm5, %%mm3 \n\t"
-        "movq      %%mm0,   (%0,%1) \n\t"
-        "movq      %%mm4,  8(%0,%1) \n\t"
-        "movq      %%mm2, 16(%0,%1) \n\t"
-        "movq      %%mm3, 24(%0,%1) \n\t"
-        "jg 1b \n\t"
-        :"+r"(i)
-        :"r"(z)
-    );
-    /* pass 2 .. ln-1 */
-
-    nblocks = 1 << (ln-3);
-    nloops = 1 << 2;
-    cptr = s->exptab1;
-    do {
-        p = z;
-        j = nblocks;
-        do {
-            i = nloops*8;
-            asm volatile(
-                "1: \n\t"
-                "sub $16, %0 \n\t"
-                "movq    (%1,%0), %%mm0 \n\t"
-                "movq   8(%1,%0), %%mm1 \n\t"
-                "movq    (%2,%0), %%mm2 \n\t"
-                "movq   8(%2,%0), %%mm3 \n\t"
-                "movq  (%3,%0,2), %%mm4 \n\t"
-                "movq 8(%3,%0,2), %%mm5 \n\t"
-                "pswapd    %%mm4, %%mm6 \n\t" // no need for cptr[2] & cptr[3]
-                "pswapd    %%mm5, %%mm7 \n\t"
-                "pfmul     %%mm2, %%mm4 \n\t" // cre*re cim*im
-                "pfmul     %%mm3, %%mm5 \n\t"
-                "pfmul     %%mm2, %%mm6 \n\t" // cim*re cre*im
-                "pfmul     %%mm3, %%mm7 \n\t"
-                "pfpnacc   %%mm6, %%mm4 \n\t" // cre*re-cim*im cim*re+cre*im
-                "pfpnacc   %%mm7, %%mm5 \n\t"
-                "movq      %%mm0, %%mm2 \n\t"
-                "movq      %%mm1, %%mm3 \n\t"
-                "pfadd     %%mm4, %%mm0 \n\t"
-                "pfadd     %%mm5, %%mm1 \n\t"
-                "pfsub     %%mm4, %%mm2 \n\t"
-                "pfsub     %%mm5, %%mm3 \n\t"
-                "movq      %%mm0,  (%1,%0) \n\t"
-                "movq      %%mm1, 8(%1,%0) \n\t"
-                "movq      %%mm2,  (%2,%0) \n\t"
-                "movq      %%mm3, 8(%2,%0) \n\t"
-                "jg 1b \n\t"
-                :"+r"(i)
-                :"r"(p), "r"(p + nloops), "r"(cptr)
-            );
-            p += nloops*2;
-        } while (--j);
-        cptr += nloops*2;
-        nblocks >>= 1;
-        nloops <<= 1;
-    } while (nblocks != 0);
+    int n = 1<<s->nbits;
+    int i;
+    ff_fft_dispatch_3dn2(z, s->nbits);
     asm volatile("femms");
+    for(i=0; i<n; i+=2)
+        FFSWAP(FFTSample, z[i].im, z[i+1].re);
 }
 
 static void imdct_3dn2(MDCTContext *s, const FFTSample *input, FFTSample *tmp)
@@ -162,7 +80,7 @@ static void imdct_3dn2(MDCTContext *s, const FFTSample *input, FFTSample *tmp)
         );
     }
 
-    ff_fft_calc(&s->fft, z);
+    ff_fft_dispatch_3dn2(z, s->fft.nbits);
 
     /* post rotation + reordering */
     for(k = 0; k < n4; k++) {
diff --git a/libavcodec/i386/fft_mmx.asm b/libavcodec/i386/fft_mmx.asm
new file mode 100644
index 0000000..6aebc3d
--- /dev/null
+++ b/libavcodec/i386/fft_mmx.asm
@@ -0,0 +1,456 @@
+;******************************************************************************
+;* FFT transform with SSE/3DNow optimizations
+;* Copyright (c) 2008 Loren Merritt
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with FFmpeg; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;******************************************************************************
+
+%include "x86inc.asm"
+
+SECTION_RODATA
+
+%define M_SQRT1_2 0.70710678118654752440
+ps_root2: times 4 dd M_SQRT1_2
+ps_root2mppm: dd -M_SQRT1_2, M_SQRT1_2, M_SQRT1_2, -M_SQRT1_2
+ps_m1p1: dd 1<<31, 0
+
+%assign i 16
+%rep 13
+cextern ff_cos_ %+ i
+%assign i i<<1
+%endrep
+
+%ifdef ARCH_X86_64
+    %define pointer dq
+%else
+    %define pointer dd
+%endif
+
+%macro IF0 1+
+%endmacro
+%macro IF1 1+
+    %1
+%endmacro
+
+section .text align=16
+
+%macro T2_3DN 4 ; z0, z1, mem0, mem1
+    mova     %1, %3
+    mova     %2, %1
+    pfadd    %1, %4
+    pfsub    %2, %4
+%endmacro
+
+%macro T4_3DN 6 ; z0, z1, z2, z3, tmp0, tmp1
+    mova     %5, %3
+    pfsub    %3, %4
+    pfadd    %5, %4 ; {t6,t5}
+    pxor     %3, [ps_m1p1 GLOBAL] ; {t8,t7}
+    mova     %6, %1
+    pswapd   %3, %3
+    pfadd    %1, %5 ; {r0,i0}
+    pfsub    %6, %5 ; {r2,i2}
+    mova     %4, %2
+    pfadd    %2, %3 ; {r1,i1}
+    pfsub    %4, %3 ; {r3,i3}
+    SWAP     %3, %6
+%endmacro
+
+; in:  %1={r0,i0,r1,i1} %2={r2,i2,r3,i3}
+; out: %1={r0,r1,r2,r3} %2={i0,i1,i2,i3}
+%macro T4_SSE 3
+    mova     %3, %1
+    shufps   %1, %2, 0x64 ; {r0,i0,r3,i2}
+    shufps   %3, %2, 0xce ; {r1,i1,r2,i3}
+    mova     %2, %1
+    addps    %1, %3       ; {t1,t2,t6,t5}
+    subps    %2, %3       ; {t3,t4,t8,t7}
+    mova     %3, %1
+    shufps   %1, %2, 0x44 ; {t1,t2,t3,t4}
+    shufps   %3, %2, 0xbe ; {t6,t5,t7,t8}
+    mova     %2, %1
+    addps    %1, %3       ; {r0,i0,r1,i1}
+    subps    %2, %3       ; {r2,i2,r3,i3}
+    mova     %3, %1
+    shufps   %1, %2, 0x88 ; {r0,r1,r2,r3}
+    shufps   %3, %2, 0xdd ; {i0,i1,i2,i3}
+    SWAP     %2, %3
+%endmacro
+
+%macro T8_SSE 6 ; r0,i0,r1,i1,t0,t1
+    mova     %5, %3
+    shufps   %3, %4, 0x44 ; {r4,i4,r6,i6}
+    shufps   %5, %4, 0xee ; {r5,i5,r7,i7}
+    mova     %6, %3
+    subps    %3, %5       ; {r5,i5,r7,i7}
+    addps    %6, %5       ; {t1,t2,t3,t4}
+    mova     %5, %3
+    shufps   %5, %5, 0xb1 ; {i5,r5,i7,r7}
+    mulps    %3, [ps_root2mppm GLOBAL] ; {-r5,i5,r7,-i7}
+    mulps    %5, [ps_root2 GLOBAL]
+    addps    %3, %5       ; {t8,t7,ta,t9}
+    mova     %5, %6
+    shufps   %6, %3, 0x36 ; {t3,t2,t9,t8}
+    shufps   %5, %3, 0x9c ; {t1,t4,t7,ta}
+    mova     %3, %6
+    addps    %6, %5       ; {t1,t2,t9,ta}
+    subps    %3, %5       ; {t6,t5,tc,tb}
+    mova     %5, %6
+    shufps   %6, %3, 0xd8 ; {t1,t9,t5,tb}
+    shufps   %5, %3, 0x8d ; {t2,ta,t6,tc}
+    mova     %3, %1
+    mova     %4, %2
+    addps    %1, %6       ; {r0,r1,r2,r3}
+    addps    %2, %5       ; {i0,i1,i2,i3}
+    subps    %3, %6       ; {r4,r5,r6,r7}
+    subps    %4, %5       ; {i4,i5,i6,i7}
+%endmacro
+
+; scheduled for cpu-bound sizes
+%macro PASS_SMALL 3 ; (to load m4-m7), wre, wim
+IF%1 mova    m4, Z(4)
+IF%1 mova    m5, Z(5)
+    mova     m0, %2 ; wre
+    mova     m2, m4
+    mova     m1, %3 ; wim
+    mova     m3, m5
+    mulps    m2, m0 ; r2*wre
+IF%1 mova    m6, Z(6)
+    mulps    m3, m1 ; i2*wim
+IF%1 mova    m7, Z(7)
+    mulps    m4, m1 ; r2*wim
+    mulps    m5, m0 ; i2*wre
+    addps    m2, m3 ; r2*wre + i2*wim
+    mova     m3, m1
+    mulps    m1, m6 ; r3*wim
+    subps    m5, m4 ; i2*wre - r2*wim
+    mova     m4, m0
+    mulps    m3, m7 ; i3*wim
+    mulps    m4, m6 ; r3*wre
+    mulps    m0, m7 ; i3*wre
+    subps    m4, m3 ; r3*wre - i3*wim
+    mova     m3, Z(0)
+    addps    m0, m1 ; i3*wre + r3*wim
+    mova     m1, m4
+    addps    m4, m2 ; t5
+    subps    m1, m2 ; t3
+    subps    m3, m4 ; r2
+    addps    m4, Z(0) ; r0
+    mova     m6, Z(2)
+    mova   Z(4), m3
+    mova   Z(0), m4
+    mova     m3, m5
+    subps    m5, m0 ; t4
+    mova     m4, m6
+    subps    m6, m5 ; r3
+    addps    m5, m4 ; r1
+    mova   Z(6), m6
+    mova   Z(2), m5
+    mova     m2, Z(3)
+    addps    m3, m0 ; t6
+    subps    m2, m1 ; i3
+    mova     m7, Z(1)
+    addps    m1, Z(3) ; i1
+    mova   Z(7), m2
+    mova   Z(3), m1
+    mova     m4, m7
+    subps    m7, m3 ; i2
+    addps    m3, m4 ; i0
+    mova   Z(5), m7
+    mova   Z(1), m3
+%endmacro
+
+; scheduled to avoid store->load aliasing
+%macro PASS_BIG 1 ; (!interleave)
+    mova     m4, Z(4) ; r2
+    mova     m5, Z(5) ; i2
+    mova     m2, m4
+    mova     m0, [wq] ; wre
+    mova     m3, m5
+    mova     m1, [wq+o1q] ; wim
+    mulps    m2, m0 ; r2*wre
+    mova     m6, Z(6) ; r3
+    mulps    m3, m1 ; i2*wim
+    mova     m7, Z(7) ; i3
+    mulps    m4, m1 ; r2*wim
+    mulps    m5, m0 ; i2*wre
+    addps    m2, m3 ; r2*wre + i2*wim
+    mova     m3, m1
+    mulps    m1, m6 ; r3*wim
+    subps    m5, m4 ; i2*wre - r2*wim
+    mova     m4, m0
+    mulps    m3, m7 ; i3*wim
+    mulps    m4, m6 ; r3*wre
+    mulps    m0, m7 ; i3*wre
+    subps    m4, m3 ; r3*wre - i3*wim
+    mova     m3, Z(0)
+    addps    m0, m1 ; i3*wre + r3*wim
+    mova     m1, m4
+    addps    m4, m2 ; t5
+    subps    m1, m2 ; t3
+    subps    m3, m4 ; r2
+    addps    m4, Z(0) ; r0
+    mova     m6, Z(2)
+    mova   Z(4), m3
+    mova   Z(0), m4
+    mova     m3, m5
+    subps    m5, m0 ; t4
+    mova     m4, m6
+    subps    m6, m5 ; r3
+    addps    m5, m4 ; r1
+IF%1 mova  Z(6), m6
+IF%1 mova  Z(2), m5
+    mova     m2, Z(3)
+    addps    m3, m0 ; t6
+    subps    m2, m1 ; i3
+    mova     m7, Z(1)
+    addps    m1, Z(3) ; i1
+IF%1 mova  Z(7), m2
+IF%1 mova  Z(3), m1
+    mova     m4, m7
+    subps    m7, m3 ; i2
+    addps    m3, m4 ; i0
+IF%1 mova  Z(5), m7
+IF%1 mova  Z(1), m3
+%if %1==0
+    mova     m4, m5 ; r1
+    mova     m0, m6 ; r3
+    unpcklps m5, m1
+    unpckhps m4, m1
+    unpcklps m6, m2
+    unpckhps m0, m2
+    mova     m1, Z(0)
+    mova     m2, Z(4)
+    mova   Z(2), m5
+    mova   Z(3), m4
+    mova   Z(6), m6
+    mova   Z(7), m0
+    mova     m5, m1 ; r0
+    mova     m4, m2 ; r2
+    unpcklps m1, m3
+    unpckhps m5, m3
+    unpcklps m2, m7
+    unpckhps m4, m7
+    mova   Z(0), m1
+    mova   Z(1), m5
+    mova   Z(4), m2
+    mova   Z(5), m4
+%endif
+%endmacro
+
+%macro PUNPCK 3
+    mova      %3, %1
+    punpckldq %1, %2
+    punpckhdq %3, %2
+%endmacro
+
+INIT_XMM
+
+%define Z(x) [r0+mmsize*x]
+
+align 16
+fft4_sse:
+    mova     m0, Z(0)
+    mova     m1, Z(1)
+    T4_SSE   m0, m1, m2
+    mova   Z(0), m0
+    mova   Z(1), m1
+    ret
+
+align 16
+fft8_sse:
+    mova     m0, Z(0)
+    mova     m1, Z(1)
+    T4_SSE   m0, m1, m2
+    mova     m2, Z(2)
+    mova     m3, Z(3)
+    T8_SSE   m0, m1, m2, m3, m4, m5
+    mova   Z(0), m0
+    mova   Z(1), m1
+    mova   Z(2), m2
+    mova   Z(3), m3
+    ret
+
+align 16
+fft16_sse:
+    mova     m0, Z(0)
+    mova     m1, Z(1)
+    T4_SSE   m0, m1, m2
+    mova     m2, Z(2)
+    mova     m3, Z(3)
+    T8_SSE   m0, m1, m2, m3, m4, m5
+    mova     m4, Z(4)
+    mova     m5, Z(5)
+    mova   Z(0), m0
+    mova   Z(1), m1
+    mova   Z(2), m2
+    mova   Z(3), m3
+    T4_SSE   m4, m5, m6
+    mova     m6, Z(6)
+    mova     m7, Z(7)
+    T4_SSE   m6, m7, m0
+    PASS_SMALL 0, [ff_cos_16 GLOBAL], [ff_cos_16+16 GLOBAL]
+    ret
+
+
+INIT_MMX
+
+%macro FFT48_3DN 1
+align 16
+fft4%1:
+    T2_3DN   m0, m1, Z(0), Z(1)
+    mova     m2, Z(2)
+    mova     m3, Z(3)
+    T4_3DN   m0, m1, m2, m3, m4, m5
+    PUNPCK   m0, m1, m4
+    PUNPCK   m2, m3, m5
+    mova   Z(0), m0
+    mova   Z(1), m4
+    mova   Z(2), m2
+    mova   Z(3), m5
+    ret
+
+align 16
+fft8%1:
+    T2_3DN   m0, m1, Z(0), Z(1)
+    mova     m2, Z(2)
+    mova     m3, Z(3)
+    T4_3DN   m0, m1, m2, m3, m4, m5
+    mova   Z(0), m0
+    mova   Z(2), m2
+    T2_3DN   m4, m5, Z(4), Z(5)
+    T2_3DN   m6, m7, Z(6), Z(7)
+    pswapd   m0, m5
+    pswapd   m2, m7
+    pxor     m0, [ps_m1p1 GLOBAL]
+    pxor     m2, [ps_m1p1 GLOBAL]
+    pfsub    m5, m0
+    pfadd    m7, m2
+    pfmul    m5, [ps_root2 GLOBAL]
+    pfmul    m7, [ps_root2 GLOBAL]
+    T4_3DN   m1, m3, m5, m7, m0, m2
+    mova   Z(5), m5
+    mova   Z(7), m7
+    mova     m0, Z(0)
+    mova     m2, Z(2)
+    T4_3DN   m0, m2, m4, m6, m5, m7
+    PUNPCK   m0, m1, m5
+    PUNPCK   m2, m3, m7
+    mova   Z(0), m0
+    mova   Z(1), m5
+    mova   Z(2), m2
+    mova   Z(3), m7
+    PUNPCK   m4, Z(5), m5
+    PUNPCK   m6, Z(7), m7
+    mova   Z(4), m4
+    mova   Z(5), m5
+    mova   Z(6), m6
+    mova   Z(7), m7
+    ret
+%endmacro
+
+FFT48_3DN _3dn2
+
+%macro pswapd 2
+%ifidn %1, %2
+    movd [r0+12], %1
+    punpckhdq %1, [r0+8]
+%else
+    movq  %1, %2
+    psrlq %1, 32
+    punpckldq %1, %2
+%endif
+%endmacro
+
+FFT48_3DN _3dn
+
+
+%define Z(x) [zq + o1q*(x&6)*((x/6)^1) + o3q*(x/6) + mmsize*(x&1)]
+
+%macro DECL_PASS 2+ ; name, payload
+align 16
+%1:
+DEFINE_ARGS z, w, n, o1, o3
+    lea o3q, [nq*3]
+    lea o1q, [nq*8]
+    shl o3q, 4
+.loop:
+    %2
+    add zq, mmsize*2
+    add wq, mmsize
+    sub nd, mmsize/8
+    jg .loop
+    rep ret
+%endmacro
+
+INIT_XMM
+DECL_PASS pass_sse, PASS_BIG 1
+DECL_PASS pass_interleave_sse, PASS_BIG 0
+
+INIT_MMX
+%define mulps pfmul
+%define addps pfadd
+%define subps pfsub
+DECL_PASS pass_3dn, PASS_SMALL 1, [wq], [wq+o1q]
+%define pass_3dn2 pass_3dn
+
+
+%macro DECL_FFT 2-3 ; nbits, cpu, suffix
+%xdefine list_of_fft fft4%2, fft8%2
+%if %1==5
+%xdefine list_of_fft list_of_fft, fft16%2
+%endif
+
+%assign n 1<<%1
+%rep 17-%1
+%assign n2 n/2
+%assign n4 n/4
+%xdefine list_of_fft list_of_fft, fft %+ n %+ %3%2
+
+align 16
+fft %+ n %+ %3%2:
+    call fft %+ n2 %+ %2
+    add r0, n*4 - (n&(-2<<%1))
+    call fft %+ n4 %+ %2
+    add r0, n*2 - (n2&(-2<<%1))
+    call fft %+ n4 %+ %2
+    sub r0, n*6 + (n2&(-2<<%1))
+    lea r1, [ff_cos_ %+ n GLOBAL]
+    mov r2d, n4/2
+    jmp pass%3%2
+
+%assign n n*2
+%endrep
+%undef n
+
+align 8
+dispatch_tab%3%2: pointer list_of_fft
+
+; on x86_32, this function does the register saving and restoring for all of fft
+; the others pass args in registers and don't spill anything
+cglobal ff_fft_dispatch%3%2, 2,5,0, z, nbits
+    lea r2, [dispatch_tab%3%2 GLOBAL]
+    mov r2, [r2 + (nbitsq-2)*gprsize]
+    call r2
+    RET
+%endmacro ; DECL_FFT
+
+DECL_FFT 5, _sse
+DECL_FFT 5, _sse, _interleave
+DECL_FFT 4, _3dn
+DECL_FFT 4, _3dn2
+
diff --git a/libavcodec/i386/fft_sse.c b/libavcodec/i386/fft_sse.c
index 305f44a..a164907 100644
--- a/libavcodec/i386/fft_sse.c
+++ b/libavcodec/i386/fft_sse.c
@@ -22,124 +22,55 @@
 #include "libavutil/x86_cpu.h"
 #include "libavcodec/dsputil.h"
 
-static const int p1p1p1m1[4] __attribute__((aligned(16))) =
-    { 0, 0, 0, 1 << 31 };
-
-static const int p1p1m1p1[4] __attribute__((aligned(16))) =
-    { 0, 0, 1 << 31, 0 };
-
-static const int p1p1m1m1[4] __attribute__((aligned(16))) =
-    { 0, 0, 1 << 31, 1 << 31 };
-
 static const int p1m1p1m1[4] __attribute__((aligned(16))) =
     { 0, 1 << 31, 0, 1 << 31 };
 
 static const int m1m1m1m1[4] __attribute__((aligned(16))) =
     { 1 << 31, 1 << 31, 1 << 31, 1 << 31 };
 
-#if 0
-static void print_v4sf(const char *str, __m128 a)
-{
-    float *p = (float *)&a;
-    printf("%s: %f %f %f %f\n",
-           str, p[0], p[1], p[2], p[3]);
-}
-#endif
+void ff_fft_dispatch_sse(FFTComplex *z, int nbits);
+void ff_fft_dispatch_interleave_sse(FFTComplex *z, int nbits);
 
-/* XXX: handle reverse case */
 void ff_fft_calc_sse(FFTContext *s, FFTComplex *z)
 {
-    int ln = s->nbits;
-    x86_reg i;
-    long j;
-    long nblocks, nloops;
-    FFTComplex *p, *cptr;
+    int n = 1 << s->nbits;
 
-    asm volatile(
-        "movaps %0, %%xmm4 \n\t"
-        "movaps %1, %%xmm5 \n\t"
-        ::"m"(*p1p1m1m1),
-          "m"(*(s->inverse ? p1p1m1p1 : p1p1p1m1))
-    );
+    ff_fft_dispatch_interleave_sse(z, s->nbits);
 
-    i = 8 << ln;
-    asm volatile(
-        "1: \n\t"
-        "sub $32, %0 \n\t"
-        /* do the pass 0 butterfly */
-        "movaps   (%0,%1), %%xmm0 \n\t"
-        "movaps    %%xmm0, %%xmm1 \n\t"
-        "shufps     $0x4E, %%xmm0, %%xmm0 \n\t"
-        "xorps     %%xmm4, %%xmm1 \n\t"
-        "addps     %%xmm1, %%xmm0 \n\t"
-        "movaps 16(%0,%1), %%xmm2 \n\t"
-        "movaps    %%xmm2, %%xmm3 \n\t"
-        "shufps     $0x4E, %%xmm2, %%xmm2 \n\t"
-        "xorps     %%xmm4, %%xmm3 \n\t"
-        "addps     %%xmm3, %%xmm2 \n\t"
-        /* multiply third by -i */
-        /* by toggling the sign bit */
-        "shufps     $0xB4, %%xmm2, %%xmm2 \n\t"
-        "xorps     %%xmm5, %%xmm2 \n\t"
-        /* do the pass 1 butterfly */
-        "movaps    %%xmm0, %%xmm1 \n\t"
-        "addps     %%xmm2, %%xmm0 \n\t"
-        "subps     %%xmm2, %%xmm1 \n\t"
-        "movaps    %%xmm0,   (%0,%1) \n\t"
-        "movaps    %%xmm1, 16(%0,%1) \n\t"
-        "jg 1b \n\t"
-        :"+r"(i)
-        :"r"(z)
-    );
-    /* pass 2 .. ln-1 */
+    if(n <= 16) {
+        x86_reg i = -8*n;
+        asm volatile(
+            "1: \n"
+            "movaps     (%0,%1), %%xmm0 \n"
+            "movaps      %%xmm0, %%xmm1 \n"
+            "unpcklps 16(%0,%1), %%xmm0 \n"
+            "unpckhps 16(%0,%1), %%xmm1 \n"
+            "movaps      %%xmm0,   (%0,%1) \n"
+            "movaps      %%xmm1, 16(%0,%1) \n"
+            "add $32, %0 \n"
+            "jl 1b \n"
+            :"+r"(i)
+            :"r"(z+n)
+            :"memory"
+        );
+    }
+}
 
-    nblocks = 1 << (ln-3);
-    nloops = 1 << 2;
-    cptr = s->exptab1;
-    do {
-        p = z;
-        j = nblocks;
-        do {
-            i = nloops*8;
-            asm volatile(
-                "1: \n\t"
-                "sub $32, %0 \n\t"
-                "movaps    (%2,%0), %%xmm1 \n\t"
-                "movaps    (%1,%0), %%xmm0 \n\t"
-                "movaps  16(%2,%0), %%xmm5 \n\t"
-                "movaps  16(%1,%0), %%xmm4 \n\t"
-                "movaps     %%xmm1, %%xmm2 \n\t"
-                "movaps     %%xmm5, %%xmm6 \n\t"
-                "shufps      $0xA0, %%xmm1, %%xmm1 \n\t"
-                "shufps      $0xF5, %%xmm2, %%xmm2 \n\t"
-                "shufps      $0xA0, %%xmm5, %%xmm5 \n\t"
-                "shufps      $0xF5, %%xmm6, %%xmm6 \n\t"
-                "mulps   (%3,%0,2), %%xmm1 \n\t" //  cre*re cim*re
-                "mulps 16(%3,%0,2), %%xmm2 \n\t" // -cim*im cre*im
-                "mulps 32(%3,%0,2), %%xmm5 \n\t" //  cre*re cim*re
-                "mulps 48(%3,%0,2), %%xmm6 \n\t" // -cim*im cre*im
-                "addps      %%xmm2, %%xmm1 \n\t"
-                "addps      %%xmm6, %%xmm5 \n\t"
-                "movaps     %%xmm0, %%xmm3 \n\t"
-                "movaps     %%xmm4, %%xmm7 \n\t"
-                "addps      %%xmm1, %%xmm0 \n\t"
-                "subps      %%xmm1, %%xmm3 \n\t"
-                "addps      %%xmm5, %%xmm4 \n\t"
-                "subps      %%xmm5, %%xmm7 \n\t"
-                "movaps     %%xmm0, (%1,%0) \n\t"
-                "movaps     %%xmm3, (%2,%0) \n\t"
-                "movaps     %%xmm4, 16(%1,%0) \n\t"
-                "movaps     %%xmm7, 16(%2,%0) \n\t"
-                "jg 1b \n\t"
-                :"+r"(i)
-                :"r"(p), "r"(p + nloops), "r"(cptr)
-            );
-            p += nloops*2;
-        } while (--j);
-        cptr += nloops*2;
-        nblocks >>= 1;
-        nloops <<= 1;
-    } while (nblocks != 0);
+void ff_fft_permute_sse(FFTContext *s, FFTComplex *z)
+{
+    int n = 1 << s->nbits;
+    int i;
+    for(i=0; i<n; i+=2) {
+        asm volatile(
+            "movaps %2, %%xmm0 \n"
+            "movlps %%xmm0, %0 \n"
+            "movhps %%xmm0, %1 \n"
+            :"=m"(s->tmp_buf[s->revtab[i]]),
+             "=m"(s->tmp_buf[s->revtab[i+1]])
+            :"m"(z[i])
+        );
+    }
+    memcpy(z, s->tmp_buf, n*sizeof(FFTComplex));
 }
 
 static void imdct_sse(MDCTContext *s, const FFTSample *input, FFTSample *tmp)
@@ -213,7 +144,7 @@ static void imdct_sse(MDCTContext *s, const FFTSample *input, FFTSample *tmp)
         );
     }
 
-    ff_fft_calc_sse(&s->fft, z);
+    ff_fft_dispatch_sse(z, s->fft.nbits);
 
 #ifndef ARCH_X86_64
 #undef P1M1P1M1
-- 
1.5.5.1
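
For anyone verifying or benchmarking the new code paths, a throwaway
reference transform is convenient. This is only a sketch (not proposed for
the tree), using the same sign convention as the table setup in fft.c
(s2 = inverse ? 1.0 : -1.0):

#include <math.h>
#include "dsputil.h"

/* naive O(n^2) DFT, for checking ff_fft_permute()+ff_fft_calc() output */
static void naive_dft(const FFTComplex *in, FFTComplex *out,
                      int nbits, int inverse)
{
    int n = 1 << nbits, j, k;
    double s2 = inverse ? 1.0 : -1.0;
    for(k = 0; k < n; k++) {
        double re = 0.0, im = 0.0;
        for(j = 0; j < n; j++) {
            double a = 2 * M_PI * (double)j * k / n;
            double c = cos(a), s = s2 * sin(a);
            re += in[j].re * c - in[j].im * s;
            im += in[j].re * s + in[j].im * c;
        }
        out[k].re = re;
        out[k].im = im;
    }
}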



