[FFmpeg-cvslog] vp9: split x86 assembly into two files.

Ronald S. Bultje git at videolan.org
Sat Dec 7 19:08:34 CET 2013


ffmpeg | branch: master | Ronald S. Bultje <rsbultje at gmail.com> | Sun Dec  1 16:27:16 2013 -0500| [8729964b996cc2590704108f7a432b8d10b63738] | committer: Ronald S. Bultje

vp9: split x86 assembly into two files.

(And in the future, loopfilter or intra pred could be put into their own
respective files as well.)

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=8729964b996cc2590704108f7a432b8d10b63738
---

 libavcodec/x86/Makefile                     |    3 +-
 libavcodec/x86/{vp9dsp.asm => vp9itxfm.asm} |  260 +------------------------
 libavcodec/x86/vp9mc.asm                    |  278 +++++++++++++++++++++++++++
 3 files changed, 282 insertions(+), 259 deletions(-)
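[Editor's note: the split leaves the inverse transforms in
vp9itxfm.asm and moves the subpel filters plus the fullpel put/avg
copies into the new vp9mc.asm. Both files do their final rounding
through pmulhrsw; as a point of reference, a minimal scalar model of
that instruction's per-lane behaviour (plain C, illustrative only,
not FFmpeg API) is:

    /* pmulhrsw, one 16-bit lane: multiply, round, keep the high half. */
    static inline int16_t mulhrs(int16_t a, int16_t b)
    {
        return (int16_t)(((int32_t)a * b + (1 << 14)) >> 15);
    }
]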

diff --git a/libavcodec/x86/Makefile b/libavcodec/x86/Makefile
index fa281dd..2d2d5a0 100644
--- a/libavcodec/x86/Makefile
+++ b/libavcodec/x86/Makefile
@@ -101,5 +101,6 @@ YASM-OBJS-$(CONFIG_VP3DSP)             += x86/vp3dsp.o
 YASM-OBJS-$(CONFIG_VP6_DECODER)        += x86/vp6dsp.o
 YASM-OBJS-$(CONFIG_VP8_DECODER)        += x86/vp8dsp.o                  \
                                           x86/vp8dsp_loopfilter.o
-YASM-OBJS-$(CONFIG_VP9_DECODER)        += x86/vp9dsp.o
+YASM-OBJS-$(CONFIG_VP9_DECODER)        += x86/vp9itxfm.o                \
+                                          x86/vp9mc.o
 YASM-OBJS-$(CONFIG_WEBP_DECODER)       += x86/vp8dsp.o
diff --git a/libavcodec/x86/vp9dsp.asm b/libavcodec/x86/vp9itxfm.asm
similarity index 60%
rename from libavcodec/x86/vp9dsp.asm
rename to libavcodec/x86/vp9itxfm.asm
index 8b08914..7b9d7df 100644
--- a/libavcodec/x86/vp9dsp.asm
+++ b/libavcodec/x86/vp9itxfm.asm
@@ -1,7 +1,7 @@
 ;******************************************************************************
-;* VP9 SIMD optimizations
+;* VP9 IDCT SIMD optimizations
 ;*
-;* Copyright (c) 2013 Ronald S. Bultje <rsbultje gmail com>
+;* Copyright (C) 2013 Clément Bœsch <u pkh me>
 ;*
 ;* This file is part of FFmpeg.
 ;*
@@ -24,65 +24,6 @@
 
 SECTION_RODATA
 
-; FIXME share with vp8dsp.asm
-pw_256:   times 8 dw 256
-
-%macro F8_TAPS 8
-times 8 db %1, %2
-times 8 db %3, %4
-times 8 db %5, %6
-times 8 db %7, %8
-%endmacro
-; int8_t ff_filters_ssse3[3][15][4][16]
-const filters_ssse3 ; smooth
-                    F8_TAPS -3, -1,  32,  64,  38,   1, -3,  0
-                    F8_TAPS -2, -2,  29,  63,  41,   2, -3,  0
-                    F8_TAPS -2, -2,  26,  63,  43,   4, -4,  0
-                    F8_TAPS -2, -3,  24,  62,  46,   5, -4,  0
-                    F8_TAPS -2, -3,  21,  60,  49,   7, -4,  0
-                    F8_TAPS -1, -4,  18,  59,  51,   9, -4,  0
-                    F8_TAPS -1, -4,  16,  57,  53,  12, -4, -1
-                    F8_TAPS -1, -4,  14,  55,  55,  14, -4, -1
-                    F8_TAPS -1, -4,  12,  53,  57,  16, -4, -1
-                    F8_TAPS  0, -4,   9,  51,  59,  18, -4, -1
-                    F8_TAPS  0, -4,   7,  49,  60,  21, -3, -2
-                    F8_TAPS  0, -4,   5,  46,  62,  24, -3, -2
-                    F8_TAPS  0, -4,   4,  43,  63,  26, -2, -2
-                    F8_TAPS  0, -3,   2,  41,  63,  29, -2, -2
-                    F8_TAPS  0, -3,   1,  38,  64,  32, -1, -3
-                    ; regular
-                    F8_TAPS  0,  1,  -5, 126,   8,  -3,  1,  0
-                    F8_TAPS -1,  3, -10, 122,  18,  -6,  2,  0
-                    F8_TAPS -1,  4, -13, 118,  27,  -9,  3, -1
-                    F8_TAPS -1,  4, -16, 112,  37, -11,  4, -1
-                    F8_TAPS -1,  5, -18, 105,  48, -14,  4, -1
-                    F8_TAPS -1,  5, -19,  97,  58, -16,  5, -1
-                    F8_TAPS -1,  6, -19,  88,  68, -18,  5, -1
-                    F8_TAPS -1,  6, -19,  78,  78, -19,  6, -1
-                    F8_TAPS -1,  5, -18,  68,  88, -19,  6, -1
-                    F8_TAPS -1,  5, -16,  58,  97, -19,  5, -1
-                    F8_TAPS -1,  4, -14,  48, 105, -18,  5, -1
-                    F8_TAPS -1,  4, -11,  37, 112, -16,  4, -1
-                    F8_TAPS -1,  3,  -9,  27, 118, -13,  4, -1
-                    F8_TAPS  0,  2,  -6,  18, 122, -10,  3, -1
-                    F8_TAPS  0,  1,  -3,   8, 126,  -5,  1,  0
-                    ; sharp
-                    F8_TAPS -1,  3,  -7, 127,   8,  -3,  1,  0
-                    F8_TAPS -2,  5, -13, 125,  17,  -6,  3, -1
-                    F8_TAPS -3,  7, -17, 121,  27, -10,  5, -2
-                    F8_TAPS -4,  9, -20, 115,  37, -13,  6, -2
-                    F8_TAPS -4, 10, -23, 108,  48, -16,  8, -3
-                    F8_TAPS -4, 10, -24, 100,  59, -19,  9, -3
-                    F8_TAPS -4, 11, -24,  90,  70, -21, 10, -4
-                    F8_TAPS -4, 11, -23,  80,  80, -23, 11, -4
-                    F8_TAPS -4, 10, -21,  70,  90, -24, 11, -4
-                    F8_TAPS -3,  9, -19,  59, 100, -24, 10, -4
-                    F8_TAPS -3,  8, -16,  48, 108, -23, 10, -4
-                    F8_TAPS -2,  6, -13,  37, 115, -20,  9, -4
-                    F8_TAPS -2,  5, -10,  27, 121, -17,  7, -3
-                    F8_TAPS -1,  3,  -6,  17, 125, -13,  5, -2
-                    F8_TAPS  0,  1,  -3,   8, 127,  -7,  3, -1
-
 pw_11585x2: times 8 dw 23170
 
 %macro VP9_IDCT_COEFFS 2
@@ -106,10 +47,6 @@ pw_1024: times 8 dw 1024
 
 SECTION .text
 
-;
-; IDCT helpers
-;
-
 ; (a*x + b*y + round) >> shift
 %macro VP9_MULSUB_2W_2X 6 ; dst1, dst2, src (unchanged), round, coefs1, coefs2
     pmaddwd            m%1, m%3, %5
@@ -379,196 +316,3 @@ cglobal vp9_idct_idct_8x8_add, 4,4,13, dst, stride, block, eob
     VP9_IDCT8_WRITEOUT
     RET
 %endif
-
-
-%macro filter_h_fn 1
-%assign %%px mmsize/2
-cglobal %1_8tap_1d_h_ %+ %%px, 6, 6, 11, dst, dstride, src, sstride, h, filtery
-    mova        m6, [pw_256]
-    mova        m7, [filteryq+ 0]
-%if ARCH_X86_64 && mmsize > 8
-    mova        m8, [filteryq+16]
-    mova        m9, [filteryq+32]
-    mova       m10, [filteryq+48]
-%endif
-.loop:
-    movh        m0, [srcq-3]
-    movh        m1, [srcq-2]
-    movh        m2, [srcq-1]
-    movh        m3, [srcq+0]
-    movh        m4, [srcq+1]
-    movh        m5, [srcq+2]
-    punpcklbw   m0, m1
-    punpcklbw   m2, m3
-    movh        m1, [srcq+3]
-    movh        m3, [srcq+4]
-    add       srcq, sstrideq
-    punpcklbw   m4, m5
-    punpcklbw   m1, m3
-    pmaddubsw   m0, m7
-%if ARCH_X86_64 && mmsize > 8
-    pmaddubsw   m2, m8
-    pmaddubsw   m4, m9
-    pmaddubsw   m1, m10
-%else
-    pmaddubsw   m2, [filteryq+16]
-    pmaddubsw   m4, [filteryq+32]
-    pmaddubsw   m1, [filteryq+48]
-%endif
-    paddw       m0, m2
-    paddw       m4, m1
-    paddsw      m0, m4
-    pmulhrsw    m0, m6
-%ifidn %1, avg
-    movh        m1, [dstq]
-%endif
-    packuswb    m0, m0
-%ifidn %1, avg
-    pavgb       m0, m1
-%endif
-    movh    [dstq], m0
-    add       dstq, dstrideq
-    dec         hd
-    jg .loop
-    RET
-%endmacro
-
-INIT_MMX ssse3
-filter_h_fn put
-filter_h_fn avg
-
-INIT_XMM ssse3
-filter_h_fn put
-filter_h_fn avg
-
-%macro filter_v_fn 1
-%assign %%px mmsize/2
-%if ARCH_X86_64
-cglobal %1_8tap_1d_v_ %+ %%px, 6, 8, 11, dst, dstride, src, sstride, h, filtery, src4, sstride3
-%else
-cglobal %1_8tap_1d_v_ %+ %%px, 4, 7, 11, dst, dstride, src, sstride, filtery, src4, sstride3
-    mov   filteryq, r5mp
-%define hd r4mp
-%endif
-    sub       srcq, sstrideq
-    lea  sstride3q, [sstrideq*3]
-    sub       srcq, sstrideq
-    mova        m6, [pw_256]
-    sub       srcq, sstrideq
-    mova        m7, [filteryq+ 0]
-    lea      src4q, [srcq+sstrideq*4]
-%if ARCH_X86_64 && mmsize > 8
-    mova        m8, [filteryq+16]
-    mova        m9, [filteryq+32]
-    mova       m10, [filteryq+48]
-%endif
-.loop:
-    ; FIXME maybe reuse loads from previous rows, or just
-    ; more generally unroll this to prevent multiple loads of
-    ; the same data?
-    movh        m0, [srcq]
-    movh        m1, [srcq+sstrideq]
-    movh        m2, [srcq+sstrideq*2]
-    movh        m3, [srcq+sstride3q]
-    movh        m4, [src4q]
-    movh        m5, [src4q+sstrideq]
-    punpcklbw   m0, m1
-    punpcklbw   m2, m3
-    movh        m1, [src4q+sstrideq*2]
-    movh        m3, [src4q+sstride3q]
-    add       srcq, sstrideq
-    add      src4q, sstrideq
-    punpcklbw   m4, m5
-    punpcklbw   m1, m3
-    pmaddubsw   m0, m7
-%if ARCH_X86_64 && mmsize > 8
-    pmaddubsw   m2, m8
-    pmaddubsw   m4, m9
-    pmaddubsw   m1, m10
-%else
-    pmaddubsw   m2, [filteryq+16]
-    pmaddubsw   m4, [filteryq+32]
-    pmaddubsw   m1, [filteryq+48]
-%endif
-    paddw       m0, m2
-    paddw       m4, m1
-    paddsw      m0, m4
-    pmulhrsw    m0, m6
-%ifidn %1, avg
-    movh        m1, [dstq]
-%endif
-    packuswb    m0, m0
-%ifidn %1, avg
-    pavgb       m0, m1
-%endif
-    movh    [dstq], m0
-    add       dstq, dstrideq
-    dec         hd
-    jg .loop
-    RET
-%endmacro
-
-INIT_MMX ssse3
-filter_v_fn put
-filter_v_fn avg
-
-INIT_XMM ssse3
-filter_v_fn put
-filter_v_fn avg
-
-%macro fpel_fn 6
-%if %2 == 4
-%define %%srcfn movh
-%define %%dstfn movh
-%else
-%define %%srcfn movu
-%define %%dstfn mova
-%endif
-
-%if %2 <= 16
-cglobal %1%2, 5, 7, 4, dst, dstride, src, sstride, h, dstride3, sstride3
-    lea  sstride3q, [sstrideq*3]
-    lea  dstride3q, [dstrideq*3]
-%else
-cglobal %1%2, 5, 5, 4, dst, dstride, src, sstride, h
-%endif
-.loop:
-    %%srcfn     m0, [srcq]
-    %%srcfn     m1, [srcq+s%3]
-    %%srcfn     m2, [srcq+s%4]
-    %%srcfn     m3, [srcq+s%5]
-    lea       srcq, [srcq+sstrideq*%6]
-%ifidn %1, avg
-    pavgb       m0, [dstq]
-    pavgb       m1, [dstq+d%3]
-    pavgb       m2, [dstq+d%4]
-    pavgb       m3, [dstq+d%5]
-%endif
-    %%dstfn [dstq], m0
-    %%dstfn [dstq+d%3], m1
-    %%dstfn [dstq+d%4], m2
-    %%dstfn [dstq+d%5], m3
-    lea       dstq, [dstq+dstrideq*%6]
-    sub         hd, %6
-    jnz .loop
-    RET
-%endmacro
-
-%define d16 16
-%define s16 16
-INIT_MMX mmx
-fpel_fn put, 4,  strideq, strideq*2, stride3q, 4
-fpel_fn put, 8,  strideq, strideq*2, stride3q, 4
-INIT_MMX sse
-fpel_fn avg, 4,  strideq, strideq*2, stride3q, 4
-fpel_fn avg, 8,  strideq, strideq*2, stride3q, 4
-INIT_XMM sse
-fpel_fn put, 16, strideq, strideq*2, stride3q, 4
-fpel_fn put, 32, mmsize,  strideq,   strideq+mmsize, 2
-fpel_fn put, 64, mmsize,  mmsize*2,  mmsize*3, 1
-INIT_XMM sse2
-fpel_fn avg, 16, strideq, strideq*2, stride3q, 4
-fpel_fn avg, 32, mmsize,  strideq,   strideq+mmsize, 2
-fpel_fn avg, 64, mmsize,  mmsize*2,  mmsize*3, 1
-%undef s16
-%undef d16
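[Editor's note: after the move, vp9itxfm.asm keeps only the IDCT
paths, e.g. the pw_11585x2 constant shown above. 11585 is
round(cos(pi/4) * 2^14), and the doubled value compensates for
pmulhrsw's division by 2^15, so one multiply scales a coefficient by
~0.7071. A scalar sketch, reusing the mulhrs() model from the note
after the diffstat (the function name is illustrative):

    /* Scale x by 11585/16384 ~= cos(pi/4), as the IDCT's cos(pi/4)
     * butterflies do with pmulhrsw and pw_11585x2. */
    static int16_t scale_cos_pi4(int16_t x)
    {
        return mulhrs(x, 2 * 11585); /* 23170 == pw_11585x2 */
    }
]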
diff --git a/libavcodec/x86/vp9mc.asm b/libavcodec/x86/vp9mc.asm
new file mode 100644
index 0000000..21c38b4
--- /dev/null
+++ b/libavcodec/x86/vp9mc.asm
@@ -0,0 +1,278 @@
+;******************************************************************************
+;* VP9 MC SIMD optimizations
+;*
+;* Copyright (c) 2013 Ronald S. Bultje <rsbultje gmail com>
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with FFmpeg; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;******************************************************************************
+
+%include "libavutil/x86/x86util.asm"
+
+SECTION_RODATA
+
+; FIXME share with vp8dsp.asm
+pw_256:   times 8 dw 256
+
+%macro F8_TAPS 8
+times 8 db %1, %2
+times 8 db %3, %4
+times 8 db %5, %6
+times 8 db %7, %8
+%endmacro
+; int8_t ff_filters_ssse3[3][15][4][16]
+const filters_ssse3 ; smooth
+                    F8_TAPS -3, -1,  32,  64,  38,   1, -3,  0
+                    F8_TAPS -2, -2,  29,  63,  41,   2, -3,  0
+                    F8_TAPS -2, -2,  26,  63,  43,   4, -4,  0
+                    F8_TAPS -2, -3,  24,  62,  46,   5, -4,  0
+                    F8_TAPS -2, -3,  21,  60,  49,   7, -4,  0
+                    F8_TAPS -1, -4,  18,  59,  51,   9, -4,  0
+                    F8_TAPS -1, -4,  16,  57,  53,  12, -4, -1
+                    F8_TAPS -1, -4,  14,  55,  55,  14, -4, -1
+                    F8_TAPS -1, -4,  12,  53,  57,  16, -4, -1
+                    F8_TAPS  0, -4,   9,  51,  59,  18, -4, -1
+                    F8_TAPS  0, -4,   7,  49,  60,  21, -3, -2
+                    F8_TAPS  0, -4,   5,  46,  62,  24, -3, -2
+                    F8_TAPS  0, -4,   4,  43,  63,  26, -2, -2
+                    F8_TAPS  0, -3,   2,  41,  63,  29, -2, -2
+                    F8_TAPS  0, -3,   1,  38,  64,  32, -1, -3
+                    ; regular
+                    F8_TAPS  0,  1,  -5, 126,   8,  -3,  1,  0
+                    F8_TAPS -1,  3, -10, 122,  18,  -6,  2,  0
+                    F8_TAPS -1,  4, -13, 118,  27,  -9,  3, -1
+                    F8_TAPS -1,  4, -16, 112,  37, -11,  4, -1
+                    F8_TAPS -1,  5, -18, 105,  48, -14,  4, -1
+                    F8_TAPS -1,  5, -19,  97,  58, -16,  5, -1
+                    F8_TAPS -1,  6, -19,  88,  68, -18,  5, -1
+                    F8_TAPS -1,  6, -19,  78,  78, -19,  6, -1
+                    F8_TAPS -1,  5, -18,  68,  88, -19,  6, -1
+                    F8_TAPS -1,  5, -16,  58,  97, -19,  5, -1
+                    F8_TAPS -1,  4, -14,  48, 105, -18,  5, -1
+                    F8_TAPS -1,  4, -11,  37, 112, -16,  4, -1
+                    F8_TAPS -1,  3,  -9,  27, 118, -13,  4, -1
+                    F8_TAPS  0,  2,  -6,  18, 122, -10,  3, -1
+                    F8_TAPS  0,  1,  -3,   8, 126,  -5,  1,  0
+                    ; sharp
+                    F8_TAPS -1,  3,  -7, 127,   8,  -3,  1,  0
+                    F8_TAPS -2,  5, -13, 125,  17,  -6,  3, -1
+                    F8_TAPS -3,  7, -17, 121,  27, -10,  5, -2
+                    F8_TAPS -4,  9, -20, 115,  37, -13,  6, -2
+                    F8_TAPS -4, 10, -23, 108,  48, -16,  8, -3
+                    F8_TAPS -4, 10, -24, 100,  59, -19,  9, -3
+                    F8_TAPS -4, 11, -24,  90,  70, -21, 10, -4
+                    F8_TAPS -4, 11, -23,  80,  80, -23, 11, -4
+                    F8_TAPS -4, 10, -21,  70,  90, -24, 11, -4
+                    F8_TAPS -3,  9, -19,  59, 100, -24, 10, -4
+                    F8_TAPS -3,  8, -16,  48, 108, -23, 10, -4
+                    F8_TAPS -2,  6, -13,  37, 115, -20,  9, -4
+                    F8_TAPS -2,  5, -10,  27, 121, -17,  7, -3
+                    F8_TAPS -1,  3,  -6,  17, 125, -13,  5, -2
+                    F8_TAPS  0,  1,  -3,   8, 127,  -7,  3, -1
+
+SECTION .text
+
+%macro filter_h_fn 1
+%assign %%px mmsize/2
+cglobal %1_8tap_1d_h_ %+ %%px, 6, 6, 11, dst, dstride, src, sstride, h, filtery
+    mova        m6, [pw_256]
+    mova        m7, [filteryq+ 0]
+%if ARCH_X86_64 && mmsize > 8
+    mova        m8, [filteryq+16]
+    mova        m9, [filteryq+32]
+    mova       m10, [filteryq+48]
+%endif
+.loop:
+    movh        m0, [srcq-3]
+    movh        m1, [srcq-2]
+    movh        m2, [srcq-1]
+    movh        m3, [srcq+0]
+    movh        m4, [srcq+1]
+    movh        m5, [srcq+2]
+    punpcklbw   m0, m1
+    punpcklbw   m2, m3
+    movh        m1, [srcq+3]
+    movh        m3, [srcq+4]
+    add       srcq, sstrideq
+    punpcklbw   m4, m5
+    punpcklbw   m1, m3
+    pmaddubsw   m0, m7
+%if ARCH_X86_64 && mmsize > 8
+    pmaddubsw   m2, m8
+    pmaddubsw   m4, m9
+    pmaddubsw   m1, m10
+%else
+    pmaddubsw   m2, [filteryq+16]
+    pmaddubsw   m4, [filteryq+32]
+    pmaddubsw   m1, [filteryq+48]
+%endif
+    paddw       m0, m2
+    paddw       m4, m1
+    paddsw      m0, m4
+    pmulhrsw    m0, m6
+%ifidn %1, avg
+    movh        m1, [dstq]
+%endif
+    packuswb    m0, m0
+%ifidn %1, avg
+    pavgb       m0, m1
+%endif
+    movh    [dstq], m0
+    add       dstq, dstrideq
+    dec         hd
+    jg .loop
+    RET
+%endmacro
+
+INIT_MMX ssse3
+filter_h_fn put
+filter_h_fn avg
+
+INIT_XMM ssse3
+filter_h_fn put
+filter_h_fn avg
+
+%macro filter_v_fn 1
+%assign %%px mmsize/2
+%if ARCH_X86_64
+cglobal %1_8tap_1d_v_ %+ %%px, 6, 8, 11, dst, dstride, src, sstride, h, filtery, src4, sstride3
+%else
+cglobal %1_8tap_1d_v_ %+ %%px, 4, 7, 11, dst, dstride, src, sstride, filtery, src4, sstride3
+    mov   filteryq, r5mp
+%define hd r4mp
+%endif
+    sub       srcq, sstrideq
+    lea  sstride3q, [sstrideq*3]
+    sub       srcq, sstrideq
+    mova        m6, [pw_256]
+    sub       srcq, sstrideq
+    mova        m7, [filteryq+ 0]
+    lea      src4q, [srcq+sstrideq*4]
+%if ARCH_X86_64 && mmsize > 8
+    mova        m8, [filteryq+16]
+    mova        m9, [filteryq+32]
+    mova       m10, [filteryq+48]
+%endif
+.loop:
+    ; FIXME maybe reuse loads from previous rows, or just
+    ; more generally unroll this to prevent multiple loads of
+    ; the same data?
+    movh        m0, [srcq]
+    movh        m1, [srcq+sstrideq]
+    movh        m2, [srcq+sstrideq*2]
+    movh        m3, [srcq+sstride3q]
+    movh        m4, [src4q]
+    movh        m5, [src4q+sstrideq]
+    punpcklbw   m0, m1
+    punpcklbw   m2, m3
+    movh        m1, [src4q+sstrideq*2]
+    movh        m3, [src4q+sstride3q]
+    add       srcq, sstrideq
+    add      src4q, sstrideq
+    punpcklbw   m4, m5
+    punpcklbw   m1, m3
+    pmaddubsw   m0, m7
+%if ARCH_X86_64 && mmsize > 8
+    pmaddubsw   m2, m8
+    pmaddubsw   m4, m9
+    pmaddubsw   m1, m10
+%else
+    pmaddubsw   m2, [filteryq+16]
+    pmaddubsw   m4, [filteryq+32]
+    pmaddubsw   m1, [filteryq+48]
+%endif
+    paddw       m0, m2
+    paddw       m4, m1
+    paddsw      m0, m4
+    pmulhrsw    m0, m6
+%ifidn %1, avg
+    movh        m1, [dstq]
+%endif
+    packuswb    m0, m0
+%ifidn %1, avg
+    pavgb       m0, m1
+%endif
+    movh    [dstq], m0
+    add       dstq, dstrideq
+    dec         hd
+    jg .loop
+    RET
+%endmacro
+
+INIT_MMX ssse3
+filter_v_fn put
+filter_v_fn avg
+
+INIT_XMM ssse3
+filter_v_fn put
+filter_v_fn avg
+
+%macro fpel_fn 6
+%if %2 == 4
+%define %%srcfn movh
+%define %%dstfn movh
+%else
+%define %%srcfn movu
+%define %%dstfn mova
+%endif
+
+%if %2 <= 16
+cglobal %1%2, 5, 7, 4, dst, dstride, src, sstride, h, dstride3, sstride3
+    lea  sstride3q, [sstrideq*3]
+    lea  dstride3q, [dstrideq*3]
+%else
+cglobal %1%2, 5, 5, 4, dst, dstride, src, sstride, h
+%endif
+.loop:
+    %%srcfn     m0, [srcq]
+    %%srcfn     m1, [srcq+s%3]
+    %%srcfn     m2, [srcq+s%4]
+    %%srcfn     m3, [srcq+s%5]
+    lea       srcq, [srcq+sstrideq*%6]
+%ifidn %1, avg
+    pavgb       m0, [dstq]
+    pavgb       m1, [dstq+d%3]
+    pavgb       m2, [dstq+d%4]
+    pavgb       m3, [dstq+d%5]
+%endif
+    %%dstfn [dstq], m0
+    %%dstfn [dstq+d%3], m1
+    %%dstfn [dstq+d%4], m2
+    %%dstfn [dstq+d%5], m3
+    lea       dstq, [dstq+dstrideq*%6]
+    sub         hd, %6
+    jnz .loop
+    RET
+%endmacro
+
+%define d16 16
+%define s16 16
+INIT_MMX mmx
+fpel_fn put, 4,  strideq, strideq*2, stride3q, 4
+fpel_fn put, 8,  strideq, strideq*2, stride3q, 4
+INIT_MMX sse
+fpel_fn avg, 4,  strideq, strideq*2, stride3q, 4
+fpel_fn avg, 8,  strideq, strideq*2, stride3q, 4
+INIT_XMM sse
+fpel_fn put, 16, strideq, strideq*2, stride3q, 4
+fpel_fn put, 32, mmsize,  strideq,   strideq+mmsize, 2
+fpel_fn put, 64, mmsize,  mmsize*2,  mmsize*3, 1
+INIT_XMM sse2
+fpel_fn avg, 16, strideq, strideq*2, stride3q, 4
+fpel_fn avg, 32, mmsize,  strideq,   strideq+mmsize, 2
+fpel_fn avg, 64, mmsize,  mmsize*2,  mmsize*3, 1
+%undef s16
+%undef d16
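[Editor's note: a minimal scalar sketch of what the put_8tap_1d_h
loops above compute, assuming one 8-tap phase taken from the
filters_ssse3 table; names here are illustrative, not FFmpeg API.
pmaddubsw forms the tap products, and pmulhrsw against pw_256 is
(sum + 64) >> 7 -- a clean normalization, since every row of taps
sums to 128. The sketch ignores the intermediate 16-bit saturation
of paddsw, which only matters for extreme inputs:

    #include <stdint.h>

    static uint8_t clip8(int v) { return v < 0 ? 0 : v > 255 ? 255 : v; }

    /* One row of the horizontal 8-tap subpel filter, with src centred
     * so that taps[3] lines up with src[x], matching the
     * movh [srcq-3] ... [srcq+4] loads in the loop above. */
    static void put_8tap_h_ref(uint8_t *dst, const uint8_t *src,
                               const int8_t taps[8], int w)
    {
        for (int x = 0; x < w; x++) {
            int sum = 0;
            for (int i = 0; i < 8; i++)
                sum += taps[i] * src[x - 3 + i];
            dst[x] = clip8((sum + 64) >> 7);
        }
    }

The avg variants differ only in a final pavgb, i.e.
dst[x] = (dst[x] + filtered + 1) >> 1.]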


