[FFmpeg-cvslog] vp9/x86: 32bit and sse2 support for vp9 inverse transform assembly

Ronald S. Bultje git at videolan.org
Mon Dec 15 00:51:16 CET 2014


ffmpeg | branch: master | Ronald S. Bultje <rsbultje at gmail.com> | Mon Dec 15 00:25:10 2014 +0100| [fd77fbb3900665ae8d0d1d8bbf9345822efc3d30] | committer: Michael Niedermayer

vp9/x86: 32bit and sse2 support for vp9 inverse transform assembly

Signed-off-by: Michael Niedermayer <michaelni at gmx.at>

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=fd77fbb3900665ae8d0d1d8bbf9345822efc3d30
---

 libavcodec/x86/vp9dsp_init.c |   75 +-
 libavcodec/x86/vp9itxfm.asm  | 2381 ++++++++++++++++++++++++++++++------------
 2 files changed, 1737 insertions(+), 719 deletions(-)
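
The change that runs through the whole patch: the SSSE3 code rounds with
pmulhrsw, which computes (a*b + (1<<14)) >> 15 per 16-bit lane, while the new
SSE2/MMXEXT fallbacks replace each pmulhrsw against a power-of-two constant
with a plain paddw + psraw. A minimal C sketch (not part of the patch; the
function name is illustrative, pmulhrsw is modeled in scalar code, and an
arithmetic right shift is assumed) checking the two write-out identities
quoted in the asm comments below:

#include <assert.h>
#include <stdint.h>

/* scalar model of one pmulhrsw lane: (a*b + (1<<14)) >> 15 */
static int16_t pmulhrsw_lane(int16_t a, int16_t b)
{
    return (int16_t)(((int32_t)a * b + (1 << 14)) >> 15);
}

int main(void)
{
    int32_t x;
    for (x = -32768; x < 32768; x++) {
        /* 4x4 write-out: (x*2048 + (1<<14))>>15 <=> (x+8)>>4 */
        assert(pmulhrsw_lane(x, 2048) == (int16_t)((x + 8) >> 4));
        /* 8x8 write-out: (x*1024 + (1<<14))>>15 <=> (x+16)>>5 */
        assert(pmulhrsw_lane(x, 1024) == (int16_t)((x + 16) >> 5));
    }
    return 0;
}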

diff --git a/libavcodec/x86/vp9dsp_init.c b/libavcodec/x86/vp9dsp_init.c
index 2790024..06e77a8 100644
--- a/libavcodec/x86/vp9dsp_init.c
+++ b/libavcodec/x86/vp9dsp_init.c
@@ -186,11 +186,18 @@ itxfm_func(iadst, idct,  size, opt); \
 itxfm_func(idct,  iadst, size, opt); \
 itxfm_func(iadst, iadst, size, opt)
 
+itxfm_func(idct,  idct,  4, mmxext);
+itxfm_func(idct,  iadst, 4, sse2);
+itxfm_func(iadst, idct,  4, sse2);
+itxfm_func(iadst, iadst, 4, sse2);
 itxfm_funcs(4, ssse3);
+itxfm_funcs(8, sse2);
 itxfm_funcs(8, ssse3);
 itxfm_funcs(8, avx);
+itxfm_funcs(16, sse2);
 itxfm_funcs(16, ssse3);
 itxfm_funcs(16, avx);
+itxfm_func(idct, idct, 32, sse2);
 itxfm_func(idct, idct, 32, ssse3);
 itxfm_func(idct, idct, 32, avx);
 itxfm_func(iwht, iwht, 4, mmx);
@@ -352,6 +359,7 @@ av_cold void ff_vp9dsp_init_x86(VP9DSPContext *dsp)
     if (EXTERNAL_MMXEXT(cpu_flags)) {
         init_fpel(4, 1,  4, avg, mmxext);
         init_fpel(3, 1,  8, avg, mmxext);
+        dsp->itxfm_add[TX_4X4][DCT_DCT] = ff_vp9_idct_idct_4x4_add_mmxext;
     }
 
     if (EXTERNAL_SSE(cpu_flags)) {
@@ -365,6 +373,21 @@ av_cold void ff_vp9dsp_init_x86(VP9DSPContext *dsp)
         init_fpel(1, 1, 32, avg, sse2);
         init_fpel(0, 1, 64, avg, sse2);
         init_lpf(sse2);
+        dsp->itxfm_add[TX_4X4][ADST_DCT]  = ff_vp9_idct_iadst_4x4_add_sse2;
+        dsp->itxfm_add[TX_4X4][DCT_ADST]  = ff_vp9_iadst_idct_4x4_add_sse2;
+        dsp->itxfm_add[TX_4X4][ADST_ADST] = ff_vp9_iadst_iadst_4x4_add_sse2;
+        dsp->itxfm_add[TX_8X8][DCT_DCT] = ff_vp9_idct_idct_8x8_add_sse2;
+        dsp->itxfm_add[TX_8X8][ADST_DCT]  = ff_vp9_idct_iadst_8x8_add_sse2;
+        dsp->itxfm_add[TX_8X8][DCT_ADST]  = ff_vp9_iadst_idct_8x8_add_sse2;
+        dsp->itxfm_add[TX_8X8][ADST_ADST] = ff_vp9_iadst_iadst_8x8_add_sse2;
+        dsp->itxfm_add[TX_16X16][DCT_DCT]   = ff_vp9_idct_idct_16x16_add_sse2;
+        dsp->itxfm_add[TX_16X16][ADST_DCT]  = ff_vp9_idct_iadst_16x16_add_sse2;
+        dsp->itxfm_add[TX_16X16][DCT_ADST]  = ff_vp9_iadst_idct_16x16_add_sse2;
+        dsp->itxfm_add[TX_16X16][ADST_ADST] = ff_vp9_iadst_iadst_16x16_add_sse2;
+        dsp->itxfm_add[TX_32X32][ADST_ADST] =
+        dsp->itxfm_add[TX_32X32][ADST_DCT] =
+        dsp->itxfm_add[TX_32X32][DCT_ADST] =
+        dsp->itxfm_add[TX_32X32][DCT_DCT] = ff_vp9_idct_idct_32x32_add_sse2;
         dsp->intra_pred[TX_16X16][VERT_PRED] = ff_vp9_ipred_v_16x16_sse2;
         dsp->intra_pred[TX_32X32][VERT_PRED] = ff_vp9_ipred_v_32x32_sse2;
     }
@@ -376,20 +399,18 @@ av_cold void ff_vp9dsp_init_x86(VP9DSPContext *dsp)
         dsp->itxfm_add[TX_4X4][ADST_DCT]  = ff_vp9_idct_iadst_4x4_add_ssse3;
         dsp->itxfm_add[TX_4X4][DCT_ADST]  = ff_vp9_iadst_idct_4x4_add_ssse3;
         dsp->itxfm_add[TX_4X4][ADST_ADST] = ff_vp9_iadst_iadst_4x4_add_ssse3;
-        if (ARCH_X86_64) {
-            dsp->itxfm_add[TX_8X8][DCT_DCT] = ff_vp9_idct_idct_8x8_add_ssse3;
-            dsp->itxfm_add[TX_8X8][ADST_DCT]  = ff_vp9_idct_iadst_8x8_add_ssse3;
-            dsp->itxfm_add[TX_8X8][DCT_ADST]  = ff_vp9_iadst_idct_8x8_add_ssse3;
-            dsp->itxfm_add[TX_8X8][ADST_ADST] = ff_vp9_iadst_iadst_8x8_add_ssse3;
-            dsp->itxfm_add[TX_16X16][DCT_DCT]   = ff_vp9_idct_idct_16x16_add_ssse3;
-            dsp->itxfm_add[TX_16X16][ADST_DCT]  = ff_vp9_idct_iadst_16x16_add_ssse3;
-            dsp->itxfm_add[TX_16X16][DCT_ADST]  = ff_vp9_iadst_idct_16x16_add_ssse3;
-            dsp->itxfm_add[TX_16X16][ADST_ADST] = ff_vp9_iadst_iadst_16x16_add_ssse3;
-            dsp->itxfm_add[TX_32X32][ADST_ADST] =
-            dsp->itxfm_add[TX_32X32][ADST_DCT] =
-            dsp->itxfm_add[TX_32X32][DCT_ADST] =
-            dsp->itxfm_add[TX_32X32][DCT_DCT] = ff_vp9_idct_idct_32x32_add_ssse3;
-        }
+        dsp->itxfm_add[TX_8X8][DCT_DCT] = ff_vp9_idct_idct_8x8_add_ssse3;
+        dsp->itxfm_add[TX_8X8][ADST_DCT]  = ff_vp9_idct_iadst_8x8_add_ssse3;
+        dsp->itxfm_add[TX_8X8][DCT_ADST]  = ff_vp9_iadst_idct_8x8_add_ssse3;
+        dsp->itxfm_add[TX_8X8][ADST_ADST] = ff_vp9_iadst_iadst_8x8_add_ssse3;
+        dsp->itxfm_add[TX_16X16][DCT_DCT]   = ff_vp9_idct_idct_16x16_add_ssse3;
+        dsp->itxfm_add[TX_16X16][ADST_DCT]  = ff_vp9_idct_iadst_16x16_add_ssse3;
+        dsp->itxfm_add[TX_16X16][DCT_ADST]  = ff_vp9_iadst_idct_16x16_add_ssse3;
+        dsp->itxfm_add[TX_16X16][ADST_ADST] = ff_vp9_iadst_iadst_16x16_add_ssse3;
+        dsp->itxfm_add[TX_32X32][ADST_ADST] =
+        dsp->itxfm_add[TX_32X32][ADST_DCT] =
+        dsp->itxfm_add[TX_32X32][DCT_ADST] =
+        dsp->itxfm_add[TX_32X32][DCT_DCT] = ff_vp9_idct_idct_32x32_add_ssse3;
         init_lpf(ssse3);
         init_dc_ipred(TX_4X4,    4, ssse3);
         init_dc_ipred(TX_8X8,    8, ssse3);
@@ -398,20 +419,18 @@ av_cold void ff_vp9dsp_init_x86(VP9DSPContext *dsp)
     }
 
     if (EXTERNAL_AVX(cpu_flags)) {
-        if (ARCH_X86_64) {
-            dsp->itxfm_add[TX_8X8][DCT_DCT] = ff_vp9_idct_idct_8x8_add_avx;
-            dsp->itxfm_add[TX_8X8][ADST_DCT]  = ff_vp9_idct_iadst_8x8_add_avx;
-            dsp->itxfm_add[TX_8X8][DCT_ADST]  = ff_vp9_iadst_idct_8x8_add_avx;
-            dsp->itxfm_add[TX_8X8][ADST_ADST] = ff_vp9_iadst_iadst_8x8_add_avx;
-            dsp->itxfm_add[TX_16X16][DCT_DCT] = ff_vp9_idct_idct_16x16_add_avx;
-            dsp->itxfm_add[TX_16X16][ADST_DCT]  = ff_vp9_idct_iadst_16x16_add_avx;
-            dsp->itxfm_add[TX_16X16][DCT_ADST]  = ff_vp9_iadst_idct_16x16_add_avx;
-            dsp->itxfm_add[TX_16X16][ADST_ADST] = ff_vp9_iadst_iadst_16x16_add_avx;
-            dsp->itxfm_add[TX_32X32][ADST_ADST] =
-            dsp->itxfm_add[TX_32X32][ADST_DCT] =
-            dsp->itxfm_add[TX_32X32][DCT_ADST] =
-            dsp->itxfm_add[TX_32X32][DCT_DCT] = ff_vp9_idct_idct_32x32_add_avx;
-        }
+        dsp->itxfm_add[TX_8X8][DCT_DCT] = ff_vp9_idct_idct_8x8_add_avx;
+        dsp->itxfm_add[TX_8X8][ADST_DCT]  = ff_vp9_idct_iadst_8x8_add_avx;
+        dsp->itxfm_add[TX_8X8][DCT_ADST]  = ff_vp9_iadst_idct_8x8_add_avx;
+        dsp->itxfm_add[TX_8X8][ADST_ADST] = ff_vp9_iadst_iadst_8x8_add_avx;
+        dsp->itxfm_add[TX_16X16][DCT_DCT] = ff_vp9_idct_idct_16x16_add_avx;
+        dsp->itxfm_add[TX_16X16][ADST_DCT]  = ff_vp9_idct_iadst_16x16_add_avx;
+        dsp->itxfm_add[TX_16X16][DCT_ADST]  = ff_vp9_iadst_idct_16x16_add_avx;
+        dsp->itxfm_add[TX_16X16][ADST_ADST] = ff_vp9_iadst_iadst_16x16_add_avx;
+        dsp->itxfm_add[TX_32X32][ADST_ADST] =
+        dsp->itxfm_add[TX_32X32][ADST_DCT] =
+        dsp->itxfm_add[TX_32X32][DCT_ADST] =
+        dsp->itxfm_add[TX_32X32][DCT_DCT] = ff_vp9_idct_idct_32x32_add_avx;
         init_fpel(1, 0, 32, put, avx);
         init_fpel(0, 0, 64, put, avx);
         init_lpf(avx);
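
The dsp_init changes above can drop their ARCH_X86_64 guards because the
assembly below no longer assumes 16 XMM registers: the new SCRATCH/UNSCRATCH
helpers swap into m8-m15 on x86-64 but spill to a memory slot (typically the
block buffer) on x86-32. The pre-SSSE3 versions also need a DC-only fast path
that works without pmulhrsw; a scalar C model of the 4x4 one (a sketch, the
function name is illustrative):

#include <stdint.h>

/* two rounded Q14 multiplies by 11585 (~16384/sqrt(2)), with the
 * write-out's (x+8)>>4 fused into the second shift as "+ (8 << 14)"
 * and "sar 14 + 4", matching the mmxext path below */
int dc_only_idct_4x4(int16_t dc)
{
    int t = ((int)dc * 11585 + 8192) >> 14;            /* first 1-D pass */
    return (t * 11585 + (8 << 14) + 8192) >> (14 + 4); /* second pass + round */
}

The 8x8 variant is identical except that it fuses (x+16)>>5 instead, adding
(16 << 14) + 8192 and shifting by 14 + 5.
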
diff --git a/libavcodec/x86/vp9itxfm.asm b/libavcodec/x86/vp9itxfm.asm
index 6cce0aa..908040c 100644
--- a/libavcodec/x86/vp9itxfm.asm
+++ b/libavcodec/x86/vp9itxfm.asm
@@ -27,6 +27,8 @@ SECTION_RODATA
 
 pw_11585x2:  times 8 dw 23170
 pw_m11585x2: times 8 dw -23170
+pw_m11585_11585: times 4 dw -11585, 11585
+pw_11585_11585: times 8 dw 11585
 
 %macro VP9_IDCT_COEFFS 2-3 0
 pw_%1x2:    times 8 dw  %1*2
@@ -64,9 +66,13 @@ pw_15212_m13377: times 4 dw 15212, -13377
 pw_15212_9929: times 4 dw 15212, 9929
 pw_m5283_m15212: times 4 dw -5283, -15212
 pw_13377x2: times 8 dw 13377*2
+pw_13377_m13377: times 4 dw 13377, -13377
 
 pd_8192: times 4 dd 8192
 
+cextern pw_8
+cextern pw_16
+cextern pw_32
 cextern pw_512
 cextern pw_1024
 cextern pw_2048
@@ -204,9 +210,13 @@ cglobal vp9_iwht_iwht_4x4_add, 3, 3, 0, dst, stride, block, eob
 %endmacro
 
 %macro VP9_IDCT4_1D 0
+%if cpuflag(ssse3)
     SUMSUB_BA            w, 2, 0, 4                         ; m2=IN(0)+IN(2) m0=IN(0)-IN(2)
     pmulhrsw            m2, m6                              ; m2=t0
     pmulhrsw            m0, m6                              ; m0=t1
+%else ; <= sse2
+    VP9_UNPACK_MULSUB_2W_4X 0, 2, 11585, 11585, m7, 4, 5    ; m0=t1, m2=t0
+%endif
     VP9_UNPACK_MULSUB_2W_4X 1, 3, 15137, 6270, m7, 4, 5     ; m1=t2, m3=t3
     VP9_IDCT4_1D_FINALIZE
 %endmacro
@@ -222,38 +232,74 @@ cglobal vp9_iwht_iwht_4x4_add, 3, 3, 0, dst, stride, block, eob
 %endmacro
 
 %macro VP9_IDCT4_WRITEOUT 0
+%if cpuflag(ssse3)
     mova                m5, [pw_2048]
     pmulhrsw            m0, m5              ; (x*2048 + (1<<14))>>15 <=> (x+8)>>4
     pmulhrsw            m1, m5
+%else
+    mova                m5, [pw_8]
+    paddw               m0, m5
+    paddw               m1, m5
+    psraw               m0, 4
+    psraw               m1, 4
+%endif
     VP9_STORE_2X         0,  1,  6,  7,  4
     lea               dstq, [dstq+2*strideq]
+%if cpuflag(ssse3)
     pmulhrsw            m2, m5
     pmulhrsw            m3, m5
+%else
+    paddw               m2, m5
+    paddw               m3, m5
+    psraw               m2, 4
+    psraw               m3, 4
+%endif
     VP9_STORE_2X         2,  3,  6,  7,  4
 %endmacro
 
-INIT_MMX ssse3
-cglobal vp9_idct_idct_4x4_add, 4,4,0, dst, stride, block, eob
+%macro IDCT_4x4_FN 1
+INIT_MMX %1
+cglobal vp9_idct_idct_4x4_add, 4, 4, 0, dst, stride, block, eob
 
+%if cpuflag(ssse3)
     cmp eobd, 4 ; 2x2 or smaller
     jg .idctfull
 
     cmp eobd, 1 ; faster path for when only DC is set
     jne .idct2x2
+%else
+    cmp eobd, 1
+    jg .idctfull
+%endif
 
+%if cpuflag(ssse3)
     movd                m0, [blockq]
     mova                m5, [pw_11585x2]
     pmulhrsw            m0, m5
     pmulhrsw            m0, m5
+%else
+    DEFINE_ARGS dst, stride, block, coef
+    movsx            coefd, word [blockq]
+    imul             coefd, 11585
+    add              coefd, 8192
+    sar              coefd, 14
+    imul             coefd, 11585
+    add              coefd, (8 << 14) + 8192
+    sar              coefd, 14 + 4
+    movd                m0, coefd
+%endif
     pshufw              m0, m0, 0
     pxor                m4, m4
     movh          [blockq], m4
+%if cpuflag(ssse3)
     pmulhrsw            m0, [pw_2048]       ; (x*2048 + (1<<14))>>15 <=> (x+8)>>4
+%endif
     VP9_STORE_2X         0,  0,  6,  7,  4
     lea               dstq, [dstq+2*strideq]
     VP9_STORE_2X         0,  0,  6,  7,  4
     RET
 
+%if cpuflag(ssse3)
 ; faster path for when only top left 2x2 block is set
 .idct2x2:
     movd                m0, [blockq+0]
@@ -262,20 +308,27 @@ cglobal vp9_idct_idct_4x4_add, 4,4,0, dst, stride, block, eob
     mova                m6, [pw_6270x2]
     mova                m7, [pw_15137x2]
     VP9_IDCT4_2x2_1D
-    TRANSPOSE4x4W  0, 1, 2, 3, 4
+    ; partial 2x4 transpose
+    punpcklwd           m0, m1
+    punpcklwd           m2, m3
+    SBUTTERFLY          dq, 0, 2, 1
+    SWAP                1, 2
     VP9_IDCT4_2x2_1D
     pxor                m4, m4  ; used for the block reset, and VP9_STORE_2X
     movh       [blockq+ 0], m4
     movh       [blockq+ 8], m4
     VP9_IDCT4_WRITEOUT
     RET
+%endif
 
 .idctfull: ; generic full 4x4 idct/idct
     mova                m0, [blockq+ 0]
     mova                m1, [blockq+ 8]
     mova                m2, [blockq+16]
     mova                m3, [blockq+24]
+%if cpuflag(ssse3)
     mova                m6, [pw_11585x2]
+%endif
     mova                m7, [pd_8192]       ; rounding
     VP9_IDCT4_1D
     TRANSPOSE4x4W  0, 1, 2, 3, 4
@@ -287,6 +340,10 @@ cglobal vp9_idct_idct_4x4_add, 4,4,0, dst, stride, block, eob
     mova       [blockq+24], m4
     VP9_IDCT4_WRITEOUT
     RET
+%endmacro
+
+IDCT_4x4_FN mmxext
+IDCT_4x4_FN ssse3
 
 ;-------------------------------------------------------------------------------------------
 ; void vp9_iadst_iadst_4x4_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
@@ -297,7 +354,12 @@ cglobal vp9_idct_idct_4x4_add, 4,4,0, dst, stride, block, eob
     movq2dq           xmm1, m1
     movq2dq           xmm2, m2
     movq2dq           xmm3, m3
+%if cpuflag(ssse3)
     paddw               m3, m0
+%else
+    paddw             xmm6, xmm3, xmm0
+    punpcklwd         xmm6, xmm2
+%endif
     punpcklwd         xmm0, xmm1
     punpcklwd         xmm2, xmm3
     pmaddwd           xmm1, xmm0, [pw_5283_13377]
@@ -305,35 +367,57 @@ cglobal vp9_idct_idct_4x4_add, 4,4,0, dst, stride, block, eob
     pmaddwd           xmm0, [pw_15212_m13377]
     pmaddwd           xmm3, xmm2, [pw_15212_9929]
     pmaddwd           xmm2, [pw_m5283_m15212]
+%if cpuflag(ssse3)
     psubw               m3, m2
+%else
+    pmaddwd           xmm6, [pw_13377_m13377]
+%endif
     paddd             xmm0, xmm2
-    paddd             xmm3, [pd_8192]
-    paddd             xmm2, [pd_8192]
+    paddd             xmm3, xmm5
+    paddd             xmm2, xmm5
+%if notcpuflag(ssse3)
+    paddd             xmm6, xmm5
+%endif
     paddd             xmm1, xmm3
     paddd             xmm0, xmm3
     paddd             xmm4, xmm2
     psrad             xmm1, 14
     psrad             xmm0, 14
     psrad             xmm4, 14
+%if cpuflag(ssse3)
     pmulhrsw            m3, [pw_13377x2]        ; out2
+%else
+    psrad             xmm6, 14
+%endif
     packssdw          xmm0, xmm0
     packssdw          xmm1, xmm1
     packssdw          xmm4, xmm4
+%if notcpuflag(ssse3)
+    packssdw          xmm6, xmm6
+%endif
     movdq2q             m0, xmm0                ; out3
     movdq2q             m1, xmm1                ; out0
     movdq2q             m2, xmm4                ; out1
+%if notcpuflag(ssse3)
+    movdq2q             m3, xmm6                ; out2
+%endif
     SWAP                 0, 1, 2, 3
 %endmacro
 
 %macro IADST4_FN 5
 INIT_MMX %5
-cglobal vp9_%1_%3_4x4_add, 3, 3, 8, dst, stride, block, eob
+cglobal vp9_%1_%3_4x4_add, 3, 3, 6 + notcpuflag(ssse3), dst, stride, block, eob
+    movdqa            xmm5, [pd_8192]
     mova                m0, [blockq+ 0]
     mova                m1, [blockq+ 8]
     mova                m2, [blockq+16]
     mova                m3, [blockq+24]
+%if cpuflag(ssse3)
     mova                m6, [pw_11585x2]
-    mova                m7, [pd_8192]       ; rounding
+%endif
+%ifnidn %1%3, iadstiadst
+    movdq2q             m7, xmm5
+%endif
     VP9_%2_1D
     TRANSPOSE4x4W  0, 1, 2, 3, 4
     VP9_%4_1D
@@ -346,107 +430,210 @@ cglobal vp9_%1_%3_4x4_add, 3, 3, 8, dst, stride, block, eob
     RET
 %endmacro
 
+IADST4_FN idct,  IDCT4,  iadst, IADST4, sse2
+IADST4_FN iadst, IADST4, idct,  IDCT4,  sse2
+IADST4_FN iadst, IADST4, iadst, IADST4, sse2
+
 IADST4_FN idct,  IDCT4,  iadst, IADST4, ssse3
 IADST4_FN iadst, IADST4, idct,  IDCT4,  ssse3
 IADST4_FN iadst, IADST4, iadst, IADST4, ssse3
 
-%if ARCH_X86_64 ; TODO: 32-bit? (32-bit limited to 8 xmm reg, we use more)
+%macro SCRATCH 3
+%if ARCH_X86_64
+    SWAP                %1, %2
+%else
+    mova              [%3], m%1
+%endif
+%endmacro
+
+%macro UNSCRATCH 3
+%if ARCH_X86_64
+    SWAP                %1, %2
+%else
+    mova               m%1, [%3]
+%endif
+%endmacro
 
 ;-------------------------------------------------------------------------------------------
 ; void vp9_idct_idct_8x8_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
 ;-------------------------------------------------------------------------------------------
 
 %macro VP9_IDCT8_1D_FINALIZE 0
-    SUMSUB_BA            w,  3, 10, 4                       ;  m3=t0+t7, m10=t0-t7
-    SUMSUB_BA            w,  1,  2, 4                       ;  m1=t1+t6,  m2=t1-t6
-    SUMSUB_BA            w, 11,  0, 4                       ; m11=t2+t5,  m0=t2-t5
-    SUMSUB_BA            w,  9,  8, 4                       ;  m9=t3+t4,  m8=t3-t4
-    SWAP                11, 10, 2
-    SWAP                 3,  9, 0
+    SUMSUB_BA            w,  3,  6, 5                       ; m3=t0+t7, m6=t0-t7
+    SUMSUB_BA            w,  1,  2, 5                       ; m1=t1+t6, m2=t1-t6
+    SUMSUB_BA            w,  7,  0, 5                       ; m7=t2+t5, m0=t2-t5
+
+    UNSCRATCH            5, 8, blockq+ 0
+    SCRATCH              2, 8, blockq+ 0
+
+    SUMSUB_BA            w,  5,  4, 2                       ; m5=t3+t4, m4=t3-t4
+    SWAP                 7,  6,  2
+    SWAP                 3,  5,  0
+
+%if ARCH_X86_64
+    SWAP                 6, 8
+%endif
 %endmacro
 
+; x86-32:
+; - in: m0/m4 are in mem
+; - out: m6 is in mem
+; x86-64:
+; - everything is in registers (m0-7)
 %macro VP9_IDCT8_1D 0
-    SUMSUB_BA            w, 8, 0, 4                         ; m8=IN(0)+IN(4) m0=IN(0)-IN(4)
-    pmulhrsw            m8, m12                             ; m8=t0a
-    pmulhrsw            m0, m12                             ; m0=t1a
-    VP9_UNPACK_MULSUB_2W_4X 2, 10, 15137,  6270, m7, 4, 5   ; m2=t2a, m10=t3a
-    VP9_UNPACK_MULSUB_2W_4X 1, 11, 16069,  3196, m7, 4, 5   ; m1=t4a, m11=t7a
-    VP9_UNPACK_MULSUB_2W_4X 9,  3,  9102, 13623, m7, 4, 5   ; m9=t5a,  m3=t6a
-    SUMSUB_BA            w, 10,  8, 4                       ; m10=t0a+t3a (t0),  m8=t0a-t3a (t3)
-    SUMSUB_BA            w,  2,  0, 4                       ;  m2=t1a+t2a (t1),  m0=t1a-t2a (t2)
-    SUMSUB_BA            w,  9,  1, 4                       ;  m9=t4a+t5a (t4),  m1=t4a-t5a (t5a)
-    SUMSUB_BA            w,  3, 11, 4                       ;  m3=t7a+t6a (t7), m11=t7a-t6a (t6a)
-    SUMSUB_BA            w,  1, 11, 4                       ;  m1=t6a+t5a (t6), m11=t6a-t5a (t5)
-    pmulhrsw            m1, m12                             ; m1=t6
-    pmulhrsw           m11, m12                             ; m11=t5
+%if ARCH_X86_64
+    SWAP                 0, 8
+    SWAP                 4, 9
+%endif
+
+    VP9_UNPACK_MULSUB_2W_4X 5,  3,  9102, 13623, D_8192_REG, 0, 4  ; m5=t5a, m3=t6a
+    VP9_UNPACK_MULSUB_2W_4X 1,  7, 16069,  3196, D_8192_REG, 0, 4  ; m1=t4a, m7=t7a
+    SUMSUB_BA            w,  5,  1, 0                       ; m5=t4a+t5a (t4), m1=t4a-t5a (t5a)
+    SUMSUB_BA            w,  3,  7, 0                       ; m3=t7a+t6a (t7), m7=t7a-t6a (t6a)
+%if cpuflag(ssse3)
+    SUMSUB_BA            w,  1,  7, 0                       ; m1=t6a+t5a (t6), m7=t6a-t5a (t5)
+    pmulhrsw            m1, W_11585x2_REG                   ; m1=t6
+    pmulhrsw            m7, W_11585x2_REG                   ; m7=t5
+%else
+    VP9_UNPACK_MULSUB_2W_4X 7,  1, 11585, 11585, D_8192_REG, 0, 4
+%endif
+    VP9_UNPACK_MULSUB_2W_4X 2,  6, 15137,  6270, D_8192_REG, 0, 4  ; m2=t2a, m6=t3a
+
+    UNSCRATCH            0, 8, blockq+ 0    ; IN(0)
+    UNSCRATCH            4, 9, blockq+64    ; IN(4)
+    SCRATCH              5, 8, blockq+ 0
+
+%if cpuflag(ssse3)
+    SUMSUB_BA            w, 4, 0, 5                         ; m4=IN(0)+IN(4) m0=IN(0)-IN(4)
+    pmulhrsw            m4, W_11585x2_REG                   ; m4=t0a
+    pmulhrsw            m0, W_11585x2_REG                   ; m0=t1a
+%else
+    SCRATCH              7, 9, blockq+64
+    VP9_UNPACK_MULSUB_2W_4X 0,  4, 11585, 11585, D_8192_REG, 5, 7
+    UNSCRATCH            7, 9, blockq+64
+%endif
+    SUMSUB_BA            w,  6,  4, 5                       ; m6=t0a+t3a (t0), m4=t0a-t3a (t3)
+    SUMSUB_BA            w,  2,  0, 5                       ; m2=t1a+t2a (t1), m0=t1a-t2a (t2)
+
     VP9_IDCT8_1D_FINALIZE
 %endmacro
 
 %macro VP9_IDCT8_4x4_1D 0
-    pmulhrsw            m0, m12                             ; m0=t1a/t0a
-    pmulhrsw           m10, m2, [pw_15137x2]                ; m10=t3a
+    pmulhrsw            m0, W_11585x2_REG                   ; m0=t1a/t0a
+    pmulhrsw            m6, m2, [pw_15137x2]                ; m6=t3a
     pmulhrsw            m2, [pw_6270x2]                     ; m2=t2a
-    pmulhrsw           m11, m1, [pw_16069x2]                ; m11=t7a
+    pmulhrsw            m7, m1, [pw_16069x2]                ; m7=t7a
     pmulhrsw            m1, [pw_3196x2]                     ; m1=t4a
-    pmulhrsw            m9, m3, [pw_9102x2]                 ; m9=-t5a
+    pmulhrsw            m5, m3, [pw_9102x2]                 ; m5=-t5a
     pmulhrsw            m3, [pw_13623x2]                    ; m3=t6a
-    psubw               m8, m0, m10                         ; m8=t0a-t3a (t3)
-    paddw              m10, m0                              ; m10=t0a+t3a (t0)
-    SUMSUB_BA            w,  2,  0, 4                       ;  m2=t1a+t2a (t1),  m0=t1a-t2a (t2)
-    SUMSUB_BA            w,  9,  1, 4                       ;  m1=t4a+t5a (t4),  m9=t4a-t5a (t5a)
-    SWAP                 1,  9
-    SUMSUB_BA            w,  3, 11, 4                       ;  m3=t7a+t6a (t7), m11=t7a-t6a (t6a)
-    SUMSUB_BA            w,  1, 11, 4                       ;  m1=t6a+t5a (t6), m11=t6a-t5a (t5)
-    pmulhrsw            m1, m12                             ; m1=t6
-    pmulhrsw           m11, m12                             ; m11=t5
+    SUMSUB_BA            w,  5,  1, 4                       ; m1=t4a+t5a (t4), m5=t4a-t5a (t5a)
+    SWAP                 1,  5
+    SUMSUB_BA            w,  3,  7, 4                       ; m3=t7a+t6a (t7), m7=t7a-t6a (t6a)
+    SUMSUB_BA            w,  1,  7, 4                       ; m1=t6a+t5a (t6), m7=t6a-t5a (t5)
+    pmulhrsw            m1, W_11585x2_REG                   ; m1=t6
+    pmulhrsw            m7, W_11585x2_REG                   ; m7=t5
+    psubw               m4, m0, m6                          ; m4=t0a-t3a (t3)
+    paddw               m6, m0                              ; m6=t0a+t3a (t0)
+    SCRATCH              5,  8, blockq+ 0
+    SUMSUB_BA            w,  2,  0, 5                       ; m2=t1a+t2a (t1), m0=t1a-t2a (t2)
     VP9_IDCT8_1D_FINALIZE
 %endmacro
 
-; TODO: a lot of t* copies can probably be removed and merged with
-; following SUMSUBs from VP9_IDCT8_1D_FINALIZE with AVX
-%macro VP9_IDCT8_2x2_1D 0
-    pmulhrsw            m0, m12                             ;  m0=t0
-    mova                m3, m1
-    pmulhrsw            m1, m6                              ;  m1=t4
-    pmulhrsw            m3, m7                              ;  m3=t7
-    mova                m2, m0                              ;  m2=t1
-    mova               m10, m0                              ; m10=t2
-    mova                m8, m0                              ;  m8=t3
-    mova               m11, m3                              ; t5 = t7a ...
-    mova                m9, m3                              ; t6 = t7a ...
-    psubw              m11, m1                              ; t5 = t7a - t4a
-    paddw               m9, m1                              ; t6 = t7a + t4a
-    pmulhrsw           m11, m12                             ; m11=t5
-    pmulhrsw            m9, m12                             ;  m9=t6
-    SWAP                 0, 10
-    SWAP                 9,  1
-    VP9_IDCT8_1D_FINALIZE
+%macro VP9_IDCT8_2x2_1D 1
+    pmulhrsw            m0, W_11585x2_REG                   ; m0=t0
+    pmulhrsw            m3, m1, W_16069x2_REG               ; m3=t7
+    pmulhrsw            m1, W_3196x2_REG                    ; m1=t4
+    psubw               m7, m3, m1                          ; t5 = t7a - t4a
+    paddw               m5, m3, m1                          ; t6 = t7a + t4a
+    pmulhrsw            m7, W_11585x2_REG                   ; m7=t5
+    pmulhrsw            m5, W_11585x2_REG                   ; m5=t6
+    SWAP                 5,  1
+    ; merged VP9_IDCT8_1D_FINALIZE to make register-sharing w/ avx easier
+    psubw               m6, m0, m3                          ; m6=t0-t7
+    paddw               m3, m0                              ; m3=t0+t7
+    psubw               m2, m0, m1                          ; m2=t1-t6
+    paddw               m1, m0                              ; m1=t1+t6
+%if %1 == 1
+    punpcklwd           m3, m1
+%define SCRATCH_REG 1
+%elif ARCH_X86_32
+    mova       [blockq+ 0], m2
+%define SCRATCH_REG 2
+%else
+%define SCRATCH_REG 8
+%endif
+    psubw               m4, m0, m5                          ; m4=t3-t4
+    paddw               m5, m0                              ; m5=t3+t4
+    SUMSUB_BA            w,  7,  0, SCRATCH_REG             ; m7=t2+t5, m0=t2-t5
+    SWAP                 7,  6,  2
+    SWAP                 3,  5,  0
+%undef SCRATCH_REG
 %endmacro
 
+%macro VP9_IDCT8_WRITEx2 6-8 5 ; line1, line2, tmp1, tmp2, zero, pw_1024/pw_16, shift
+%if cpuflag(ssse3)
+    pmulhrsw           m%1, %6              ; (x*1024 + (1<<14))>>15 <=> (x+16)>>5
+    pmulhrsw           m%2, %6
+%else
+    paddw              m%1, %6
+    paddw              m%2, %6
+    psraw              m%1, %7
+    psraw              m%2, %7
+%endif
+%if %0 <= 7
+    VP9_STORE_2X        %1, %2, %3, %4, %5
+%else
+    VP9_STORE_2X        %1, %2, %3, %4, %5, %8
+%endif
+%endmacro
+
+; x86-32:
+; - m6 is in mem
+; x86-64:
+; - m8 holds m6 (SWAP)
+; in both cases, register m6 holds zero
 %macro VP9_IDCT8_WRITEOUT 0
-    mova                m5, [pw_1024]
-    pmulhrsw            m0, m5              ; (x*1024 + (1<<14))>>15 <=> (x+16)>>5
-    pmulhrsw            m1, m5
-    VP9_STORE_2X         0,  1,  6,  7,  4
+%if ARCH_X86_64
+%if cpuflag(ssse3)
+    mova                m9, [pw_1024]
+%else
+    mova                m9, [pw_16]
+%endif
+%define ROUND_REG m9
+%else
+%if cpuflag(ssse3)
+%define ROUND_REG [pw_1024]
+%else
+%define ROUND_REG [pw_16]
+%endif
+%endif
+    SCRATCH              5, 10, blockq+16
+    SCRATCH              7, 11, blockq+32
+    VP9_IDCT8_WRITEx2    0,  1, 5, 7, 6, ROUND_REG
     lea               dstq, [dstq+2*strideq]
-    pmulhrsw            m2, m5
-    pmulhrsw            m3, m5
-    VP9_STORE_2X         2,  3,  6,  7,  4
+    VP9_IDCT8_WRITEx2    2,  3, 5, 7, 6, ROUND_REG
     lea               dstq, [dstq+2*strideq]
-    pmulhrsw            m8, m5
-    pmulhrsw            m9, m5
-    VP9_STORE_2X         8,  9,  6,  7,  4
+    UNSCRATCH            5, 10, blockq+16
+    UNSCRATCH            7, 11, blockq+32
+    VP9_IDCT8_WRITEx2    4,  5, 0, 1, 6, ROUND_REG
     lea               dstq, [dstq+2*strideq]
-    pmulhrsw           m10, m5
-    pmulhrsw           m11, m5
-    VP9_STORE_2X        10, 11,  6,  7,  4
+    UNSCRATCH            5, 8, blockq+ 0
+    VP9_IDCT8_WRITEx2    5,  7, 0, 1, 6, ROUND_REG
+
+%undef ROUND_REG
 %endmacro
 
-%macro VP9_IDCT_IDCT_8x8_ADD_XMM 1
+%macro VP9_IDCT_IDCT_8x8_ADD_XMM 2
 INIT_XMM %1
-cglobal vp9_idct_idct_8x8_add, 4,4,13, dst, stride, block, eob
+cglobal vp9_idct_idct_8x8_add, 4, 4, %2, dst, stride, block, eob
 
+%if cpuflag(ssse3)
+%if ARCH_X86_64
     mova               m12, [pw_11585x2]    ; often used
+%define W_11585x2_REG m12
+%else
+%define W_11585x2_REG [pw_11585x2]
+%endif
 
     cmp eobd, 12 ; top left half or less
     jg .idctfull
@@ -456,38 +643,78 @@ cglobal vp9_idct_idct_8x8_add, 4,4,13, dst, stride, block, eob
 
     cmp eobd, 1 ; faster path for when only DC is set
     jne .idcttopleftcorner
+%else
+    cmp eobd, 1
+    jg .idctfull
+%endif
 
+%if cpuflag(ssse3)
     movd                m0, [blockq]
-    pmulhrsw            m0, m12
-    pmulhrsw            m0, m12
+    pmulhrsw            m0, W_11585x2_REG
+    pmulhrsw            m0, W_11585x2_REG
+%else
+    DEFINE_ARGS dst, stride, block, coef
+    movsx            coefd, word [blockq]
+    imul             coefd, 11585
+    add              coefd, 8192
+    sar              coefd, 14
+    imul             coefd, 11585
+    add              coefd, (16 << 14) + 8192
+    sar              coefd, 14 + 5
+    movd                m0, coefd
+%endif
     SPLATW              m0, m0, 0
     pxor                m4, m4
     movd          [blockq], m4
-    mova                m5, [pw_1024]
-    pmulhrsw            m0, m5              ; (x*1024 + (1<<14))>>15 <=> (x+16)>>5
-    VP9_STORE_2X         0,  0,  6,  7,  4
-    lea               dstq, [dstq+2*strideq]
-    VP9_STORE_2X         0,  0,  6,  7,  4
-    lea               dstq, [dstq+2*strideq]
+%if cpuflag(ssse3)
+    pmulhrsw            m0, [pw_1024]       ; (x*1024 + (1<<14))>>15 <=> (x+16)>>5
+%endif
+%rep 3
     VP9_STORE_2X         0,  0,  6,  7,  4
     lea               dstq, [dstq+2*strideq]
+%endrep
     VP9_STORE_2X         0,  0,  6,  7,  4
     RET
 
+%if cpuflag(ssse3)
; faster path for when only the left corner is set (3 inputs: DC, right to DC,
; below to DC). Note: this also works for a 2x2 block
 .idcttopleftcorner:
     movd                m0, [blockq+0]
     movd                m1, [blockq+16]
-    mova                m6, [pw_3196x2]
-    mova                m7, [pw_16069x2]
-    VP9_IDCT8_2x2_1D
-    TRANSPOSE8x8W  0, 1, 2, 3, 8, 9, 10, 11, 4
-    VP9_IDCT8_2x2_1D
-    pxor                m4, m4  ; used for the block reset, and VP9_STORE_2X
-    movd       [blockq+ 0], m4
-    movd       [blockq+16], m4
+%if ARCH_X86_64
+    mova               m10, [pw_3196x2]
+    mova               m11, [pw_16069x2]
+%define W_3196x2_REG m10
+%define W_16069x2_REG m11
+%else
+%define W_3196x2_REG [pw_3196x2]
+%define W_16069x2_REG [pw_16069x2]
+%endif
+    VP9_IDCT8_2x2_1D 1
+    ; partial 2x8 transpose
+    ; punpcklwd m0, m1 already done inside idct
+    punpcklwd           m2, m3
+    punpcklwd           m4, m5
+    punpcklwd           m6, m7
+    punpckldq           m0, m2
+    punpckldq           m4, m6
+    SBUTTERFLY         qdq, 0, 4, 1
+    SWAP                 1, 4
+    VP9_IDCT8_2x2_1D 2
+%if ARCH_X86_64
+    SWAP                 6, 8
+%endif
+    pxor                m6, m6  ; used for the block reset, and VP9_STORE_2X
     VP9_IDCT8_WRITEOUT
+%if ARCH_X86_64
+    movd       [blockq+ 0], m6
+    movd       [blockq+16], m6
+%else
+    mova       [blockq+ 0], m6
+    mova       [blockq+16], m6
+    mova       [blockq+32], m6
+%endif
     RET
 
 .idcthalf:
@@ -496,131 +723,281 @@ cglobal vp9_idct_idct_8x8_add, 4,4,13, dst, stride, block, eob
     movh                m2, [blockq +32]
     movh                m3, [blockq +48]
     VP9_IDCT8_4x4_1D
-    TRANSPOSE8x8W  0, 1, 2, 3, 8, 9, 10, 11, 4
+    ; partial 4x8 transpose
+%if ARCH_X86_32
+    mova                m6, [blockq+ 0]
+%endif
+    punpcklwd           m0, m1
+    punpcklwd           m2, m3
+    punpcklwd           m4, m5
+    punpcklwd           m6, m7
+    SBUTTERFLY          dq, 0, 2, 1
+    SBUTTERFLY          dq, 4, 6, 5
+    SBUTTERFLY         qdq, 0, 4, 1
+    SBUTTERFLY         qdq, 2, 6, 5
+    SWAP                 1, 4
+    SWAP                 3, 6
     VP9_IDCT8_4x4_1D
-    pxor                m4, m4
-    movh       [blockq+ 0], m4
-    movh       [blockq+16], m4
-    movh       [blockq+32], m4
-    movh       [blockq+48], m4
+%if ARCH_X86_64
+    SWAP                 6, 8
+%endif
+    pxor                m6, m6
     VP9_IDCT8_WRITEOUT
+%if ARCH_X86_64
+    movh       [blockq+ 0], m6
+    movh       [blockq+16], m6
+    movh       [blockq+32], m6
+%else
+    mova       [blockq+ 0], m6
+    mova       [blockq+16], m6
+    mova       [blockq+32], m6
+%endif
+    movh       [blockq+48], m6
     RET
+%endif
 
 .idctfull: ; generic full 8x8 idct/idct
+%if ARCH_X86_64
     mova                m0, [blockq+  0]    ; IN(0)
+%endif
     mova                m1, [blockq+ 16]    ; IN(1)
     mova                m2, [blockq+ 32]    ; IN(2)
     mova                m3, [blockq+ 48]    ; IN(3)
-    mova                m8, [blockq+ 64]    ; IN(4)
-    mova                m9, [blockq+ 80]    ; IN(5)
-    mova               m10, [blockq+ 96]    ; IN(6)
-    mova               m11, [blockq+112]    ; IN(7)
-    mova                m7, [pd_8192]       ; rounding
+%if ARCH_X86_64
+    mova                m4, [blockq+ 64]    ; IN(4)
+%endif
+    mova                m5, [blockq+ 80]    ; IN(5)
+    mova                m6, [blockq+ 96]    ; IN(6)
+    mova                m7, [blockq+112]    ; IN(7)
+%if ARCH_X86_64
+    mova               m11, [pd_8192]       ; rounding
+%define D_8192_REG m11
+%else
+%define D_8192_REG [pd_8192]
+%endif
     VP9_IDCT8_1D
-    TRANSPOSE8x8W  0, 1, 2, 3, 8, 9, 10, 11, 4
+%if ARCH_X86_64
+    TRANSPOSE8x8W  0, 1, 2, 3, 4, 5, 6, 7, 8
+%else
+    TRANSPOSE8x8W  0, 1, 2, 3, 4, 5, 6, 7, [blockq+0], [blockq+64], 1
+    mova        [blockq+0], m0
+%endif
     VP9_IDCT8_1D
 
-    pxor                m4, m4  ; used for the block reset, and VP9_STORE_2X
-    ZERO_BLOCK      blockq, 16, 8, m4
+%if ARCH_X86_64
+    SWAP                 6, 8
+%endif
+    pxor                m6, m6  ; used for the block reset, and VP9_STORE_2X
     VP9_IDCT8_WRITEOUT
+    ZERO_BLOCK      blockq, 16, 8, m6
     RET
+%undef W_11585x2_REG
 %endmacro
 
-VP9_IDCT_IDCT_8x8_ADD_XMM ssse3
-VP9_IDCT_IDCT_8x8_ADD_XMM avx
+VP9_IDCT_IDCT_8x8_ADD_XMM sse2, 12
+VP9_IDCT_IDCT_8x8_ADD_XMM ssse3, 13
+VP9_IDCT_IDCT_8x8_ADD_XMM avx, 13
 
 ;---------------------------------------------------------------------------------------------
 ; void vp9_iadst_iadst_8x8_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
 ;---------------------------------------------------------------------------------------------
 
-%macro VP9_IADST8_1D 0 ; input/output=m0/1/2/3/8/9/10/11
-    VP9_UNPACK_MULSUB_2D_4X 11,  0,  4,  5, 16305,  1606    ; m11/4=t1[d], m0/5=t0[d]
-    VP9_UNPACK_MULSUB_2D_4X  3,  8,  6, 13, 10394, 12665    ; m3/6=t5[d], m8/13=t4[d]
-    VP9_RND_SH_SUMSUB_BA     8,  0, 13,  5, 14, m7          ; m8=t0[w], m0=t4[w]
-    VP9_RND_SH_SUMSUB_BA     3, 11,  6,  4, 14, m7          ; m3=t1[w], m11=t5[w]
-
-    VP9_UNPACK_MULSUB_2D_4X  9,  2,  4,  5, 14449,  7723    ; m9/4=t3[d], m2/5=t2[d]
-    VP9_UNPACK_MULSUB_2D_4X  1, 10,  6, 13,  4756, 15679    ; m1/6=t7[d], m10/13=t6[d]
-    VP9_RND_SH_SUMSUB_BA    10,  2, 13,  5, 14, m7          ; m10=t2[w], m2=t6[w]
-    VP9_RND_SH_SUMSUB_BA     1,  9,  6,  4, 14, m7          ; m1=t3[w], m9=t7[w]
-
-    ; m8=t0, m3=t1, m10=t2, m1=t3, m0=t4, m11=t5, m2=t6, m9=t7
+; x86-32:
+; - in: m0/3/4/7 are in mem [blockq+N*16]
+; - out: m6 is in mem [blockq+0]
+; x86-64:
+; - everything is in registers
+%macro VP9_IADST8_1D 0 ; input/output=m0/1/2/3/4/5/6/7
+%if ARCH_X86_64
+    SWAP                     0, 8
+    SWAP                     3, 9
+    SWAP                     4, 10
+    SWAP                     7, 11
+%endif
 
-    VP9_UNPACK_MULSUB_2D_4X  0, 11,  4,  5, 15137,  6270    ; m0/4=t5[d], m11/5=t4[d]
-    VP9_UNPACK_MULSUB_2D_4X  9,  2,  6, 13,  6270, 15137    ; m9/6=t6[d], m2/13=t7[d]
-    VP9_RND_SH_SUMSUB_BA     9, 11,  6,  5, 14, m7
-    psignw                  m9, [pw_m1]                     ; m9=out1[w], m11=t6[w]
-    VP9_RND_SH_SUMSUB_BA     2,  0, 13,  4, 14, m7          ; m2=out6[w], m0=t7[w]
+    VP9_UNPACK_MULSUB_2D_4X  5,  2,  0,  3, 14449,  7723    ; m5/2=t3[d], m2/4=t2[d]
+    VP9_UNPACK_MULSUB_2D_4X  1,  6,  4,  7,  4756, 15679    ; m1/4=t7[d], m6/7=t6[d]
+    SCRATCH                  4, 12, blockq+1*16
+    VP9_RND_SH_SUMSUB_BA     6,  2,  7,  3, 4, D_8192_REG  ; m6=t2[w], m2=t6[w]
+    UNSCRATCH                4, 12, blockq+1*16
+    VP9_RND_SH_SUMSUB_BA     1,  5,  4,  0, 3, D_8192_REG  ; m1=t3[w], m5=t7[w]
+
+    UNSCRATCH                0,  8, blockq+16*0
+    UNSCRATCH                3,  9, blockq+16*3
+    UNSCRATCH                4, 10, blockq+16*4
+    UNSCRATCH                7, 11, blockq+16*7
+    SCRATCH                  1,  8, blockq+16*1
+    SCRATCH                  2,  9, blockq+16*2
+    SCRATCH                  5, 10, blockq+16*5
+    SCRATCH                  6, 11, blockq+16*6
+
+    VP9_UNPACK_MULSUB_2D_4X  7,  0,  1,  2, 16305,  1606    ; m7/1=t1[d], m0/2=t0[d]
+    VP9_UNPACK_MULSUB_2D_4X  3,  4,  5,  6, 10394, 12665    ; m3/5=t5[d], m4/6=t4[d]
+    SCRATCH                  1, 12, blockq+ 0*16
+    VP9_RND_SH_SUMSUB_BA     4,  0,  6,  2, 1, D_8192_REG  ; m4=t0[w], m0=t4[w]
+    UNSCRATCH                1, 12, blockq+ 0*16
+    VP9_RND_SH_SUMSUB_BA     3,  7,  5,  1, 2, D_8192_REG  ; m3=t1[w], m7=t5[w]
+
+    UNSCRATCH                2,  9, blockq+16*2
+    UNSCRATCH                5, 10, blockq+16*5
+    SCRATCH                  3,  9, blockq+16*3
+    SCRATCH                  4, 10, blockq+16*4
+
+    ; m4=t0, m3=t1, m6=t2, m1=t3, m0=t4, m7=t5, m2=t6, m5=t7
+
+    VP9_UNPACK_MULSUB_2D_4X  0,  7,  1,  3, 15137,  6270    ; m0/1=t5[d], m7/3=t4[d]
+    VP9_UNPACK_MULSUB_2D_4X  5,  2,  4,  6,  6270, 15137    ; m5/4=t6[d], m2/6=t7[d]
+    SCRATCH                  1, 12, blockq+ 0*16
+    VP9_RND_SH_SUMSUB_BA     5,  7,  4,  3, 1, D_8192_REG
+    UNSCRATCH                1, 12, blockq+ 0*16
+    PSIGNW                  m5, W_M1_REG                    ; m5=out1[w], m7=t6[w]
+    VP9_RND_SH_SUMSUB_BA     2,  0,  6,  1, 3, D_8192_REG   ; m2=out6[w], m0=t7[w]
+
+    UNSCRATCH                1,  8, blockq+16*1
+    UNSCRATCH                3,  9, blockq+16*3
+    UNSCRATCH                4, 10, blockq+16*4
+    UNSCRATCH                6, 11, blockq+16*6
+    SCRATCH                  2,  8, blockq+16*0
+
+    SUMSUB_BA                w,  6,  4, 2                   ; m6=out0[w], m4=t2[w]
+    SUMSUB_BA                w,  1,  3, 2
+    PSIGNW                  m1, W_M1_REG                    ; m1=out7[w], m3=t3[w]
+
+    ; m6=out0, m5=out1, m4=t2, m3=t3, m7=t6, m0=t7, m2=out6, m1=out7
+
+%if cpuflag(ssse3)
+    SUMSUB_BA                w,  3,  4,  2
+    SUMSUB_BA                w,  0,  7,  2
+    pmulhrsw                m3, W_11585x2_REG
+    pmulhrsw                m7, W_11585x2_REG
+    pmulhrsw                m4, W_11585x2_REG               ; out4
+    pmulhrsw                m0, W_11585x2_REG               ; out2
+%else
+    SCRATCH                  5,  9, blockq+16*1
+    VP9_UNPACK_MULSUB_2W_4X  4, 3, 11585, 11585, D_8192_REG, 2, 5
+    VP9_UNPACK_MULSUB_2W_4X  7, 0, 11585, 11585, D_8192_REG, 2, 5
+    UNSCRATCH                5,  9, blockq+16*1
+%endif
+    PSIGNW                  m3, W_M1_REG                    ; out3
+    PSIGNW                  m7, W_M1_REG                    ; out5
 
-    SUMSUB_BA                w, 10,  8, 14                  ; m10=out0[w], m8=t2[w]
-    SUMSUB_BA                w,  1,  3, 14
-    psignw                  m1, [pw_m1]                     ; m1=out7[w], m3=t3[w]
+    ; m6=out0, m5=out1, m0=out2, m3=out3, m4=out4, m7=out5, m2=out6, m1=out7
 
-    ; m10=out0, m9=out1, m8=t2, m3=t3, m11=t6, m0=t7, m2=out6, m1=out7
+%if ARCH_X86_64
+    SWAP                     2, 8
+%endif
+    SWAP                     0, 6, 2
+    SWAP                     7, 1, 5
+%endmacro
 
-    SUMSUB_BA                w,  3,  8,  4
-    SUMSUB_BA                w,  0, 11,  5
-    pmulhrsw                m3, m12
-    pmulhrsw               m11, m12
-    pmulhrsw                m8, m12                         ; out4
-    pmulhrsw                m0, m12                         ; out2
-    psignw                  m3, [pw_m1]                     ; out3
-    psignw                 m11, [pw_m1]                     ; out5
+%macro IADST8_FN 6
+INIT_XMM %5
+cglobal vp9_%1_%3_8x8_add, 3, 3, %6, dst, stride, block, eob
 
-    ; m10=out0, m9=out1, m0=out2, m3=out3, m8=out4, m11=out5, m2=out6, m1=out7
+%ifidn %1, idct
+%define first_is_idct 1
+%else
+%define first_is_idct 0
+%endif
 
-    SWAP                     0, 10, 2
-    SWAP                    11,  1, 9
-%endmacro
+%ifidn %3, idct
+%define second_is_idct 1
+%else
+%define second_is_idct 0
+%endif
 
-%macro IADST8_FN 5
-INIT_XMM %5
-cglobal vp9_%1_%3_8x8_add, 3, 3, 15, dst, stride, block, eob
+%if ARCH_X86_64
     mova                m0, [blockq+  0]    ; IN(0)
+%endif
     mova                m1, [blockq+ 16]    ; IN(1)
     mova                m2, [blockq+ 32]    ; IN(2)
+%if ARCH_X86_64 || first_is_idct
     mova                m3, [blockq+ 48]    ; IN(3)
-    mova                m8, [blockq+ 64]    ; IN(4)
-    mova                m9, [blockq+ 80]    ; IN(5)
-    mova               m10, [blockq+ 96]    ; IN(6)
-    mova               m11, [blockq+112]    ; IN(7)
+%endif
+%if ARCH_X86_64
+    mova                m4, [blockq+ 64]    ; IN(4)
+%endif
+    mova                m5, [blockq+ 80]    ; IN(5)
+    mova                m6, [blockq+ 96]    ; IN(6)
+%if ARCH_X86_64 || first_is_idct
+    mova                m7, [blockq+112]    ; IN(7)
+%endif
+%if ARCH_X86_64
+%if cpuflag(ssse3)
+    mova               m15, [pw_11585x2]    ; often used
+%endif
+    mova               m13, [pd_8192]       ; rounding
+    mova               m14, [pw_m1]
+%define W_11585x2_REG m15
+%define D_8192_REG m13
+%define W_M1_REG m14
+%else
+%define W_11585x2_REG [pw_11585x2]
+%define D_8192_REG [pd_8192]
+%define W_M1_REG [pw_m1]
+%endif
 
-    mova               m12, [pw_11585x2]    ; often used
-    mova                m7, [pd_8192]       ; rounding
+    ; note different calling conventions for idct8 vs. iadst8 on x86-32
     VP9_%2_1D
-    TRANSPOSE8x8W  0, 1, 2, 3, 8, 9, 10, 11, 4
+%if ARCH_X86_64
+    TRANSPOSE8x8W  0, 1, 2, 3, 4, 5, 6, 7, 8
+%else
+    TRANSPOSE8x8W  0, 1, 2, 3, 4, 5, 6, 7, [blockq+0], [blockq+64], 1
+    mova      [blockq+  0], m0
+%if second_is_idct == 0
+    mova      [blockq+ 48], m3
+    mova      [blockq+112], m7
+%endif
+%endif
     VP9_%4_1D
 
-    pxor                m4, m4  ; used for the block reset, and VP9_STORE_2X
-    ZERO_BLOCK      blockq, 16, 8, m4
+%if ARCH_X86_64
+    SWAP                 6, 8
+%endif
+    pxor                m6, m6  ; used for the block reset, and VP9_STORE_2X
     VP9_IDCT8_WRITEOUT
+    ZERO_BLOCK      blockq, 16, 8, m6
     RET
+
+%undef W_11585x2_REG
+%undef first_is_idct
+%undef second_is_idct
+
 %endmacro
 
-IADST8_FN idct,  IDCT8,  iadst, IADST8, ssse3
-IADST8_FN idct,  IDCT8,  iadst, IADST8, avx
-IADST8_FN iadst, IADST8, idct,  IDCT8,  ssse3
-IADST8_FN iadst, IADST8, idct,  IDCT8,  avx
-IADST8_FN iadst, IADST8, iadst, IADST8, ssse3
-IADST8_FN iadst, IADST8, iadst, IADST8, avx
+%define PSIGNW PSIGNW_MMX
+IADST8_FN idct,  IDCT8,  iadst, IADST8, sse2, 15
+IADST8_FN iadst, IADST8, idct,  IDCT8,  sse2, 15
+IADST8_FN iadst, IADST8, iadst, IADST8, sse2, 15
+%define PSIGNW PSIGNW_SSSE3
+IADST8_FN idct,  IDCT8,  iadst, IADST8, ssse3, 16
+IADST8_FN idct,  IDCT8,  iadst, IADST8, avx, 16
+IADST8_FN iadst, IADST8, idct,  IDCT8,  ssse3, 16
+IADST8_FN iadst, IADST8, idct,  IDCT8,  avx, 16
+IADST8_FN iadst, IADST8, iadst, IADST8, ssse3, 16
+IADST8_FN iadst, IADST8, iadst, IADST8, avx, 16
+%undef PSIGNW
 
 ;---------------------------------------------------------------------------------------------
 ; void vp9_idct_idct_16x16_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
 ;---------------------------------------------------------------------------------------------
 
-; at the end of this macro, m7 is stored in stack_scratch
+; x86-64:
+; at the end of this macro, m7 is stored in [%4+15*%5]
 ; everything else (t0-6 and t8-15) is stored in m0-6 and m8-15
 ; the following sumsubs have not been done yet:
 ;    SUMSUB_BA            w,  6,  9, 15      ; t6, t9
 ;    SUMSUB_BA            w,  7,  8, 15      ; t7, t8
-%macro VP9_IDCT16_1D_START 4 ; src, nnzc, stride, stack_scratch
+; or (x86-32) t0-t5 are in m0-m5, t10-t15 are in x11/9/7/5/3/1,
+; and the following sumsubs have not been done yet:
+;    SUMSUB_BA            w, x13, x14, 7       ; t6, t9
+;    SUMSUB_BA            w, x15, x12, 7       ; t7, t8
+
+%macro VP9_IDCT16_1D_START 5 ; src, nnzc, stride, scratch, scratch_stride
 %if %2 <= 4
     mova                m3, [%1+ 1*%3]      ; IN(1)
-    mova               m12, [%1+ 2*%3]      ; IN(2)
     mova                m0, [%1+ 3*%3]      ; IN(3)
 
-    pmulhrsw           m15, m12, [pw_16069x2]       ; t6-7
-    pmulhrsw           m12, [pw_3196x2]             ; t4-5
     pmulhrsw            m4, m3,  [pw_16305x2]       ; t14-15
     pmulhrsw            m3, [pw_1606x2]             ; t8-9
     pmulhrsw            m7, m0,  [pw_m4756x2]       ; t10-11
@@ -629,140 +1006,216 @@ IADST8_FN iadst, IADST8, iadst, IADST8, avx
     ; m8=t0, m9=t1, m10=t2, m11=t3, m12=t4, m14=t5, m13=t6, m15=t7
     ; m3=t8, m5=t9, m1=t10, m7=t11, m0=t12, m6=t13, m2=t14, m4=t15
 
-    paddw              m14, m15, m12
-    psubw              m13, m15, m12
-    pmulhrsw           m13, [pw_11585x2]            ; t5
-    pmulhrsw           m14, [pw_11585x2]            ; t6
-
-    VP9_UNPACK_MULSUB_2W_4X 2, 5, 4, 3, 15137,  6270, [pd_8192], 10, 11 ; t9,  t14
-    VP9_UNPACK_MULSUB_2W_4X 6, 1, 0, 7, 6270, m15137, [pd_8192], 10, 11 ; t10, t13
+    VP9_UNPACK_MULSUB_2W_4X 2, 5, 4, 3, 15137,  6270, [pd_8192], 1, 6 ; t9,  t14
+    SCRATCH              4, 10, %4+ 1*%5
+    SCRATCH              5, 11, %4+ 7*%5
+    VP9_UNPACK_MULSUB_2W_4X 6, 1, 0, 7, 6270, m15137, [pd_8192], 4, 5 ; t10, t13
+    UNSCRATCH            5, 11, %4+ 7*%5
 
     ; m15=t0, m14=t1, m13=t2, m12=t3, m11=t4, m10=t5, m9=t6, m8=t7
     ; m7=t8, m6=t9, m2=t10, m3=t11, m4=t12, m5=t13, m1=t14, m0=t15
 %else
-    mova                m5, [%1+ 1*%3]     ; IN(1)
-    mova               m14, [%1+ 2*%3]     ; IN(2)
-    mova                m6, [%1+ 3*%3]     ; IN(3)
-    mova                m9, [%1+ 4*%3]     ; IN(4)
-    mova                m7, [%1+ 5*%3]     ; IN(5)
-    mova               m15, [%1+ 6*%3]     ; IN(6)
-    mova                m4, [%1+ 7*%3]     ; IN(7)
+    mova                m5, [%1+ 1*%3]      ; IN(1)
+    mova                m4, [%1+ 7*%3]      ; IN(7)
 %if %2 <= 8
-    pmulhrsw            m8, m9,  [pw_15137x2]       ; t3
-    pmulhrsw            m9, [pw_6270x2]             ; t2
-    pmulhrsw           m13, m14, [pw_16069x2]       ; t7
-    pmulhrsw           m14, [pw_3196x2]             ; t4
-    pmulhrsw           m12, m15, [pw_m9102x2]       ; t5
-    pmulhrsw           m15, [pw_13623x2]            ; t6
     pmulhrsw            m2, m5,  [pw_16305x2]       ; t15
     pmulhrsw            m5, [pw_1606x2]             ; t8
     pmulhrsw            m3, m4,  [pw_m10394x2]      ; t9
     pmulhrsw            m4, [pw_12665x2]            ; t14
+%else
+    mova                m3, [%1+ 9*%3]      ; IN(9)
+    mova                m2, [%1+15*%3]      ; IN(15)
+
+    ; m10=in0, m5=in1, m14=in2, m6=in3, m9=in4, m7=in5, m15=in6, m4=in7
+    ; m11=in8, m3=in9, m12=in10 m0=in11, m8=in12, m1=in13, m13=in14, m2=in15
+
+    VP9_UNPACK_MULSUB_2W_4X   5,   2, 16305,  1606, [pd_8192], 0, 1 ; t8,  t15
+    VP9_UNPACK_MULSUB_2W_4X   3,   4, 10394, 12665, [pd_8192], 0, 1 ; t9,  t14
+%endif
+
+    SUMSUB_BA            w,  3,  5, 0       ; t8,  t9
+    SUMSUB_BA            w,  4,  2, 0       ; t15, t14
+
+    VP9_UNPACK_MULSUB_2W_4X   2,   5, 15137,  6270, [pd_8192], 0, 1 ; t9,  t14
+
+    SCRATCH              4, 10, %4+ 1*%5
+    SCRATCH              5, 11, %4+ 7*%5
+
+    mova                m6, [%1+ 3*%3]      ; IN(3)
+    mova                m7, [%1+ 5*%3]      ; IN(5)
+%if %2 <= 8
     pmulhrsw            m0, m7,  [pw_14449x2]       ; t13
     pmulhrsw            m7, [pw_7723x2]             ; t10
     pmulhrsw            m1, m6,  [pw_m4756x2]       ; t11
     pmulhrsw            m6, [pw_15679x2]            ; t12
 %else
-    mova                m3, [%1+ 9*%3]     ; IN(9)
-    mova               m12, [%1+10*%3]     ; IN(10)
-    mova                m0, [%1+11*%3]     ; IN(11)
-    mova                m8, [%1+12*%3]     ; IN(12)
-    mova                m1, [%1+13*%3]     ; IN(13)
-    mova               m13, [%1+14*%3]     ; IN(14)
-    mova                m2, [%1+15*%3]     ; IN(15)
-
-    ; m10=in0, m5=in1, m14=in2, m6=in3, m9=in4, m7=in5, m15=in6, m4=in7
-    ; m11=in8, m3=in9, m12=in10 m0=in11, m8=in12, m1=in13, m13=in14, m2=in15
+    mova                m0, [%1+11*%3]      ; IN(11)
+    mova                m1, [%1+13*%3]      ; IN(13)
 
-    VP9_UNPACK_MULSUB_2W_4X   9,   8, 15137,  6270, [pd_8192], 10, 11 ; t2,  t3
-    VP9_UNPACK_MULSUB_2W_4X  14,  13, 16069,  3196, [pd_8192], 10, 11 ; t4,  t7
-    VP9_UNPACK_MULSUB_2W_4X  12,  15,  9102, 13623, [pd_8192], 10, 11 ; t5,  t6
-    VP9_UNPACK_MULSUB_2W_4X   5,   2, 16305,  1606, [pd_8192], 10, 11 ; t8,  t15
-    VP9_UNPACK_MULSUB_2W_4X   3,   4, 10394, 12665, [pd_8192], 10, 11 ; t9,  t14
-    VP9_UNPACK_MULSUB_2W_4X   7,   0, 14449,  7723, [pd_8192], 10, 11 ; t10, t13
-    VP9_UNPACK_MULSUB_2W_4X   1,   6,  4756, 15679, [pd_8192], 10, 11 ; t11, t12
+    VP9_UNPACK_MULSUB_2W_4X   7,   0, 14449,  7723, [pd_8192], 4, 5 ; t10, t13
+    VP9_UNPACK_MULSUB_2W_4X   1,   6,  4756, 15679, [pd_8192], 4, 5 ; t11, t12
 %endif
 
     ; m11=t0, m10=t1, m9=t2, m8=t3, m14=t4, m12=t5, m15=t6, m13=t7
     ; m5=t8, m3=t9, m7=t10, m1=t11, m6=t12, m0=t13, m4=t14, m2=t15
 
-    SUMSUB_BA            w, 12, 14, 10      ; t4,  t5
-    SUMSUB_BA            w, 15, 13, 10      ; t7,  t6
-    SUMSUB_BA            w,  3,  5, 10      ; t8,  t9
-    SUMSUB_BA            w,  7,  1, 10      ; t11, t10
-    SUMSUB_BA            w,  0,  6, 10      ; t12, t13
-    SUMSUB_BA            w,  4,  2, 10      ; t15, t14
+    SUMSUB_BA            w,  7,  1, 4       ; t11, t10
+    SUMSUB_BA            w,  0,  6, 4       ; t12, t13
 
     ; m8=t0, m9=t1, m10=t2, m11=t3, m12=t4, m14=t5, m13=t6, m15=t7
     ; m3=t8, m5=t9, m1=t10, m7=t11, m0=t12, m6=t13, m2=t14, m4=t15
 
-    SUMSUB_BA            w, 14, 13, 10
-    pmulhrsw           m13, [pw_11585x2]                              ; t5
-    pmulhrsw           m14, [pw_11585x2]                              ; t6
-    VP9_UNPACK_MULSUB_2W_4X   2,   5, 15137,  6270, [pd_8192], 10, 11 ; t9,  t14
-    VP9_UNPACK_MULSUB_2W_4X   6,   1, 6270, m15137, [pd_8192], 10, 11 ; t10, t13
+    VP9_UNPACK_MULSUB_2W_4X   6,   1, 6270, m15137, [pd_8192], 4, 5 ; t10, t13
+
+    UNSCRATCH            5, 11, %4+ 7*%5
 %endif
 
     ; m8=t0, m9=t1, m10=t2, m11=t3, m12=t4, m13=t5, m14=t6, m15=t7
     ; m3=t8, m2=t9, m6=t10, m7=t11, m0=t12, m1=t13, m5=t14, m4=t15
 
-    SUMSUB_BA            w,  7,  3, 10      ; t8,  t11
-    SUMSUB_BA            w,  6,  2, 10      ; t9,  t10
-    SUMSUB_BA            w,  0,  4, 10      ; t15, t12
-    SUMSUB_BA            w,  1,  5, 10      ; t14. t13
+    SUMSUB_BA            w,  7,  3, 4       ; t8,  t11
+
+    ; backup first register
+    mova        [%4+15*%5], m7
+
+    SUMSUB_BA            w,  6,  2, 7       ; t9,  t10
+    UNSCRATCH            4, 10, %4+ 1*%5
+    SUMSUB_BA            w,  0,  4, 7       ; t15, t12
+    SUMSUB_BA            w,  1,  5, 7       ; t14, t13
 
     ; m15=t0, m14=t1, m13=t2, m12=t3, m11=t4, m10=t5, m9=t6, m8=t7
     ; m7=t8, m6=t9, m2=t10, m3=t11, m4=t12, m5=t13, m1=t14, m0=t15
 
-    SUMSUB_BA            w,  2,  5, 10
-    SUMSUB_BA            w,  3,  4, 10
+%if cpuflag(ssse3)
+    SUMSUB_BA            w,  2,  5, 7
+    SUMSUB_BA            w,  3,  4, 7
     pmulhrsw            m5, [pw_11585x2]    ; t10
     pmulhrsw            m4, [pw_11585x2]    ; t11
     pmulhrsw            m3, [pw_11585x2]    ; t12
     pmulhrsw            m2, [pw_11585x2]    ; t13
-
-    ; backup first register
-    mova              [%4], m7
+%else
+    SCRATCH              6, 10, %4+ 1*%5
+    VP9_UNPACK_MULSUB_2W_4X   5,   2, 11585, 11585, [pd_8192], 6, 7 ; t10, t13
+    VP9_UNPACK_MULSUB_2W_4X   4,   3, 11585, 11585, [pd_8192], 6, 7 ; t11, t12
+    UNSCRATCH            6, 10, %4+ 1*%5
+%endif
 
     ; m15=t0, m14=t1, m13=t2, m12=t3, m11=t4, m10=t5, m9=t6, m8=t7
     ; m7=t8, m6=t9, m5=t10, m4=t11, m3=t12, m2=t13, m1=t14, m0=t15
 
-    ; from load/start
+    SCRATCH              0,  8, %4+ 1*%5
+    SCRATCH              1,  9, %4+ 3*%5
+    SCRATCH              2, 10, %4+ 5*%5
+    SCRATCH              3, 11, %4+ 7*%5
+    SCRATCH              4, 12, %4+ 9*%5
+    SCRATCH              5, 13, %4+11*%5
+    SCRATCH              6, 14, %4+13*%5
+
+    ; even (tx8x8)
 %if %2 <= 4
-    mova               m11, [%1+ 0*%3]      ; IN(0)
-    pmulhrsw           m11, [pw_11585x2]    ; t0-t3
+    mova                m3, [%1+ 0*%3]      ; IN(0)
+    mova                m4, [%1+ 2*%3]      ; IN(2)
+
+    pmulhrsw            m3, [pw_11585x2]    ; t0-t3
+    pmulhrsw            m7, m4, [pw_16069x2]        ; t6-7
+    pmulhrsw            m4, [pw_3196x2]             ; t4-5
+
+    paddw               m6, m7, m4
+    psubw               m5, m7, m4
+    pmulhrsw            m5, [pw_11585x2]            ; t5
+    pmulhrsw            m6, [pw_11585x2]            ; t6
+
+    psubw               m0, m3, m7
+    paddw               m7, m3
+    psubw               m1, m3, m6
+    paddw               m6, m3
+    psubw               m2, m3, m5
+    paddw               m5, m3
+
+%if ARCH_X86_32
+    SWAP                 0, 7
+%endif
+    SCRATCH              7, 15, %4+12*%5
+%else
+    mova                m6, [%1+ 2*%3]      ; IN(2)
+    mova                m1, [%1+ 4*%3]      ; IN(4)
+    mova                m7, [%1+ 6*%3]      ; IN(6)
+%if %2 <= 8
+    pmulhrsw            m0, m1,  [pw_15137x2]       ; t3
+    pmulhrsw            m1, [pw_6270x2]             ; t2
+    pmulhrsw            m5, m6, [pw_16069x2]        ; t7
+    pmulhrsw            m6, [pw_3196x2]             ; t4
+    pmulhrsw            m4, m7, [pw_m9102x2]        ; t5
+    pmulhrsw            m7, [pw_13623x2]            ; t6
+%else
+    mova                m4, [%1+10*%3]      ; IN(10)
+    mova                m0, [%1+12*%3]      ; IN(12)
+    mova                m5, [%1+14*%3]      ; IN(14)
+
+    VP9_UNPACK_MULSUB_2W_4X   1,   0, 15137,  6270, [pd_8192], 2, 3 ; t2,  t3
+    VP9_UNPACK_MULSUB_2W_4X   6,   5, 16069,  3196, [pd_8192], 2, 3 ; t4,  t7
+    VP9_UNPACK_MULSUB_2W_4X   4,   7,  9102, 13623, [pd_8192], 2, 3 ; t5,  t6
+%endif
+
+    SUMSUB_BA            w,  4,  6, 2       ; t4,  t5
+    SUMSUB_BA            w,  7,  5, 2       ; t7,  t6
 
-    psubw               m8, m11, m15
-    paddw              m15, m11
-    psubw               m9, m11, m14
-    paddw              m14, m11
-    psubw              m10, m11, m13
-    paddw              m13, m11
+%if cpuflag(ssse3)
+    SUMSUB_BA            w,  6,  5, 2
+    pmulhrsw            m5, [pw_11585x2]                              ; t5
+    pmulhrsw            m6, [pw_11585x2]                              ; t6
 %else
-    mova               m10, [%1+ 0*%3]      ; IN(0)
+    VP9_UNPACK_MULSUB_2W_4X  5,  6, 11585, 11585, [pd_8192], 2, 3 ; t5,  t6
+%endif
+
+    SCRATCH              5, 15, %4+10*%5
+    mova                m2, [%1+ 0*%3]      ; IN(0)
 %if %2 <= 8
-    pmulhrsw           m10, [pw_11585x2]    ; t0 and t1
-    psubw              m11, m10, m8
-    paddw               m8, m10
+    pmulhrsw            m2, [pw_11585x2]    ; t0 and t1
+    psubw               m3, m2, m0
+    paddw               m0, m2
+
+    SUMSUB_BA            w,  7,  0, 5       ; t0,  t7
 %else
-    mova               m11, [%1+ 8*%3]      ; IN(8)
+    mova                m3, [%1+ 8*%3]      ; IN(8)
 
     ; from 3 stages back
-    SUMSUB_BA            w, 11, 10, 7
-    pmulhrsw           m11, [pw_11585x2]    ; t0
-    pmulhrsw           m10, [pw_11585x2]    ; t1
+%if cpuflag(ssse3)
+    SUMSUB_BA            w,  3,  2, 5
+    pmulhrsw            m3, [pw_11585x2]    ; t0
+    pmulhrsw            m2, [pw_11585x2]    ; t1
+%else
+    mova        [%1+ 0*%3], m0
+    VP9_UNPACK_MULSUB_2W_4X  2,  3, 11585,  11585, [pd_8192], 5, 0 ; t0, t1
+    mova                m0, [%1+ 0*%3]
+%endif
 
     ; from 2 stages back
-    SUMSUB_BA            w,  8, 11, 7       ; t0,  t3
+    SUMSUB_BA            w,  0,  3, 5      ; t0,  t3
+
+    SUMSUB_BA            w,  7,  0, 5      ; t0,  t7
+%endif
+    UNSCRATCH            5, 15, %4+10*%5
+%if ARCH_X86_32
+    SWAP                 0, 7
 %endif
-    SUMSUB_BA            w,  9, 10, 7       ; t1,  t2
+    SCRATCH              7, 15, %4+12*%5
+    SUMSUB_BA            w,  1,  2, 7       ; t1,  t2
 
     ; from 1 stage back
-    SUMSUB_BA            w, 15,  8, 7       ; t0,  t7
-    SUMSUB_BA            w, 14,  9, 7       ; t1,  t6
-    SUMSUB_BA            w, 13, 10, 7       ; t2,  t5
+    SUMSUB_BA            w,  6,  1, 7       ; t1,  t6
+    SUMSUB_BA            w,  5,  2, 7       ; t2,  t5
 %endif
-    SUMSUB_BA            w, 12, 11, 7       ; t3,  t4
+    SUMSUB_BA            w,  4,  3, 7       ; t3,  t4
+
+%if ARCH_X86_64
+    SWAP                 0, 8
+    SWAP                 1, 9
+    SWAP                 2, 10
+    SWAP                 3, 11
+    SWAP                 4, 12
+    SWAP                 5, 13
+    SWAP                 6, 14
 
     SUMSUB_BA            w,  0, 15, 7       ; t0, t15
     SUMSUB_BA            w,  1, 14, 7       ; t1, t14
@@ -770,15 +1223,36 @@ IADST8_FN iadst, IADST8, iadst, IADST8, avx
     SUMSUB_BA            w,  3, 12, 7       ; t3, t12
     SUMSUB_BA            w,  4, 11, 7       ; t4, t11
     SUMSUB_BA            w,  5, 10, 7       ; t5, t10
+%else
+    SWAP                 1, 6
+    SWAP                 2, 5
+    SWAP                 3, 4
+    mova        [%4+14*%5], m6
+
+%macro %%SUMSUB_BA_STORE 5 ; reg, from_mem, to_mem, scratch, scratch_stride
+    mova                m6, [%4+%2*%5]
+    SUMSUB_BA            w,  6, %1, 7
+    SWAP                %1, 6
+    mova        [%4+%3*%5], m6
 %endmacro
 
-%macro VP9_IDCT16_1D 2-3 16 ; src, pass, nnzc
-    VP9_IDCT16_1D_START %1, %3, 32, tmpq+32
+    %%SUMSUB_BA_STORE    0,  1,  1, %4, %5  ; t0, t15
+    %%SUMSUB_BA_STORE    1,  3,  3, %4, %5  ; t1, t14
+    %%SUMSUB_BA_STORE    2,  5,  5, %4, %5  ; t2, t13
+    %%SUMSUB_BA_STORE    3,  7,  7, %4, %5  ; t3, t12
+    %%SUMSUB_BA_STORE    4,  9,  9, %4, %5  ; t4, t11
+    %%SUMSUB_BA_STORE    5, 11, 11, %4, %5  ; t5, t10
+%endif
+%endmacro
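
A note on the SSE2 half of the macro above: pmulhrsw is an SSSE3 instruction, so wherever the SSSE3 path does SUMSUB_BA followed by pmulhrsw with [pw_11585x2], the SSE2 path widens to 32 bits and rounds through [pd_8192] via VP9_UNPACK_MULSUB_2W_4X with 11585 as both coefficients. The two sequences are bit-identical per 16-bit lane; a minimal standalone C check (not code from the patch, relies on arithmetic right shift of negatives as on x86):

    #include <assert.h>
    #include <stdint.h>

    /* SSSE3: pmulhrsw x, [pw_11585x2] == (x * 23170 + (1 << 14)) >> 15 */
    static int mul_cos_pi4_ssse3(int16_t x)
    {
        return (x * (11585 * 2) + (1 << 14)) >> 15;
    }

    /* SSE2: widen to 32 bits, multiply by 11585, round via pd_8192, shift 14 */
    static int mul_cos_pi4_sse2(int16_t x)
    {
        return (x * 11585 + 8192) >> 14;
    }

    int main(void)
    {
        for (int x = -32768; x < 32768; x++)
            assert(mul_cos_pi4_ssse3((int16_t)x) == mul_cos_pi4_sse2((int16_t)x));
        return 0;
    }

Both compute round(x * 11585 / 16384), i.e. scaling by cos(pi/4) in Q14: with p = 11585*x, (2*p + 2^14) >> 15 reduces exactly to (p + 2^13) >> 14.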
 
+%macro VP9_IDCT16_1D 2-3 16 ; src, pass, nnzc
 %if %2 == 1
+    VP9_IDCT16_1D_START %1, %3, 32, tmpq, 16
+
+%if ARCH_X86_64
     ; back up a different register
-    mova         [tmpq+16], m15
-    mova                m7, [tmpq+32]
+    mova                m7, [tmpq+15*16]
+    mova      [tmpq+ 1*16], m15
 
     SUMSUB_BA            w,  6,  9, 15      ; t6, t9
     SUMSUB_BA            w,  7,  8, 15      ; t7, t8
@@ -793,7 +1267,7 @@ IADST8_FN iadst, IADST8, iadst, IADST8, avx
     mova        [tmpq+192], m6
     mova        [tmpq+224], m7
 
-    mova               m15, [tmpq+16]
+    mova               m15, [tmpq+ 1*16]
     TRANSPOSE8x8W        8, 9, 10, 11, 12, 13, 14, 15, 0
     mova        [tmpq+ 16], m8
     mova        [tmpq+ 48], m9
@@ -803,53 +1277,125 @@ IADST8_FN iadst, IADST8, iadst, IADST8, avx
     mova        [tmpq+176], m13
     mova        [tmpq+208], m14
     mova        [tmpq+240], m15
+%else
+    mova                m6, [tmpq+13*16]
+    mova                m7, [tmpq+14*16]
+    SUMSUB_BA            w, 6, 7                ; t6, t9
+    mova      [tmpq+14*16], m6
+    mova      [tmpq+13*16], m7
+    mova                m7, [tmpq+15*16]
+    mova                m6, [tmpq+12*16]
+    SUMSUB_BA            w, 7, 6                ; t7, t8
+    mova      [tmpq+15*16], m6
+
+    TRANSPOSE8x8W       0, 1, 2, 3, 4, 5, 6, 7, [tmpq+14*16], [tmpq+ 8*16], 1
+    mova     [tmpq+ 0*16], m0
+    mova     [tmpq+ 2*16], m1
+    mova     [tmpq+ 4*16], m2
+    mova     [tmpq+ 6*16], m3
+    mova     [tmpq+10*16], m5
+    mova     [tmpq+12*16], m6
+    mova     [tmpq+14*16], m7
+
+    mova                m0, [tmpq+15*16]
+    mova                m1, [tmpq+13*16]
+    mova                m2, [tmpq+11*16]
+    mova                m3, [tmpq+ 9*16]
+    mova                m4, [tmpq+ 7*16]
+    mova                m5, [tmpq+ 5*16]
+    mova                m7, [tmpq+ 1*16]
+    TRANSPOSE8x8W       0, 1, 2, 3, 4, 5, 6, 7, [tmpq+ 3*16], [tmpq+ 9*16], 1
+    mova     [tmpq+ 1*16], m0
+    mova     [tmpq+ 3*16], m1
+    mova     [tmpq+ 5*16], m2
+    mova     [tmpq+ 7*16], m3
+    mova     [tmpq+11*16], m5
+    mova     [tmpq+13*16], m6
+    mova     [tmpq+15*16], m7
+%endif
 %else ; %2 == 2
-    ; backup more registers
-    mova         [tmpq+64], m8
-    mova         [tmpq+96], m9
+    VP9_IDCT16_1D_START %1, %3, 32, %1, 32
+
+%if cpuflag(ssse3)
+%define ROUND_REG [pw_512]
+%else
+%define ROUND_REG [pw_32]
+%endif
 
     pxor                m7, m7
-    pmulhrsw            m0, [pw_512]
-    pmulhrsw            m1, [pw_512]
-    VP9_STORE_2X         0,  1,  8,  9,  7
+%if ARCH_X86_64
+    ; back up more registers
+    mova        [%1+ 2*32], m8
+    mova        [%1+ 3*32], m9
+
+    VP9_IDCT8_WRITEx2    0,  1, 8, 9, 7, ROUND_REG, 6
     lea               dstq, [dstq+strideq*2]
-    pmulhrsw            m2, [pw_512]
-    pmulhrsw            m3, [pw_512]
-    VP9_STORE_2X         2,  3,  8,  9,  7
+    VP9_IDCT8_WRITEx2    2,  3, 8, 9, 7, ROUND_REG, 6
     lea               dstq, [dstq+strideq*2]
-    pmulhrsw            m4, [pw_512]
-    pmulhrsw            m5, [pw_512]
-    VP9_STORE_2X         4,  5,  8,  9,  7
+    VP9_IDCT8_WRITEx2    4,  5, 8, 9, 7, ROUND_REG, 6
     lea               dstq, [dstq+strideq*2]
 
     ; restore from cache
     SWAP                 0, 7               ; move zero from m7 to m0
-    mova                m7, [tmpq+32]
-    mova                m8, [tmpq+64]
-    mova                m9, [tmpq+96]
+    mova                m7, [%1+15*32]
+    mova                m8, [%1+ 2*32]
+    mova                m9, [%1+ 3*32]
+
+    SUMSUB_BA            w,  6,  9, 3       ; t6, t9
+    SUMSUB_BA            w,  7,  8, 3       ; t7, t8
+
+    VP9_IDCT8_WRITEx2    6,  7, 3, 4, 0, ROUND_REG, 6
+    lea               dstq, [dstq+strideq*2]
+    VP9_IDCT8_WRITEx2    8,  9, 3, 4, 0, ROUND_REG, 6
+    lea               dstq, [dstq+strideq*2]
+    VP9_IDCT8_WRITEx2   10, 11, 1, 2, 0, ROUND_REG, 6
+    lea               dstq, [dstq+strideq*2]
+    VP9_IDCT8_WRITEx2   12, 13, 1, 2, 0, ROUND_REG, 6
+    lea               dstq, [dstq+strideq*2]
+    VP9_IDCT8_WRITEx2   14, 15, 1, 2, 0, ROUND_REG, 6
+%else
+    mova      [tmpq+ 0*32], m5
+
+    VP9_IDCT8_WRITEx2    0,  1, 5, 6, 7, ROUND_REG, 6
+    lea               dstq, [dstq+strideq*2]
+    VP9_IDCT8_WRITEx2    2,  3, 5, 6, 7, ROUND_REG, 6
+    lea               dstq, [dstq+strideq*2]
+
+    SWAP                 0, 7               ; move zero from m7 to m0
+    mova                m5, [tmpq+ 0*32]
+
+    VP9_IDCT8_WRITEx2    4,  5, 1, 2, 0, ROUND_REG, 6
+    lea               dstq, [dstq+strideq*2]
 
-    SUMSUB_BA            w,  6,  9, 1       ; t6, t9
-    SUMSUB_BA            w,  7,  8, 1       ; t7, t8
+    mova                m4, [tmpq+13*32]
+    mova                m7, [tmpq+14*32]
+    mova                m5, [tmpq+15*32]
+    mova                m6, [tmpq+12*32]
+    SUMSUB_BADC w, 4, 7, 5, 6, 1
 
-    pmulhrsw            m6, [pw_512]
-    pmulhrsw            m7, [pw_512]
-    VP9_STORE_2X         6,  7,  1,  2,  0
+    VP9_IDCT8_WRITEx2    4,  5, 1, 2, 0, ROUND_REG, 6
     lea               dstq, [dstq+strideq*2]
-    pmulhrsw            m8, [pw_512]
-    pmulhrsw            m9, [pw_512]
-    VP9_STORE_2X         8,  9,  1,  2,  0
+    VP9_IDCT8_WRITEx2    6,  7, 1, 2, 0, ROUND_REG, 6
     lea               dstq, [dstq+strideq*2]
-    pmulhrsw           m10, [pw_512]
-    pmulhrsw           m11, [pw_512]
-    VP9_STORE_2X        10, 11,  1,  2,  0
+
+    mova                m4, [tmpq+11*32]
+    mova                m5, [tmpq+ 9*32]
+    mova                m6, [tmpq+ 7*32]
+    mova                m7, [tmpq+ 5*32]
+
+    VP9_IDCT8_WRITEx2    4,  5, 1, 2, 0, ROUND_REG, 6
     lea               dstq, [dstq+strideq*2]
-    pmulhrsw           m12, [pw_512]
-    pmulhrsw           m13, [pw_512]
-    VP9_STORE_2X        12, 13,  1,  2,  0
+    VP9_IDCT8_WRITEx2    6,  7, 1, 2, 0, ROUND_REG, 6
     lea               dstq, [dstq+strideq*2]
-    pmulhrsw           m14, [pw_512]
-    pmulhrsw           m15, [pw_512]
-    VP9_STORE_2X        14, 15,  1,  2,  0
+
+    mova                m4, [tmpq+ 3*32]
+    mova                m5, [tmpq+ 1*32]
+
+    VP9_IDCT8_WRITEx2    4,  5, 1, 2, 0, ROUND_REG, 6
+    lea               dstq, [dstq+strideq*2]
+%endif
+
+%undef ROUND_REG
 %endif ; %2 == 1/2
 %endmacro
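
The ROUND_REG split above is the same SSSE3-vs-SSE2 story at the output stage: pmulhrsw against [pw_512] computes (x * 512 + 2^14) >> 15, which is exactly (x + 32) >> 6, so the SSE2 build gets identical rounding from [pw_32] with a plain add-and-shift (presumably paddw plus psraw by the trailing 6 passed to VP9_IDCT8_WRITEx2; that macro's body is outside this hunk). Per-lane sketch:

    #include <stdint.h>

    /* SSSE3 writeout rounding: pmulhrsw x, [pw_512] */
    static int round_out_ssse3(int16_t x)
    {
        return (x * 512 + (1 << 14)) >> 15;   /* == (x + 32) >> 6 exactly */
    }

    /* SSE2 writeout rounding: add pw_32, arithmetic shift right by 6 */
    static int round_out_sse2(int16_t x)
    {
        return (x + 32) >> 6;
    }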
 
@@ -873,19 +1419,38 @@ IADST8_FN iadst, IADST8, iadst, IADST8, avx
 %macro VP9_IDCT_IDCT_16x16_ADD_XMM 1
 INIT_XMM %1
 cglobal vp9_idct_idct_16x16_add, 4, 6, 16, 512, dst, stride, block, eob
+%if cpuflag(ssse3)
     ; 2x2=eob=3, 4x4=eob=10
     cmp eobd, 38
     jg .idctfull
     cmp eobd, 1 ; faster path for when only DC is set
     jne .idct8x8
+%else
+    cmp eobd, 1 ; faster path for when only DC is set
+    jg .idctfull
+%endif
 
     ; dc-only
+%if cpuflag(ssse3)
     movd                m0, [blockq]
     mova                m1, [pw_11585x2]
     pmulhrsw            m0, m1
     pmulhrsw            m0, m1
+%else
+    DEFINE_ARGS dst, stride, block, coef
+    movsx            coefd, word [blockq]
+    imul             coefd, 11585
+    add              coefd, 8192
+    sar              coefd, 14
+    imul             coefd, 11585
+    add              coefd, (32 << 14) + 8192
+    sar              coefd, 14 + 6
+    movd                m0, coefd
+%endif
     SPLATW              m0, m0, q0000
+%if cpuflag(ssse3)
     pmulhrsw            m0, [pw_512]
+%endif
     pxor                m5, m5
     movd          [blockq], m5
 %rep 7
@@ -896,6 +1461,7 @@ cglobal vp9_idct_idct_16x16_add, 4, 6, 16, 512, dst, stride, block, eob
     RET
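
The SSE2 dc-only branch above reads naturally as scalar C: two Q14 multiplications by 11585 (cos(pi/4) in Q14), with the (x + 32) >> 6 writeout rounding folded into the second shift as (32 << 14). A direct transcription of the movsx/imul/add/sar sequence (sketch; the result is then splatted and added to all 16x16 pixels by the store loop that follows):

    #include <stdint.h>

    static int idct16_dc_sse2(int16_t dc)
    {
        int t = (dc * 11585 + 8192) >> 14;                  /* first pass */
        return (t * 11585 + (32 << 14) + 8192) >> (14 + 6); /* second pass
                                                               + output round */
    }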
 
     DEFINE_ARGS dst, stride, block, cnt, dst_bak, tmp
+%if cpuflag(ssse3)
 .idct8x8:
     mov               tmpq, rsp
     VP9_IDCT16_1D   blockq, 1, 8
@@ -913,6 +1479,7 @@ cglobal vp9_idct_idct_16x16_add, 4, 6, 16, 512, dst, stride, block, eob
     ; use that to zero out block coefficients
     ZERO_BLOCK      blockq, 32, 8, m0
     RET
+%endif
 
 .idctfull:
     mov               cntd, 2
@@ -941,6 +1508,7 @@ cglobal vp9_idct_idct_16x16_add, 4, 6, 16, 512, dst, stride, block, eob
     RET
 %endmacro
 
+VP9_IDCT_IDCT_16x16_ADD_XMM sse2
 VP9_IDCT_IDCT_16x16_ADD_XMM ssse3
 VP9_IDCT_IDCT_16x16_ADD_XMM avx
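
Note the shape of the dispatch these three instantiations share: the eob <= 38 middle tier survives only under cpuflag(ssse3); the SSE2 build falls straight from the dc-only test to .idctfull. The threshold works because the first 38 coefficients of the 16x16 scan order all lie in the top-left 8x8, so an 8x8-coefficient pass (nnzc=8) is sufficient; the "2x2=eob=3, 4x4=eob=10" comment lists the analogous cutoffs for smaller regions. An illustrative sketch of the path selection (names here are made up, not the patch's labels):

    enum tx16_path { DC_ONLY, TOPLEFT_8X8, FULL };

    static enum tx16_path pick_path(int eob, int have_ssse3)
    {
        if (eob == 1)
            return DC_ONLY;             /* dc-only shortcut       */
        if (have_ssse3 && eob <= 38)
            return TOPLEFT_8X8;         /* .idct8x8, ssse3+ only  */
        return FULL;                    /* .idctfull              */
    }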
 
@@ -952,188 +1520,326 @@ VP9_IDCT_IDCT_16x16_ADD_XMM avx
 %assign %%str 16*%2
     mova                m0, [%1+ 0*32]  ; in0
     mova                m1, [%1+15*32]  ; in15
-    mova                m8, [%1+ 7*32]  ; in7
-    mova                m9, [%1+ 8*32]  ; in8
-
-    VP9_UNPACK_MULSUB_2D_4X  1,  0,  2,  3, 16364,   804    ; m1/2=t1[d], m0/3=t0[d]
-    VP9_UNPACK_MULSUB_2D_4X  8,  9, 11, 10, 11003, 12140    ; m8/11=t9[d], m9/10=t8[d]
-    VP9_RND_SH_SUMSUB_BA     9,  0, 10,  3,  4, [pd_8192]   ; m9=t0[w], m0=t8[w]
-    VP9_RND_SH_SUMSUB_BA     8,  1, 11,  2,  4, [pd_8192]   ; m8=t1[w], m1=t9[w]
-
-    mova               m11, [%1+ 2*32]  ; in2
-    mova               m10, [%1+13*32]  ; in13
+    mova                m2, [%1+ 7*32]  ; in7
+    mova                m3, [%1+ 8*32]  ; in8
+
+    VP9_UNPACK_MULSUB_2D_4X  1,  0,  4,  5, 16364,   804    ; m1/4=t1[d], m0/5=t0[d]
+    VP9_UNPACK_MULSUB_2D_4X  2,  3,  7,  6, 11003, 12140    ; m2/7=t9[d], m3/6=t8[d]
+    SCRATCH              4, 8, tmpq+ 0*%%str
+    VP9_RND_SH_SUMSUB_BA     3,  0,  6,  5,  4, [pd_8192]   ; m3=t0[w], m0=t8[w]
+    UNSCRATCH            4, 8, tmpq+ 0*%%str
+    VP9_RND_SH_SUMSUB_BA     2,  1,  7,  4,  5, [pd_8192]   ; m2=t1[w], m1=t9[w]
+
+    SCRATCH              0, 10, tmpq+ 0*%%str
+    SCRATCH              1, 11, tmpq+15*%%str
+    mova   [tmpq+ 7*%%str], m2
+    mova   [tmpq+ 8*%%str], m3
+
+    mova                m1, [%1+ 2*32]  ; in2
+    mova                m0, [%1+13*32]  ; in13
     mova                m3, [%1+ 5*32]  ; in5
     mova                m2, [%1+10*32]  ; in10
 
-    VP9_UNPACK_MULSUB_2D_4X 10, 11,  6,  7, 15893,  3981    ; m10/6=t3[d], m11/7=t2[d]
+    VP9_UNPACK_MULSUB_2D_4X  0,  1,  6,  7, 15893,  3981    ; m0/6=t3[d], m1/7=t2[d]
     VP9_UNPACK_MULSUB_2D_4X  3,  2,  4,  5,  8423, 14053    ; m3/4=t11[d], m2/5=t10[d]
-    VP9_RND_SH_SUMSUB_BA     2, 11,  5,  7, 12, [pd_8192]   ; m2=t2[w], m11=t10[w]
-    VP9_RND_SH_SUMSUB_BA     3, 10,  4,  6, 12, [pd_8192]   ; m3=t3[w], m10=t11[w]
-
-    mova   [tmpq+ 0*%%str], m9          ; make some scratch space (t0:m9->r0)
-    mova                m4, [%1+ 4*32]  ; in4
-    mova                m5, [%1+11*32]  ; in11
-    mova               m12, [%1+ 3*32]  ; in3
-    mova               m13, [%1+12*32]  ; in12
-
-    VP9_UNPACK_MULSUB_2D_4X  5,  4,  7,  6, 14811,  7005    ; m5/7=t5[d], m4/6=t4[d]
-    VP9_UNPACK_MULSUB_2D_4X 12, 13, 14, 15,  5520, 15426    ; m12/14=t13[d], m13/15=t12[d]
-    VP9_RND_SH_SUMSUB_BA    13,  4, 15,  6,  9, [pd_8192]   ; m13=t4[w], m4=t12[w]
-    VP9_RND_SH_SUMSUB_BA    12,  5, 14,  7,  9, [pd_8192]   ; m12=t5[w], m5=t13[w]
-
-    mova   [tmpq+ 2*%%str], m8          ; t1:m9->r2
-    mova   [tmpq+ 3*%%str], m2          ; t2:m2->r3
-    mova   [tmpq+ 4*%%str], m3          ; t3:m3->r4
-    mova   [tmpq+ 5*%%str], m13         ; t4:m13->r5
+    SCRATCH              4, 12, tmpq+ 2*%%str
+    VP9_RND_SH_SUMSUB_BA     2,  1,  5,  7,  4, [pd_8192]   ; m2=t2[w], m1=t10[w]
+    UNSCRATCH            4, 12, tmpq+ 2*%%str
+    VP9_RND_SH_SUMSUB_BA     3,  0,  4,  6,  5, [pd_8192]   ; m3=t3[w], m0=t11[w]
+
+    SCRATCH              0, 12, tmpq+ 2*%%str
+    SCRATCH              1, 13, tmpq+13*%%str
+    mova   [tmpq+ 5*%%str], m2
+    mova   [tmpq+10*%%str], m3
+
+    mova                m2, [%1+ 4*32]  ; in4
+    mova                m3, [%1+11*32]  ; in11
+    mova                m0, [%1+ 3*32]  ; in3
+    mova                m1, [%1+12*32]  ; in12
+
+    VP9_UNPACK_MULSUB_2D_4X  3,  2,  7,  6, 14811,  7005    ; m3/7=t5[d], m2/6=t4[d]
+    VP9_UNPACK_MULSUB_2D_4X  0,  1,  4,  5,  5520, 15426    ; m0/4=t13[d], m1/5=t12[d]
+    SCRATCH              4, 9, tmpq+ 4*%%str
+    VP9_RND_SH_SUMSUB_BA     1,  2,  5,  6,  4, [pd_8192]   ; m1=t4[w], m2=t12[w]
+    UNSCRATCH            4, 9, tmpq+ 4*%%str
+    VP9_RND_SH_SUMSUB_BA     0,  3,  4,  7,  6, [pd_8192]   ; m0=t5[w], m3=t13[w]
+
+    SCRATCH              0,  8, tmpq+ 4*%%str
+    mova   [tmpq+11*%%str], m1          ; t4:m1->r11
+    UNSCRATCH            0, 10, tmpq+ 0*%%str
+    UNSCRATCH            1, 11, tmpq+15*%%str
+
+    ; round 2 interleaved part 1
+    VP9_UNPACK_MULSUB_2D_4X  0,  1,  6,  7, 16069,  3196    ; m1/7=t8[d], m0/6=t9[d]
+    VP9_UNPACK_MULSUB_2D_4X  3,  2,  5,  4,  3196, 16069    ; m3/5=t12[d], m2/4=t13[d]
+    SCRATCH              4, 9, tmpq+ 3*%%str
+    VP9_RND_SH_SUMSUB_BA     3,  1,  5,  7,  4, [pd_8192]   ; m3=t8[w], m1=t12[w]
+    UNSCRATCH            4, 9, tmpq+ 3*%%str
+    VP9_RND_SH_SUMSUB_BA     2,  0,  4,  6,  5, [pd_8192]   ; m2=t9[w], m0=t13[w]
+
+    SCRATCH              0, 10, tmpq+ 0*%%str
+    SCRATCH              1, 11, tmpq+15*%%str
+    SCRATCH              2, 14, tmpq+ 3*%%str
+    SCRATCH              3, 15, tmpq+12*%%str
+
     mova                m2, [%1+ 6*32]  ; in6
     mova                m3, [%1+ 9*32]  ; in9
-    mova                m8, [%1+ 1*32]  ; in1
-    mova                m9, [%1+14*32]  ; in14
+    mova                m0, [%1+ 1*32]  ; in1
+    mova                m1, [%1+14*32]  ; in14
 
     VP9_UNPACK_MULSUB_2D_4X  3,  2,  7,  6, 13160,  9760    ; m3/7=t7[d], m2/6=t6[d]
-    VP9_UNPACK_MULSUB_2D_4X  8,  9, 13, 14,  2404, 16207    ; m8/13=t15[d], m9/14=t14[d]
-    VP9_RND_SH_SUMSUB_BA     9,  2, 14,  6, 15, [pd_8192]   ; m9=t6[w], m2=t14[w]
-    VP9_RND_SH_SUMSUB_BA     8,  3, 13,  7, 15, [pd_8192]   ; m8=t7[w], m3=t15[w]
+    VP9_UNPACK_MULSUB_2D_4X  0,  1,  4,  5,  2404, 16207    ; m0/4=t15[d], m1/5=t14[d]
+    SCRATCH              4, 9, tmpq+ 6*%%str
+    VP9_RND_SH_SUMSUB_BA     1,  2,  5,  6,  4, [pd_8192]   ; m1=t6[w], m2=t14[w]
+    UNSCRATCH            4, 9, tmpq+ 6*%%str
+    VP9_RND_SH_SUMSUB_BA     0,  3,  4,  7,  6, [pd_8192]   ; m0=t7[w], m3=t15[w]
+
+    ; r8=t0, r7=t1, r5=t2, r10=t3, r11=t4, m8|r4=t5, m1=t6, m0=t7
+    ; m10|r0=t8, m11|r15=t9, m13|r13=t10, m12|r2=t11, m14|r3=t12, m15|r12=t13, m2=t14, m3=t15
+
+    UNSCRATCH            4, 12, tmpq+ 2*%%str
+    UNSCRATCH            5, 13, tmpq+13*%%str
+    SCRATCH              0, 12, tmpq+ 1*%%str
+    SCRATCH              1, 13, tmpq+14*%%str
+
+    ; remainder of round 2 (rest of t8-15)
+    VP9_UNPACK_MULSUB_2D_4X  5,  4,  6,  7,  9102, 13623    ; m5/6=t11[d], m4/7=t10[d]
+    VP9_UNPACK_MULSUB_2D_4X  3,  2,  1,  0, 13623,  9102    ; m3/1=t14[d], m2/0=t15[d]
+    SCRATCH              0, 9, tmpq+ 6*%%str
+    VP9_RND_SH_SUMSUB_BA     3,  4,  1,  7,  0, [pd_8192]   ; m3=t10[w], m4=t14[w]
+    UNSCRATCH            0, 9, tmpq+ 6*%%str
+    VP9_RND_SH_SUMSUB_BA     2,  5,  0,  6,  1, [pd_8192]   ; m2=t11[w], m5=t15[w]
+
+    ; m15|r12=t8, m14|r3=t9, m3=t10, m2=t11, m11|r15=t12, m10|r0=t13, m4=t14, m5=t15
+
+    UNSCRATCH            6, 14, tmpq+ 3*%%str
+    UNSCRATCH            7, 15, tmpq+12*%%str
+
+    SUMSUB_BA                w,  3,  7,  1
+    PSIGNW                  m3, [pw_m1]                     ; m3=out1[w], m7=t10[w]
+    SUMSUB_BA                w,  2,  6,  1                  ; m2=out14[w], m6=t11[w]
+
+%if cpuflag(ssse3)
+    SUMSUB_BA                w,  7,  6,  1
+    pmulhrsw                m7, [pw_11585x2]                ; m7=out6[w]
+    pmulhrsw                m6, [pw_11585x2]                ; m6=out9[w]
+%else
+    VP9_UNPACK_MULSUB_2W_4X  6,  7, 11585, 11585, [pd_8192], 1, 0
+%endif
 
-    ; r0=t0, r2=t1, r3=t2, r4=t3, r5=t4, m12=t5, m9=t6, m8=t7
-    ; m0=t8, m1=t9, m11=t10, m10=t11, m4=t12, m5=t13, m2=t14, m3=t15
+    mova       [tmpq+ 3*%%str], m6
+    mova       [tmpq+ 6*%%str], m7
+    UNSCRATCH                6, 10, tmpq+ 0*%%str
+    UNSCRATCH                7, 11, tmpq+15*%%str
+    mova       [tmpq+13*%%str], m2
+    SCRATCH                  3, 11, tmpq+ 9*%%str
+
+    VP9_UNPACK_MULSUB_2D_4X  7,  6,  2,  3, 15137,  6270    ; m6/3=t13[d], m7/2=t12[d]
+    VP9_UNPACK_MULSUB_2D_4X  5,  4,  1,  0,  6270, 15137    ; m5/1=t14[d], m4/0=t15[d]
+    SCRATCH              0, 9, tmpq+ 2*%%str
+    VP9_RND_SH_SUMSUB_BA     5,  6,  1,  3,  0, [pd_8192]   ; m5=out2[w], m6=t14[w]
+    UNSCRATCH            0, 9, tmpq+ 2*%%str
+    VP9_RND_SH_SUMSUB_BA     4,  7,  0,  2,  1, [pd_8192]
+    PSIGNW                  m4, [pw_m1]                     ; m4=out13[w], m7=t15[w]
+
+%if cpuflag(ssse3)
+    SUMSUB_BA                w,  7,  6,  1
+    pmulhrsw                m7, [pw_m11585x2]               ; m7=out5[w]
+    pmulhrsw                m6, [pw_11585x2]                ; m6=out10[w]
+%else
+    PSIGNW                  m7, [pw_m1]
+    VP9_UNPACK_MULSUB_2W_4X  7,  6, 11585, 11585, [pd_8192], 1, 0
+%endif
 
-    ; handle t8-15 first
-    VP9_UNPACK_MULSUB_2D_4X  0,  1,  6,  7, 16069,  3196    ; m1/7=t8[d], m0/6=t9[d]
-    VP9_UNPACK_MULSUB_2D_4X  5,  4, 13, 14,  3196, 16069    ; m5/13=t12[d], m4/14=t13[d]
-    VP9_RND_SH_SUMSUB_BA     5,  1, 13,  7, 15, [pd_8192]   ; m5=t8[w], m1=t12[w]
-    VP9_RND_SH_SUMSUB_BA     4,  0, 14,  6, 15, [pd_8192]   ; m4=t9[w], m0=t13[w]
-
-    VP9_UNPACK_MULSUB_2D_4X 11, 10,  6,  7,  9102, 13623    ; m11/6=t11[d], m10/7=t10[d]
-    VP9_UNPACK_MULSUB_2D_4X  3,  2, 13, 14, 13623,  9102    ; m3/13=t14[d], m2/14=t15[d]
-    VP9_RND_SH_SUMSUB_BA     3, 10, 13,  7, 15, [pd_8192]   ; m3=t10[w], m10=t14[w]
-    VP9_RND_SH_SUMSUB_BA     2, 11, 14,  6, 15, [pd_8192]   ; m2=t11[w], m11=t15[w]
-
-    ; m5=t8, m4=t9, m3=t10, m2=t11, m1=t12, m0=t13, m10=t14, m11=t15
-
-    VP9_UNPACK_MULSUB_2D_4X  1,  0,  6,  7, 15137,  6270    ; m1/6=t13[d], m0/7=t12[d]
-    VP9_UNPACK_MULSUB_2D_4X 11, 10, 13, 14,  6270, 15137    ; m11/13=t14[d], m10/14=t15[d]
-    VP9_RND_SH_SUMSUB_BA    11,  0, 13,  7, 15, [pd_8192]   ; m11=out2[w], m0=t14[w]
-    VP9_RND_SH_SUMSUB_BA    10,  1, 14,  6, 15, [pd_8192]
-    psignw                 m10, [pw_m1]                     ; m10=out13[w], m1=t15[w]
-
-    SUMSUB_BA                w,  3,  5, 15
-    psignw                  m3, [pw_m1]                     ; m3=out1[w], m5=t10[w]
-    SUMSUB_BA                w,  2,  4, 15                  ; m2=out14[w], m4=t11[w]
-
-    SUMSUB_BA                w,  5,  4, 15
-    pmulhrsw                m5, [pw_11585x2]                ; m5=out6[w]
-    pmulhrsw                m4, [pw_11585x2]                ; m4=out9[w]
-    SUMSUB_BA                w,  1,  0, 15
-    pmulhrsw                m1, [pw_m11585x2]               ; m1=out5[w]
-    pmulhrsw                m0, [pw_11585x2]                ; m0=out10[w]
-
-    ; m3=out1, m11=out2, m1=out5, m5=out6, m4=out9, m0=out10, m10=out13, m2=out14
-
-    mova                    m6, [tmpq+ 0*%%str]
-    mova                    m7, [tmpq+ 2*%%str]
-    mova                   m13, [tmpq+ 3*%%str]
-    mova                   m14, [tmpq+ 4*%%str]
-    mova                   m15, [tmpq+ 5*%%str]
-    mova       [tmpq+ 8*%%str], m5
-    mova       [tmpq+ 9*%%str], m4
-    mova       [tmpq+10*%%str], m0
-    mova       [tmpq+11*%%str], m10
-    mova       [tmpq+12*%%str], m2
+    ; m11|r13=out1, m5=out2, m7=out5, r15=out6, r3=out9, m6=out10, m4=out13, r2=out14
+
+    mova                    m2, [tmpq+ 8*%%str]
+    mova                    m3, [tmpq+ 7*%%str]
+    mova                    m1, [tmpq+11*%%str]
+    mova       [tmpq+ 7*%%str], m6
+    mova       [tmpq+11*%%str], m4
+    mova                    m4, [tmpq+ 5*%%str]
+    SCRATCH                  5, 14, tmpq+ 5*%%str
+    SCRATCH                  7, 15, tmpq+ 8*%%str
+    UNSCRATCH                6,  8, tmpq+ 4*%%str
+    UNSCRATCH                5, 12, tmpq+ 1*%%str
+    UNSCRATCH                7, 13, tmpq+14*%%str
+
+    ; m2=t0, m3=t1, m9=t2, m0=t3, m1=t4, m8=t5, m13=t6, m12=t7
+    ; m11|r13=out1, m5=out2, m7=out5, r15=out6, r3=out9, r10=out10, r11=out13, r2=out14
+
+    SUMSUB_BA                w,  1,  2, 0                   ; m1=t0[w], m2=t4[w]
+    mova                    m0, [tmpq+10*%%str]
+    SCRATCH                  1, 12, tmpq+ 1*%%str
+    SUMSUB_BA                w,  6,  3, 1                   ; m8=t1[w], m3=t5[w]
+    SCRATCH                  6, 13, tmpq+ 4*%%str
+    SUMSUB_BA                w,  7,  4, 1                   ; m13=t2[w], m9=t6[w]
+    SCRATCH                  7,  8, tmpq+10*%%str
+    SUMSUB_BA                w,  5,  0, 1                   ; m12=t3[w], m0=t7[w]
+    SCRATCH                  5,  9, tmpq+14*%%str
+
+    VP9_UNPACK_MULSUB_2D_4X  2,  3,  7,  5, 15137,  6270    ; m2/6=t5[d], m3/10=t4[d]
+    VP9_UNPACK_MULSUB_2D_4X  0,  4,  1,  6,  6270, 15137    ; m0/14=t6[d], m9/15=t7[d]
+    SCRATCH                  6, 10, tmpq+ 0*%%str
+    VP9_RND_SH_SUMSUB_BA     0,  3,  1,  5,  6, [pd_8192]
+    UNSCRATCH                6, 10, tmpq+ 0*%%str
+    PSIGNW                  m0, [pw_m1]                     ; m0=out3[w], m3=t6[w]
+    VP9_RND_SH_SUMSUB_BA     4,  2,  6,  7,  5, [pd_8192]   ; m9=out12[w], m2=t7[w]
+
+    UNSCRATCH                1,  8, tmpq+10*%%str
+    UNSCRATCH                5,  9, tmpq+14*%%str
+    UNSCRATCH                6, 12, tmpq+ 1*%%str
+    UNSCRATCH                7, 13, tmpq+ 4*%%str
+    SCRATCH                  4,  9, tmpq+14*%%str
+
+    SUMSUB_BA                w,  1,  6,  4                  ; m13=out0[w], m1=t2[w]
+    SUMSUB_BA                w,  5,  7,  4
+    PSIGNW                  m5, [pw_m1]                     ; m12=out15[w], m8=t3[w]
+
+%if cpuflag(ssse3)
+    SUMSUB_BA                w,  7,  6,  4
+    pmulhrsw                m7, [pw_m11585x2]               ; m8=out7[w]
+    pmulhrsw                m6, [pw_11585x2]                ; m1=out8[w]
+    SUMSUB_BA                w,  3,  2,  4
+    pmulhrsw                m3, [pw_11585x2]                ; m3=out4[w]
+    pmulhrsw                m2, [pw_11585x2]                ; m2=out11[w]
+%else
+    SCRATCH                  5,  8, tmpq+10*%%str
+    PSIGNW                  m7, [pw_m1]
+    VP9_UNPACK_MULSUB_2W_4X  7,  6, 11585, 11585, [pd_8192],  5,  4
+    VP9_UNPACK_MULSUB_2W_4X  2,  3, 11585, 11585, [pd_8192],  5,  4
+    UNSCRATCH                5,  8, tmpq+10*%%str
+%endif
 
-    ; m6=t0, m7=t1, m13=t2, m14=t3, m15=t4, m12=t5, m9=t6, m8=t7
-    ; m3=out1, m11=out2, m1=out5, r8=out6, r9=out9, r10=out10, r11=out13, r12=out14
-
-    SUMSUB_BA                w, 15,  6,  0                  ; m15=t0[w], m6=t4[w]
-    SUMSUB_BA                w, 12,  7,  0                  ; m12=t1[w], m7=t5[w]
-    SUMSUB_BA                w,  9, 13,  0                  ; m9=t2[w], m13=t6[w]
-    SUMSUB_BA                w,  8, 14,  0                  ; m8=t3[w], m14=t7[w]
-
-    VP9_UNPACK_MULSUB_2D_4X  6,  7,  0,  2, 15137,  6270    ; m6/0=t5[d], m7/2=t4[d]
-    VP9_UNPACK_MULSUB_2D_4X 14, 13,  4,  5,  6270, 15137    ; m14/4=t6[d], m13/5=t7[d]
-    VP9_RND_SH_SUMSUB_BA    14,  7,  4,  2, 10, [pd_8192]
-    psignw                 m14, [pw_m1]                     ; m14=out3[w], m7=t6[w]
-    VP9_RND_SH_SUMSUB_BA    13,  6,  5,  0, 10, [pd_8192]   ; m13=out12[w], m6=t7[w]
-    SUMSUB_BA                w,  9, 15, 10                  ; m9=out0[w], m15=t2[w]
-    SUMSUB_BA                w,  8, 12, 10
-    psignw                  m8, [pw_m1]                     ; m8=out15[w], m12=t3[w]
-
-    SUMSUB_BA                w, 12, 15, 10
-    pmulhrsw               m12, [pw_m11585x2]               ; m12=out7[w]
-    pmulhrsw               m15, [pw_11585x2]                ; m15=out8[w]
-    SUMSUB_BA                w,  7,  6, 10
-    pmulhrsw                m7, [pw_11585x2]                ; m7=out4[w]
-    pmulhrsw                m6, [pw_11585x2]                ; m6=out11[w]
-
-    ; m9=out0, m14=out3, m7=out4, m12=out7, m15=out8, m6=out11, m13=out12, m8=out15
-    ; m3=out1, m11=out2, m1=out5, r8=out6, r9=out9, r10=out10, r11=out13, r12=out14
+    ; m13=out0, m0=out3, m3=out4, m8=out7, m1=out8, m2=out11, m9=out12, m12=out15
+    ; m11|r13=out1, m5=out2, m7=out5, r15=out6, r3=out9, r10=out10, r11=out13, r2=out14
 
 %if %2 == 1
-    mova                    m0, [tmpq+ 8*%%str]
-    TRANSPOSE8x8W            9, 3, 11, 14, 7, 1, 0, 12, 2
-    mova          [tmpq+ 0*16], m9
-    mova          [tmpq+ 2*16], m3
-    mova          [tmpq+ 4*16], m11
-    mova          [tmpq+ 6*16], m14
-    mova                    m9, [tmpq+ 9*%%str]
-    mova                    m3, [tmpq+10*%%str]
-    mova                   m11, [tmpq+11*%%str]
-    mova                   m14, [tmpq+12*%%str]
-    mova          [tmpq+ 8*16], m7
-    mova          [tmpq+10*16], m1
-    mova          [tmpq+12*16], m0
-    mova          [tmpq+14*16], m12
-
-    TRANSPOSE8x8W           15, 9, 3, 6, 13, 11, 14, 8, 2
-    mova          [tmpq+ 1*16], m15
-    mova          [tmpq+ 3*16], m9
-    mova          [tmpq+ 5*16], m3
-    mova          [tmpq+ 7*16], m6
-    mova          [tmpq+ 9*16], m13
-    mova          [tmpq+11*16], m11
-    mova          [tmpq+13*16], m14
-    mova          [tmpq+15*16], m8
+%if ARCH_X86_64
+    mova                   m13, [tmpq+ 6*%%str]
+    TRANSPOSE8x8W            1, 11, 14, 0, 3, 15, 13, 7, 10
+    mova          [tmpq+ 0*16], m1
+    mova          [tmpq+ 2*16], m11
+    mova          [tmpq+ 4*16], m14
+    mova          [tmpq+ 6*16], m0
+    mova                    m1, [tmpq+ 3*%%str]
+    mova                   m11, [tmpq+ 7*%%str]
+    mova                   m14, [tmpq+11*%%str]
+    mova                    m0, [tmpq+13*%%str]
+    mova          [tmpq+ 8*16], m3
+    mova          [tmpq+10*16], m15
+    mova          [tmpq+12*16], m13
+    mova          [tmpq+14*16], m7
+
+    TRANSPOSE8x8W            6, 1, 11, 2, 9, 14, 0, 5, 10
+    mova          [tmpq+ 1*16], m6
+    mova          [tmpq+ 3*16], m1
+    mova          [tmpq+ 5*16], m11
+    mova          [tmpq+ 7*16], m2
+    mova          [tmpq+ 9*16], m9
+    mova          [tmpq+11*16], m14
+    mova          [tmpq+13*16], m0
+    mova          [tmpq+15*16], m5
 %else
-    mova                    m5, [tmpq+ 8*%%str]
-    pxor                    m0, m0
+    mova       [tmpq+12*%%str], m2
+    mova       [tmpq+ 1*%%str], m5
+    mova       [tmpq+15*%%str], m6
+    mova                    m2, [tmpq+ 9*%%str]
+    mova                    m5, [tmpq+ 5*%%str]
+    mova                    m6, [tmpq+ 8*%%str]
+    TRANSPOSE8x8W            1, 2, 5, 0, 3, 6, 4, 7, [tmpq+ 6*%%str], [tmpq+ 8*%%str], 1
+    mova          [tmpq+ 0*16], m1
+    mova          [tmpq+ 2*16], m2
+    mova          [tmpq+ 4*16], m5
+    mova          [tmpq+ 6*16], m0
+    mova          [tmpq+10*16], m6
+    mova                    m3, [tmpq+12*%%str]
+    mova          [tmpq+12*16], m4
+    mova                    m4, [tmpq+14*%%str]
+    mova          [tmpq+14*16], m7
+
+    mova                    m0, [tmpq+15*%%str]
+    mova                    m1, [tmpq+ 3*%%str]
+    mova                    m2, [tmpq+ 7*%%str]
+    mova                    m5, [tmpq+11*%%str]
+    mova                    m7, [tmpq+ 1*%%str]
+    TRANSPOSE8x8W            0, 1, 2, 3, 4, 5, 6, 7, [tmpq+13*%%str], [tmpq+ 9*%%str], 1
+    mova          [tmpq+ 1*16], m0
+    mova          [tmpq+ 3*16], m1
+    mova          [tmpq+ 5*16], m2
+    mova          [tmpq+ 7*16], m3
+    mova          [tmpq+11*16], m5
+    mova          [tmpq+13*16], m6
+    mova          [tmpq+15*16], m7
+%endif
+%else
+    pxor                    m4, m4
+
+%if cpuflag(ssse3)
+%define ROUND_REG [pw_512]
+%else
+%define ROUND_REG [pw_32]
+%endif
 
-    pmulhrsw                m9, [pw_512]
-    pmulhrsw                m3, [pw_512]
-    VP9_STORE_2X             9,  3, 2, 4, 0
+%if ARCH_X86_64
+    mova                   m12, [tmpq+ 6*%%str]
+    VP9_IDCT8_WRITEx2        1, 11, 10,  8,  4, ROUND_REG, 6
     lea                   dstq, [dstq+strideq*2]
-    pmulhrsw               m11, [pw_512]
-    pmulhrsw               m14, [pw_512]
-    VP9_STORE_2X            11, 14, 2, 4, 0
+    VP9_IDCT8_WRITEx2       14,  0, 10,  8,  4, ROUND_REG, 6
     lea                   dstq, [dstq+strideq*2]
-    pmulhrsw                m7, [pw_512]
-    pmulhrsw                m1, [pw_512]
-    VP9_STORE_2X             7,  1, 2, 4, 0
+    VP9_IDCT8_WRITEx2        3, 15, 10,  8,  4, ROUND_REG, 6
     lea                   dstq, [dstq+strideq*2]
-    pmulhrsw                m5, [pw_512]
-    pmulhrsw               m12, [pw_512]
-    VP9_STORE_2X             5, 12, 2, 4, 0
+    VP9_IDCT8_WRITEx2       12,  7, 10,  8,  4, ROUND_REG, 6
     lea                   dstq, [dstq+strideq*2]
 
-    mova                    m9, [tmpq+ 9*%%str]
-    mova                    m3, [tmpq+10*%%str]
-    mova                   m11, [tmpq+11*%%str]
-    mova                   m14, [tmpq+12*%%str]
+    mova                    m1, [tmpq+ 3*%%str]
+    mova                   m11, [tmpq+ 7*%%str]
+    mova                   m14, [tmpq+11*%%str]
+    mova                    m0, [tmpq+13*%%str]
 
-    pmulhrsw               m15, [pw_512]
-    pmulhrsw                m9, [pw_512]
-    VP9_STORE_2X            15,  9, 2, 4, 0
+    VP9_IDCT8_WRITEx2        6,  1, 10,  8,  4, ROUND_REG, 6
     lea                   dstq, [dstq+strideq*2]
-    pmulhrsw                m3, [pw_512]
-    pmulhrsw                m6, [pw_512]
-    VP9_STORE_2X             3,  6, 2, 4, 0
+    VP9_IDCT8_WRITEx2       11,  2, 10,  8,  4, ROUND_REG, 6
     lea                   dstq, [dstq+strideq*2]
-    pmulhrsw               m13, [pw_512]
-    pmulhrsw               m11, [pw_512]
-    VP9_STORE_2X            13, 11, 2, 4, 0
+    VP9_IDCT8_WRITEx2        9, 14, 10,  8,  4, ROUND_REG, 6
     lea                   dstq, [dstq+strideq*2]
-    pmulhrsw               m14, [pw_512]
-    pmulhrsw                m8, [pw_512]
-    VP9_STORE_2X            14,  8, 2, 4, 0
+    VP9_IDCT8_WRITEx2        0,  5, 10,  8,  4, ROUND_REG, 6
+%else
+    mova       [tmpq+ 0*%%str], m2
+    mova       [tmpq+ 1*%%str], m5
+    mova       [tmpq+ 2*%%str], m6
+    mova                    m2, [tmpq+ 9*%%str]
+    VP9_IDCT8_WRITEx2        1,  2,  5,  6,  4, ROUND_REG, 6
+    lea                   dstq, [dstq+strideq*2]
+    mova                    m5, [tmpq+ 5*%%str]
+    VP9_IDCT8_WRITEx2        5,  0,  1,  2,  4, ROUND_REG, 6
+    lea                   dstq, [dstq+strideq*2]
+    mova                    m5, [tmpq+ 8*%%str]
+    VP9_IDCT8_WRITEx2        3,  5,  1,  2,  4, ROUND_REG, 6
+    lea                   dstq, [dstq+strideq*2]
+    mova                    m5, [tmpq+ 6*%%str]
+    VP9_IDCT8_WRITEx2        5,  7,  1,  2,  4, ROUND_REG, 6
+    lea                   dstq, [dstq+strideq*2]
+
+    mova                    m0, [tmpq+ 2*%%str]
+    mova                    m3, [tmpq+ 3*%%str]
+    VP9_IDCT8_WRITEx2        0,  3,  1,  2,  4, ROUND_REG, 6
+    lea                   dstq, [dstq+strideq*2]
+    mova                    m0, [tmpq+ 7*%%str]
+    mova                    m3, [tmpq+ 0*%%str]
+    VP9_IDCT8_WRITEx2        0,  3,  1,  2,  4, ROUND_REG, 6
+    lea                   dstq, [dstq+strideq*2]
+    mova                    m0, [tmpq+14*%%str]
+    mova                    m3, [tmpq+11*%%str]
+    VP9_IDCT8_WRITEx2        0,  3,  1,  2,  4, ROUND_REG, 6
+    lea                   dstq, [dstq+strideq*2]
+    mova                    m0, [tmpq+13*%%str]
+    mova                    m3, [tmpq+ 1*%%str]
+    VP9_IDCT8_WRITEx2        0,  3,  1,  2,  4, ROUND_REG, 6
+%endif
+
+    SWAP                     0,  4 ; zero
+%undef ROUND_REG
 %endif
 %endmacro
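
The SCRATCH/UNSCRATCH pairs all through this macro are what make the 32-bit build work: the transform wants around 16 live vectors, but x86-32 only has xmm0-7. Reading the call sites, SCRATCH n, alt, addr parks m<n> either as a rename to m<alt> on x86-64 (a compile-time SWAP, no instruction emitted) or as a store to the stack slot addr on x86-32; UNSCRATCH is the reverse. The macro definitions themselves are not in this hunk, so this is inferred; the 32-bit half modeled in C:

    #include <stdint.h>
    #include <string.h>

    typedef int16_t xmmword[8];
    static xmmword m[8];      /* all the registers x86-32 has     */
    static xmmword tmp[32];   /* the tmpq spill area on the stack */

    /* SCRATCH n, alt, tmpq+slot on x86-32: spill so m[n] can be reused */
    static void scratch32(int n, int slot)
    {
        memcpy(tmp[slot], m[n], sizeof(xmmword));
    }

    /* UNSCRATCH n, alt, tmpq+slot on x86-32: reload the spilled value */
    static void unscratch32(int n, int slot)
    {
        memcpy(m[n], tmp[slot], sizeof(xmmword));
    }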
 
@@ -1166,12 +1872,18 @@ cglobal vp9_%1_%3_16x16_add, 3, 6, 16, 512, dst, stride, block, cnt, dst_bak, tm
     RET
 %endmacro
 
+%define PSIGNW PSIGNW_MMX
+IADST16_FN idct,  IDCT16,  iadst, IADST16, sse2
+IADST16_FN iadst, IADST16, idct,  IDCT16,  sse2
+IADST16_FN iadst, IADST16, iadst, IADST16, sse2
+%define PSIGNW PSIGNW_SSSE3
 IADST16_FN idct,  IDCT16,  iadst, IADST16, ssse3
-IADST16_FN idct,  IDCT16,  iadst, IADST16, avx
 IADST16_FN iadst, IADST16, idct,  IDCT16,  ssse3
-IADST16_FN iadst, IADST16, idct,  IDCT16,  avx
 IADST16_FN iadst, IADST16, iadst, IADST16, ssse3
+IADST16_FN idct,  IDCT16,  iadst, IADST16, avx
+IADST16_FN iadst, IADST16, idct,  IDCT16,  avx
 IADST16_FN iadst, IADST16, iadst, IADST16, avx
+%undef PSIGNW
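
PSIGNW above is a macro, not the bare instruction: psignw only exists from SSSE3 on, so the SSE2 instantiations bind PSIGNW_MMX (presumably the classic (a ^ b) - b emulation) while the SSSE3/AVX ones bind the real instruction. That emulation matches psignw only when every lane of the sign operand is 0 or -1, and [pw_m1], the only sign operand used in this file, is exactly that case, where both reduce to negation. Per-lane semantics (sketch):

    #include <stdint.h>

    /* full psignw semantics (SSSE3 instruction); wraps, no saturation */
    static int16_t psignw_lane(int16_t a, int16_t b)
    {
        return b < 0 ? (int16_t)-a : b == 0 ? 0 : a;
    }

    /* the assumed pre-SSSE3 emulation: equal to psignw_lane whenever
       b is 0 or -1, which covers the pw_m1 use above */
    static int16_t psignw_emul_lane(int16_t a, int16_t b)
    {
        return (int16_t)((a ^ b) - b);
    }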
 
 ;---------------------------------------------------------------------------------------------
 ; void vp9_idct_idct_32x32_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
@@ -1180,14 +1892,7 @@ IADST16_FN iadst, IADST16, iadst, IADST16, avx
 %macro VP9_IDCT32_1D 2-3 32 ; src, pass, nnzc
 %assign %%str 16*%2*%2
     ; first do t0-15, this can be done identically to idct16x16
-    VP9_IDCT16_1D_START %1, %3/2, 64*2, tmpq+ 4*%%str
-
-    ; backup a different register
-    mova    [tmpq+30*%%str], m15    ; t15
-    mova                m7, [tmpq+ 4*%%str]
-
-    SUMSUB_BA            w,  6,  9, 15      ; t6, t9
-    SUMSUB_BA            w,  7,  8, 15      ; t7, t8
+    VP9_IDCT16_1D_START %1, %3/2, 64*2, tmpq, 2*%%str
 
     ; store everything on stack to make space available for t16-31
     ; we store interleaved with the output of the second half (t16-31)
@@ -1198,258 +1903,339 @@ IADST16_FN iadst, IADST16, iadst, IADST16, avx
     mova    [tmpq+12*%%str], m3     ; t3
     mova    [tmpq+16*%%str], m4     ; t4
     mova    [tmpq+20*%%str], m5     ; t5
-    mova    [tmpq+24*%%str], m6     ; t6
-    mova    [tmpq+28*%%str], m7     ; t7
-    mova    [tmpq+ 2*%%str], m8     ; t8
-    mova    [tmpq+ 6*%%str], m9     ; t9
-    mova    [tmpq+10*%%str], m10    ; t10
-    mova    [tmpq+14*%%str], m11    ; t11
-    mova    [tmpq+18*%%str], m12    ; t12
-    mova    [tmpq+22*%%str], m13    ; t13
-    mova    [tmpq+26*%%str], m14    ; t14
+%if ARCH_X86_64
+    mova    [tmpq+22*%%str], m10    ; t10
+    mova    [tmpq+18*%%str], m11    ; t11
+    mova    [tmpq+14*%%str], m12    ; t12
+    mova    [tmpq+10*%%str], m13    ; t13
+    mova    [tmpq+ 6*%%str], m14    ; t14
+    mova    [tmpq+ 2*%%str], m15    ; t15
+%endif
+
+    mova                m0, [tmpq+ 30*%%str]
+    UNSCRATCH            1,  6, tmpq+26*%%str
+    UNSCRATCH            2,  8, tmpq+24*%%str
+    UNSCRATCH            3,  9, tmpq+28*%%str
+    SUMSUB_BA            w,  1,  3, 4       ; t6, t9
+    SUMSUB_BA            w,  0,  2, 4       ; t7, t8
+
+    mova    [tmpq+24*%%str], m1     ; t6
+    mova    [tmpq+28*%%str], m0     ; t7
+    mova    [tmpq+30*%%str], m2     ; t8
+    mova    [tmpq+26*%%str], m3     ; t9
 
     ; then, secondly, do t16-31
 %if %3 <= 8
     mova                 m4, [%1+ 1*64]
-    mova                 m3, [%1+ 3*64]
-    mova                 m0, [%1+ 5*64]
     mova                 m7, [%1+ 7*64]
 
-    pmulhrsw            m11,  m4, [pw_16364x2] ;t31
+    pmulhrsw             m1,  m4, [pw_16364x2] ;t31
     pmulhrsw             m4, [pw_804x2] ;t16
-    pmulhrsw             m8,  m7, [pw_m5520x2] ;t19
-    pmulhrsw             m7, [pw_15426x2] ;t28
-    pmulhrsw            m15,  m0, [pw_15893x2] ;t27
-    pmulhrsw             m0, [pw_3981x2] ;t20
-    pmulhrsw            m12,  m3, [pw_m2404x2] ;t23
-    pmulhrsw             m3, [pw_16207x2] ;t24
 
-    ; m4=t16/17, m8=t18/19, m0=t20/21, m12=t22/23,
-    ; m3=t24/25, m15=t26/27, m7=t28/29, m11=t30/31
+    VP9_UNPACK_MULSUB_2W_4X   5,  0,  1,  4, 16069,  3196, [pd_8192], 6,  2 ; t17, t30
 
-    VP9_UNPACK_MULSUB_2W_4X   5, 10, 11,  4, 16069,  3196, [pd_8192], 6,  9 ; t17, t30
-    VP9_UNPACK_MULSUB_2W_4X   9,  6,  7,  8, 3196, m16069, [pd_8192], 1, 14 ; t18, t29
-    ; from 1 stage forward
-    SUMSUB_BA                 w,  8,  4,  1
-    ; temporary storage
-    mova    [tmpq+17*%%str], m8             ; t16
-    mova    [tmpq+21*%%str], m4             ; t19
-    VP9_UNPACK_MULSUB_2W_4X   1, 14, 15,  0,  9102, 13623, [pd_8192], 4,  8 ; t21, t26
-    VP9_UNPACK_MULSUB_2W_4X  13,  2,  3, 12, 13623, m9102, [pd_8192], 4,  8 ; t22, t25
+    pmulhrsw             m3,  m7, [pw_m5520x2] ;t19
+    pmulhrsw             m7, [pw_15426x2] ;t28
 
-    ; m4=t16, m5=t17, m9=t18, m8=t19, m0=t20, m1=t21, m13=t22, m12=t23,
-    ; m3=t24, m2=t25, m14=t26, m15=t27, m7=t28, m6=t29, m10=t30, m11=t31
+    SCRATCH               4, 13, tmpq+ 1*%%str
+    SCRATCH               5, 12, tmpq+15*%%str
+
+    VP9_UNPACK_MULSUB_2W_4X   2,  6,  7,  3, 3196, m16069, [pd_8192], 4,  5 ; t18, t29
 %else
-    mova                m10, [%1+ 1*64]
-    mova                m13, [%1+ 3*64]
-    mova                m14, [%1+ 5*64]
-    mova                 m9, [%1+ 7*64]
-    mova                 m8, [%1+ 9*64]
-    mova                m15, [%1+11*64]
-    mova                m12, [%1+13*64]
-    mova                m11, [%1+15*64]
+    mova                 m0, [%1+ 1*64]
+    mova                 m1, [%1+15*64]
 %if %3 <= 16
-    pmulhrsw             m5, m10, [pw_16364x2]
-    pmulhrsw            m10, [pw_804x2]
-    pmulhrsw             m4, m11, [pw_m11003x2]
-    pmulhrsw            m11, [pw_12140x2]
-    pmulhrsw             m7,  m8, [pw_14811x2]
-    pmulhrsw             m8, [pw_7005x2]
-    pmulhrsw             m6,  m9, [pw_m5520x2]
-    pmulhrsw             m9, [pw_15426x2]
-    pmulhrsw             m1, m14, [pw_15893x2]
-    pmulhrsw            m14, [pw_3981x2]
-    pmulhrsw             m0, m15, [pw_m8423x2]
-    pmulhrsw            m15, [pw_14053x2]
+    pmulhrsw             m5, m0, [pw_16364x2]
+    pmulhrsw             m0, [pw_804x2]
+    pmulhrsw             m4, m1, [pw_m11003x2]
+    pmulhrsw             m1, [pw_12140x2]
 %else
     mova                 m4, [%1+17*64]
-    mova                 m0, [%1+21*64]
+    mova                 m5, [%1+31*64]
+
+    VP9_UNPACK_MULSUB_2W_4X   0,  5, 16364,   804, [pd_8192], 2, 3 ; t16, t31
+    VP9_UNPACK_MULSUB_2W_4X   4,  1, 11003, 12140, [pd_8192], 2, 3 ; t17, t30
+%endif
+    SUMSUB_BA             w,  4,  0,  2
+    SUMSUB_BA             w,  1,  5,  2
+
+    VP9_UNPACK_MULSUB_2W_4X   5,  0, 16069,  3196, [pd_8192], 2, 3 ; t17, t30
+
+    SCRATCH               4, 13, tmpq+ 1*%%str
+    SCRATCH               5, 12, tmpq+15*%%str
+
+    mova                 m2, [%1+ 7*64]
+    mova                 m3, [%1+ 9*64]
+%if %3 <= 16
+    pmulhrsw             m7,  m3, [pw_14811x2]
+    pmulhrsw             m3, [pw_7005x2]
+    pmulhrsw             m6,  m2, [pw_m5520x2]
+    pmulhrsw             m2, [pw_15426x2]
+%else
     mova                 m7, [%1+23*64]
     mova                 m6, [%1+25*64]
-    mova                 m1, [%1+27*64]
-    mova                 m5, [%1+31*64]
 
-    ; m10=in1, m4=in17, m8=in9, m6=in25, m14=in5, m0=in21, m12=in13, m2=in29,
-    ; m13=in3, m3=in19, m15=in11, m1=in27, m9=in7, m7=in23, m11=in15, m5=in31
-
-    VP9_UNPACK_MULSUB_2W_4X  10,  5, 16364,   804, [pd_8192], 2, 3 ; t16, t31
-    VP9_UNPACK_MULSUB_2W_4X   4, 11, 11003, 12140, [pd_8192], 2, 3 ; t17, t30
-    VP9_UNPACK_MULSUB_2W_4X   8,  7, 14811,  7005, [pd_8192], 2, 3 ; t18, t29
-    VP9_UNPACK_MULSUB_2W_4X   6,  9,  5520, 15426, [pd_8192], 2, 3 ; t19, t28
-    VP9_UNPACK_MULSUB_2W_4X  14,  1, 15893,  3981, [pd_8192], 2, 3 ; t20, t27
-    VP9_UNPACK_MULSUB_2W_4X   0, 15,  8423, 14053, [pd_8192], 2, 3 ; t21, t26
-%endif
-
-    ; from 1 stage forward
-    SUMSUB_BA             w,  4, 10,  2
-    SUMSUB_BA             w,  8,  6,  2
-    ; from 2 stages forward
-    SUMSUB_BA             w,  8,  4,  2
-    ; temporary storage
-    mova    [tmpq+17*%%str], m8             ; t16
-    mova    [tmpq+21*%%str], m4             ; t19
+    VP9_UNPACK_MULSUB_2W_4X   3,  7, 14811,  7005, [pd_8192], 4, 5 ; t18, t29
+    VP9_UNPACK_MULSUB_2W_4X   6,  2,  5520, 15426, [pd_8192], 4, 5 ; t19, t28
+%endif
+    SUMSUB_BA             w,  3,  6,  4
+    SUMSUB_BA             w,  7,  2,  4
+
+    VP9_UNPACK_MULSUB_2W_4X   2,  6, 3196, m16069, [pd_8192], 4, 5 ; t18, t29
+%endif
+
+    UNSCRATCH             5, 12, tmpq+15*%%str
+    SUMSUB_BA             w,  6,  0,  4
+    mova    [tmpq+25*%%str], m6             ; t19
+    UNSCRATCH             4, 13, tmpq+ 1*%%str
+    SUMSUB_BA             w,  7,  1,  6
+    SUMSUB_BA             w,  3,  4,  6
+    mova    [tmpq+23*%%str], m3             ; t16
+    SUMSUB_BA             w,  2,  5,  6
+
+    VP9_UNPACK_MULSUB_2W_4X   0,  5, 15137,  6270, [pd_8192], 6, 3 ; t18, t29
+    VP9_UNPACK_MULSUB_2W_4X   1,  4, 15137,  6270, [pd_8192], 6, 3 ; t19, t28
+
+    SCRATCH               0, 10, tmpq+ 1*%%str
+    SCRATCH               1, 11, tmpq+ 7*%%str
+    SCRATCH               2,  9, tmpq+ 9*%%str
+    SCRATCH               4, 14, tmpq+15*%%str
+    SCRATCH               5, 15, tmpq+17*%%str
+    SCRATCH               7, 13, tmpq+31*%%str
+
+%if %3 <= 8
+    mova                 m0, [%1+ 5*64]
+    mova                 m3, [%1+ 3*64]
+
+    pmulhrsw             m5,  m0, [pw_15893x2] ;t27
+    pmulhrsw             m0, [pw_3981x2] ;t20
+
+    VP9_UNPACK_MULSUB_2W_4X   1,  4,  5,  0,  9102, 13623, [pd_8192], 7,  2 ; t21, t26
+
+    pmulhrsw             m6,  m3, [pw_m2404x2] ;t23
+    pmulhrsw             m3, [pw_16207x2] ;t24
+
+    SCRATCH               5,  8, tmpq+ 5*%%str
+    SCRATCH               4, 12, tmpq+11*%%str
+
+    VP9_UNPACK_MULSUB_2W_4X   7,  2,  3,  6, 13623, m9102, [pd_8192], 4, 5 ; t22, t25
+%else
+    mova                 m4, [%1+ 5*64]
+    mova                 m5, [%1+11*64]
 %if %3 <= 16
-    pmulhrsw             m3, m12, [pw_13160x2]
-    pmulhrsw            m12, [pw_9760x2]
-    pmulhrsw             m2, m13, [pw_m2404x2]
-    pmulhrsw            m13, [pw_16207x2]
+    pmulhrsw             m1, m4, [pw_15893x2]
+    pmulhrsw             m4, [pw_3981x2]
+    pmulhrsw             m0, m5, [pw_m8423x2]
+    pmulhrsw             m5, [pw_14053x2]
 %else
-    mova                 m2, [%1+29*64]
-    mova                 m3, [%1+19*64]
-    VP9_UNPACK_MULSUB_2W_4X  12,  3, 13160,  9760, [pd_8192], 4, 8 ; t22, t25
-    VP9_UNPACK_MULSUB_2W_4X   2, 13,  2404, 16207, [pd_8192], 4, 8 ; t23, t24
+    mova                 m0, [%1+21*64]
+    mova                 m1, [%1+27*64]
+
+    VP9_UNPACK_MULSUB_2W_4X   4,  1, 15893,  3981, [pd_8192], 2, 3 ; t20, t27
+    VP9_UNPACK_MULSUB_2W_4X   0,  5,  8423, 14053, [pd_8192], 2, 3 ; t21, t26
 %endif
+    SUMSUB_BA             w,  0,  4,  2
+    SUMSUB_BA             w,  5,  1,  2
 
-    ; m10=t16, m4=t17, m8=t18, m6=t19, m14=t20, m0=t21, m12=t22, m2=t23,
-    ; m13=t24, m3=t25, m15=t26, m1=t27, m9=t28, m7=t29, m11=t30, m5=t31
+    VP9_UNPACK_MULSUB_2W_4X   1,  4,  9102, 13623, [pd_8192], 2, 3 ; t21, t26
 
-    SUMSUB_BA             w,  0, 14,  4
-    SUMSUB_BA             w, 12,  2,  4
-    SUMSUB_BA             w,  3, 13,  4
-    SUMSUB_BA             w, 15,  1,  4
-    SUMSUB_BA             w,  7,  9,  4
-    SUMSUB_BA             w, 11,  5,  4
+    SCRATCH               5,  8, tmpq+ 5*%%str
+    SCRATCH               4, 12, tmpq+11*%%str
 
-    ; m4=t16, m10=t17, m6=t18, m8=t19, m0=t20, m14=t21, m2=t22, m12=t23,
-    ; m3=t24, m13=t25, m1=t26, m15=t27, m7=t28, m9=t29, m5=t30, m11=t31
+    mova                 m7, [%1+ 3*64]
+    mova                 m6, [%1+13*64]
+%if %3 <= 16
+    pmulhrsw             m3, m6, [pw_13160x2]
+    pmulhrsw             m6, [pw_9760x2]
+    pmulhrsw             m2, m7, [pw_m2404x2]
+    pmulhrsw             m7, [pw_16207x2]
+%else
+    mova                 m2, [%1+29*64]
+    mova                 m3, [%1+19*64]
+    VP9_UNPACK_MULSUB_2W_4X   6,  3, 13160,  9760, [pd_8192], 4, 5 ; t22, t25
+    VP9_UNPACK_MULSUB_2W_4X   2,  7,  2404, 16207, [pd_8192], 4, 5 ; t23, t24
+%endif
+    SUMSUB_BA             w,  6,  2,  4
+    SUMSUB_BA             w,  3,  7,  4
 
-    VP9_UNPACK_MULSUB_2W_4X   5, 10, 16069,  3196, [pd_8192], 4, 8 ; t17, t30
-    VP9_UNPACK_MULSUB_2W_4X   9,  6, 3196, m16069, [pd_8192], 4, 8 ; t18, t29
-    VP9_UNPACK_MULSUB_2W_4X   1, 14,  9102, 13623, [pd_8192], 4, 8 ; t21, t26
-    VP9_UNPACK_MULSUB_2W_4X  13,  2, 13623, m9102, [pd_8192], 4, 8 ; t22, t25
+    VP9_UNPACK_MULSUB_2W_4X   7,  2, 13623, m9102, [pd_8192], 4, 5 ; t22, t25
 %endif
 
     ; m4=t16, m5=t17, m9=t18, m8=t19, m0=t20, m1=t21, m13=t22, m12=t23,
     ; m3=t24, m2=t25, m14=t26, m15=t27, m7=t28, m6=t29, m10=t30, m11=t31
 
-    SUMSUB_BA             w,  9,  5,  4
-    SUMSUB_BA             w,  1, 13,  4
-    SUMSUB_BA             w,  0, 12,  4
-    SUMSUB_BA             w, 15,  3,  4
-    SUMSUB_BA             w, 14,  2,  4
-    SUMSUB_BA             w,  6, 10,  4
-    SUMSUB_BA             w,  7, 11,  4
+    UNSCRATCH             4, 12, tmpq+11*%%str
+    SUMSUB_BA             w,  0,  6, 5
+    SUMSUB_BA             w,  4,  2, 5
+    UNSCRATCH             5,  8, tmpq+ 5*%%str
+    SCRATCH               4,  8, tmpq+11*%%str
+    SUMSUB_BA             w,  1,  7, 4
+    SUMSUB_BA             w,  5,  3, 4
+    SCRATCH               5, 12, tmpq+ 5*%%str
+
+    VP9_UNPACK_MULSUB_2W_4X   3,  6, 6270, m15137, [pd_8192], 4, 5 ; t20, t27
+    VP9_UNPACK_MULSUB_2W_4X   2,  7, 6270, m15137, [pd_8192], 4, 5 ; t21, t26
 
     ; m8[s]=t16, m9=t17, m5=t18, m4[s]=t19, m12=t20, m13=t21, m1=t22, m0=t23,
     ; m15=t24, m14=t25, m2=t26, m3=t27, m11=t28, m10=t29, m6=t30, m7=t31
 
-    mova                 m8, [tmpq+17*%%str] ; t16
-    ; from 2 stages forward
-    SUMSUB_BA             w,  0,  8,  4
-    SUMSUB_BA             w, 15,  7,  4
-    ; from 3 stages forward
-    SUMSUB_BA             w,  8,  7,  4
-    pmulhrsw             m7, [pw_11585x2]
-    pmulhrsw             m8, [pw_11585x2]
-    ; store t16/t23
-    mova    [tmpq+ 1*%%str], m0     ; t16
-    mova    [tmpq+29*%%str], m7     ; t23
-
-    mova                 m4, [tmpq+21*%%str] ; t19
-    VP9_UNPACK_MULSUB_2W_4X  10,  5, 15137,  6270, [pd_8192], 0, 7 ; t18, t29
-    VP9_UNPACK_MULSUB_2W_4X  11,  4, 15137,  6270, [pd_8192], 0, 7 ; t19, t28
-    VP9_UNPACK_MULSUB_2W_4X   3, 12, 6270, m15137, [pd_8192], 0, 7 ; t20, t27
-    VP9_UNPACK_MULSUB_2W_4X   2, 13, 6270, m15137, [pd_8192], 0, 7 ; t21, t26
-
-    ; m8=t16, m9=t17, m10=t18, m11=t19, m3=t20, m2=t21, m1=t22, m0=t23,
-    ; m15=t24, m14=t25, m13=t26, m12=t27, m4=t28, m5=t29, m6=t30, m7=t31
-
-    SUMSUB_BA             w,  1,  9,  0
-    SUMSUB_BA             w,  2, 10,  0
-    SUMSUB_BA             w,  3, 11,  0
-    SUMSUB_BA             w, 12,  4,  0
-    SUMSUB_BA             w, 13,  5,  0
-    SUMSUB_BA             w, 14,  6,  0
+    UNSCRATCH             5,  9, tmpq+ 9*%%str
+    mova                 m4, [tmpq+23*%%str] ; t16
+%if ARCH_X86_64
+    SUMSUB_BA             w,  1,  5,  9
+    SUMSUB_BA             w,  0,  4,  9
+%else
+    SUMSUB_BADC           w,  1,  5,  0,  4
+%endif
+    mova    [tmpq+29*%%str], m1     ; t17
+    mova    [tmpq+21*%%str], m0     ; t16
+    UNSCRATCH             0, 10, tmpq+ 1*%%str
+    UNSCRATCH             1, 11, tmpq+ 7*%%str
+%if ARCH_X86_64
+    SUMSUB_BA             w,  2,  0,  9
+    SUMSUB_BA             w,  3,  1,  9
+%else
+    SUMSUB_BADC           w,  2,  0,  3,  1
+%endif
+    mova    [tmpq+ 9*%%str], m2     ; t18
+    mova    [tmpq+13*%%str], m3     ; t19
+    SCRATCH               0, 10, tmpq+23*%%str
+    SCRATCH               1, 11, tmpq+27*%%str
+
+    UNSCRATCH             2, 14, tmpq+15*%%str
+    UNSCRATCH             3, 15, tmpq+17*%%str
+    SUMSUB_BA             w,  6,  2, 0
+    SUMSUB_BA             w,  7,  3, 0
+    SCRATCH               6, 14, tmpq+ 3*%%str
+    SCRATCH               7, 15, tmpq+ 7*%%str
+
+    UNSCRATCH             0,  8, tmpq+11*%%str
+    mova                 m1, [tmpq+25*%%str] ; t19
+    UNSCRATCH             6, 12, tmpq+ 5*%%str
+    UNSCRATCH             7, 13, tmpq+31*%%str
+%if ARCH_X86_64
+    SUMSUB_BA             w,  0,  1,  9
+    SUMSUB_BA             w,  6,  7,  9
+%else
+    SUMSUB_BADC           w,  0,  1,  6,  7
+%endif
 
     ; m0=t16, m1=t17, m2=t18, m3=t19, m11=t20, m10=t21, m9=t22, m8=t23,
     ; m7=t24, m6=t25, m5=t26, m4=t27, m12=t28, m13=t29, m14=t30, m15=t31
 
-    SUMSUB_BA             w,  9,  6,  0
-    SUMSUB_BA             w, 10,  5,  0
-    SUMSUB_BA             w, 11,  4,  0
+%if cpuflag(ssse3)
+%if ARCH_X86_64
+    SUMSUB_BA             w,  4,  7,  8
+    SUMSUB_BA             w,  5,  1,  8
+%else
+    SUMSUB_BADC           w,  4,  7,  5,  1
+%endif
 
-    pmulhrsw             m6, [pw_11585x2]
-    pmulhrsw             m9, [pw_11585x2]
-    pmulhrsw             m5, [pw_11585x2]
-    pmulhrsw            m10, [pw_11585x2]
+    pmulhrsw             m7, [pw_11585x2]
     pmulhrsw             m4, [pw_11585x2]
-    pmulhrsw            m11, [pw_11585x2]
+    pmulhrsw             m1, [pw_11585x2]
+    pmulhrsw             m5, [pw_11585x2]
+
+    mova    [tmpq+ 5*%%str], m7     ; t23
+    SCRATCH               1, 13, tmpq+25*%%str
+    UNSCRATCH             7, 10, tmpq+23*%%str
+    UNSCRATCH             1, 11, tmpq+27*%%str
+
+%if ARCH_X86_64
+    SUMSUB_BA             w,  7,  3, 10
+    SUMSUB_BA             w,  1,  2, 10
+%else
+    SUMSUB_BADC           w,  7,  3,  1,  2
+%endif
+
+    pmulhrsw             m3, [pw_11585x2]
+    pmulhrsw             m7, [pw_11585x2]
+    pmulhrsw             m2, [pw_11585x2]
+    pmulhrsw             m1, [pw_11585x2]
+%else
+    SCRATCH               0,  8, tmpq+15*%%str
+    SCRATCH               6,  9, tmpq+17*%%str
+    VP9_UNPACK_MULSUB_2W_4X  7,  4, 11585, 11585, [pd_8192], 0, 6
+    mova    [tmpq+ 5*%%str], m7     ; t23
+    UNSCRATCH             7, 10, tmpq+23*%%str
+    VP9_UNPACK_MULSUB_2W_4X  1,  5, 11585, 11585, [pd_8192], 0, 6
+    SCRATCH               1, 13, tmpq+25*%%str
+    UNSCRATCH             1, 11, tmpq+27*%%str
+    VP9_UNPACK_MULSUB_2W_4X  3,  7, 11585, 11585, [pd_8192], 0, 6
+    VP9_UNPACK_MULSUB_2W_4X  2,  1, 11585, 11585, [pd_8192], 0, 6
+    UNSCRATCH             0,  8, tmpq+15*%%str
+    UNSCRATCH             6,  9, tmpq+17*%%str
+%endif
 
     ; m0=t16, m1=t17, m2=t18, m3=t19, m4=t20, m5=t21, m6=t22, m7=t23,
     ; m8=t24, m9=t25, m10=t26, m11=t27, m12=t28, m13=t29, m14=t30, m15=t31
 
-    ; store t17-19 (and t20-22 for pass 1) - keep t24-31 in registers for
-    ; final sumsub in pass 1, or keep t20-22 and t24-31 in registers for
-    ; final sumsub of pass 2
-    mova    [tmpq+ 5*%%str], m1     ; t17
-    mova    [tmpq+ 9*%%str], m2     ; t18
-    mova    [tmpq+13*%%str], m3     ; t19
-
     ; then do final pass to sumsub+store the two halves
 %if %2 == 1
-    mova    [tmpq+17*%%str], m4     ; t20
-    mova    [tmpq+21*%%str], m5     ; t21
-    mova    [tmpq+25*%%str], m6     ; t22
-
-    mova                 m0, [tmpq+ 0*%%str] ; t0
-    mova                 m1, [tmpq+ 4*%%str] ; t1
-    mova                 m2, [tmpq+ 8*%%str] ; t2
-    mova                 m3, [tmpq+12*%%str] ; t3
-    mova                 m4, [tmpq+16*%%str] ; t4
-    mova                 m5, [tmpq+20*%%str] ; t5
-    mova                 m6, [tmpq+24*%%str] ; t6
-
-    SUMSUB_BA             w, 15,  0, 7
-    mova    [tmpq+ 3*%%str], m0              ; t15
-    mova                 m7, [tmpq+28*%%str] ; t7
-    SUMSUB_BA             w, 14,  1, 0
-    SUMSUB_BA             w, 13,  2, 0
-    SUMSUB_BA             w, 12,  3, 0
-    SUMSUB_BA             w, 11,  4, 0
-    SUMSUB_BA             w, 10,  5, 0
-    SUMSUB_BA             w,  9,  6, 0
-    SUMSUB_BA             w,  8,  7, 0
-
-    TRANSPOSE8x8W        15, 14, 13, 12, 11, 10, 9, 8, 0
-    mova    [tmpq+ 0*%%str], m15
-    mova    [tmpq+ 4*%%str], m14
-    mova    [tmpq+ 8*%%str], m13
-    mova    [tmpq+12*%%str], m12
-    mova    [tmpq+16*%%str], m11
-    mova    [tmpq+20*%%str], m10
-    mova    [tmpq+24*%%str], m9
-    mova    [tmpq+28*%%str], m8
-
-    mova                  m0, [tmpq+ 3*%%str] ; t15
-    TRANSPOSE8x8W          7, 6, 5, 4, 3, 2, 1, 0, 8
-    mova    [tmpq+ 3*%%str], m7
-    mova    [tmpq+ 7*%%str], m6
-    mova    [tmpq+11*%%str], m5
-    mova    [tmpq+15*%%str], m4
-    mova    [tmpq+19*%%str], m3
-    mova    [tmpq+23*%%str], m2
-    mova    [tmpq+27*%%str], m1
-    mova    [tmpq+31*%%str], m0
-
-    mova                m15, [tmpq+ 2*%%str] ; t8
-    mova                m14, [tmpq+ 6*%%str] ; t9
-    mova                m13, [tmpq+10*%%str] ; t10
-    mova                m12, [tmpq+14*%%str] ; t11
-    mova                m11, [tmpq+18*%%str] ; t12
-    mova                m10, [tmpq+22*%%str] ; t13
-    mova                 m9, [tmpq+26*%%str] ; t14
-    mova                 m8, [tmpq+30*%%str] ; t15
-    mova                 m7, [tmpq+ 1*%%str] ; t16
-    mova                 m6, [tmpq+ 5*%%str] ; t17
+    mova    [tmpq+17*%%str], m2     ; t20
+    mova    [tmpq+ 1*%%str], m3     ; t21
+%if ARCH_X86_64
+    mova    [tmpq+25*%%str], m13    ; t22
+
+    mova                 m8, [tmpq+ 0*%%str] ; t0
+    mova                 m9, [tmpq+ 4*%%str] ; t1
+    mova                m12, [tmpq+ 8*%%str] ; t2
+    mova                m11, [tmpq+12*%%str] ; t3
+    mova                 m2, [tmpq+16*%%str] ; t4
+    mova                 m3, [tmpq+20*%%str] ; t5
+    mova                m13, [tmpq+24*%%str] ; t6
+
+    SUMSUB_BA             w,  6,  8,  10
+    mova    [tmpq+ 3*%%str], m8              ; t15
+    mova                m10, [tmpq+28*%%str] ; t7
+    SUMSUB_BA             w,  0,  9,  8
+    SUMSUB_BA             w, 15, 12,  8
+    SUMSUB_BA             w, 14, 11,  8
+    SUMSUB_BA             w,  1,  2,  8
+    SUMSUB_BA             w,  7,  3,  8
+    SUMSUB_BA             w,  5, 13,  8
+    SUMSUB_BA             w,  4, 10,  8
+
+    TRANSPOSE8x8W         6, 0, 15, 14, 1, 7, 5, 4, 8
+    mova    [tmpq+ 0*%%str], m6
+    mova    [tmpq+ 4*%%str], m0
+    mova    [tmpq+ 8*%%str], m15
+    mova    [tmpq+12*%%str], m14
+    mova    [tmpq+16*%%str], m1
+    mova    [tmpq+20*%%str], m7
+    mova    [tmpq+24*%%str], m5
+    mova    [tmpq+28*%%str], m4
+
+    mova                  m8, [tmpq+ 3*%%str] ; t15
+    TRANSPOSE8x8W         10, 13, 3, 2, 11, 12, 9, 8, 0
+    mova    [tmpq+ 3*%%str], m10
+    mova    [tmpq+ 7*%%str], m13
+    mova    [tmpq+11*%%str], m3
+    mova    [tmpq+15*%%str], m2
+    mova    [tmpq+19*%%str], m11
+    mova    [tmpq+23*%%str], m12
+    mova    [tmpq+27*%%str], m9
+    mova    [tmpq+31*%%str], m8
+
+    mova                m15, [tmpq+30*%%str] ; t8
+    mova                m14, [tmpq+26*%%str] ; t9
+    mova                m13, [tmpq+22*%%str] ; t10
+    mova                m12, [tmpq+18*%%str] ; t11
+    mova                m11, [tmpq+14*%%str] ; t12
+    mova                m10, [tmpq+10*%%str] ; t13
+    mova                 m9, [tmpq+ 6*%%str] ; t14
+    mova                 m8, [tmpq+ 2*%%str] ; t15
+    mova                 m7, [tmpq+21*%%str] ; t16
+    mova                 m6, [tmpq+29*%%str] ; t17
     mova                 m5, [tmpq+ 9*%%str] ; t18
     mova                 m4, [tmpq+13*%%str] ; t19
     mova                 m3, [tmpq+17*%%str] ; t20
-    mova                 m2, [tmpq+21*%%str] ; t21
+    mova                 m2, [tmpq+ 1*%%str] ; t21
     mova                 m1, [tmpq+25*%%str] ; t22
 
     SUMSUB_BA             w,  7,  8, 0
     mova    [tmpq+ 2*%%str], m8
-    mova                 m0, [tmpq+29*%%str] ; t23
+    mova                 m0, [tmpq+ 5*%%str] ; t23
     SUMSUB_BA             w,  6,  9, 8
     SUMSUB_BA             w,  5, 10, 8
     SUMSUB_BA             w,  4, 11, 8
@@ -1479,97 +2265,303 @@ IADST16_FN iadst, IADST16, iadst, IADST16, avx
     mova    [tmpq+26*%%str], m14
     mova    [tmpq+30*%%str], m15
 %else
+    mova                 m2, [tmpq+24*%%str] ; t6
+    mova                 m3, [tmpq+28*%%str] ; t7
+    SUMSUB_BADC           w,  5,  2,  4,  3
+    mova    [tmpq+24*%%str], m5
+    mova    [tmpq+23*%%str], m2
+    mova    [tmpq+28*%%str], m4
+    mova    [tmpq+19*%%str], m3
+
+    mova                 m2, [tmpq+16*%%str] ; t4
+    mova                 m3, [tmpq+20*%%str] ; t5
+    SUMSUB_BA             w,  1,  2,  5
+    SUMSUB_BA             w,  7,  3,  5
+    mova    [tmpq+15*%%str], m2
+    mova    [tmpq+11*%%str], m3
+
+    mova                 m2, [tmpq+ 0*%%str] ; t0
+    mova                 m3, [tmpq+ 4*%%str] ; t1
+    SUMSUB_BA             w,  6,  2,  5
+    SUMSUB_BA             w,  0,  3,  5
+    mova    [tmpq+31*%%str], m2
+    mova    [tmpq+27*%%str], m3
+
+    mova                 m2, [tmpq+ 8*%%str] ; t2
+    mova                 m3, [tmpq+12*%%str] ; t3
+    mova                 m5, [tmpq+ 7*%%str]
+    mova                 m4, [tmpq+ 3*%%str]
+    SUMSUB_BADC           w,  5,  2,  4,  3
+    mova    [tmpq+ 7*%%str], m2
+    mova    [tmpq+ 3*%%str], m3
+
+    mova                 m3, [tmpq+28*%%str]
+    TRANSPOSE8x8W         6, 0, 5, 4, 1, 7, 2, 3, [tmpq+24*%%str], [tmpq+16*%%str], 1
+    mova    [tmpq+ 0*%%str], m6
+    mova    [tmpq+ 4*%%str], m0
+    mova    [tmpq+ 8*%%str], m5
+    mova    [tmpq+12*%%str], m4
+    mova    [tmpq+20*%%str], m7
+    mova    [tmpq+24*%%str], m2
+    mova    [tmpq+28*%%str], m3
+
+    mova                 m6, [tmpq+19*%%str]
+    mova                 m0, [tmpq+23*%%str]
+    mova                 m5, [tmpq+11*%%str]
+    mova                 m4, [tmpq+15*%%str]
+    mova                 m1, [tmpq+ 3*%%str]
+    mova                 m7, [tmpq+ 7*%%str]
+    mova                 m3, [tmpq+31*%%str]
+    TRANSPOSE8x8W         6, 0, 5, 4, 1, 7, 2, 3, [tmpq+27*%%str], [tmpq+19*%%str], 1
+    mova    [tmpq+ 3*%%str], m6
+    mova    [tmpq+ 7*%%str], m0
+    mova    [tmpq+11*%%str], m5
+    mova    [tmpq+15*%%str], m4
+    mova    [tmpq+23*%%str], m7
+    mova    [tmpq+27*%%str], m2
+    mova    [tmpq+31*%%str], m3
+
+    mova                 m1, [tmpq+ 6*%%str] ; t14
+    mova                 m0, [tmpq+ 2*%%str] ; t15
+    mova                 m7, [tmpq+21*%%str] ; t16
+    mova                 m6, [tmpq+29*%%str] ; t17
+    SUMSUB_BA             w,  7,  0,  2
+    SUMSUB_BA             w,  6,  1,  2
+    mova    [tmpq+29*%%str], m7
+    mova    [tmpq+ 2*%%str], m0
+    mova    [tmpq+21*%%str], m6
+    mova    [tmpq+ 6*%%str], m1
+
+    mova                 m1, [tmpq+14*%%str] ; t12
+    mova                 m0, [tmpq+10*%%str] ; t13
+    mova                 m5, [tmpq+ 9*%%str] ; t18
+    mova                 m4, [tmpq+13*%%str] ; t19
+    SUMSUB_BA             w,  5,  0,  2
+    SUMSUB_BA             w,  4,  1,  2
+    mova     [tmpq+10*%%str], m0
+    mova     [tmpq+14*%%str], m1
+
+    mova                 m1, [tmpq+22*%%str] ; t10
+    mova                 m0, [tmpq+18*%%str] ; t11
+    mova                 m3, [tmpq+17*%%str] ; t20
+    mova                 m2, [tmpq+ 1*%%str] ; t21
+    SUMSUB_BA             w,  3,  0,  6
+    SUMSUB_BA             w,  2,  1,  6
+    mova     [tmpq+18*%%str], m0
+    mova     [tmpq+22*%%str], m1
+
+    mova                 m7, [tmpq+30*%%str] ; t8
+    mova                 m6, [tmpq+26*%%str] ; t9
+    mova                 m1, [tmpq+25*%%str] ; t22
+    mova                 m0, [tmpq+ 5*%%str] ; t23
+    SUMSUB_BADC           w,  1,  6,  0,  7
+    mova     [tmpq+26*%%str], m6
+    mova     [tmpq+30*%%str], m7
+
+    mova                 m7, [tmpq+29*%%str]
+    TRANSPOSE8x8W         0, 1, 2, 3, 4, 5, 6, 7, [tmpq+21*%%str], [tmpq+17*%%str], 1
+    mova    [tmpq+ 1*%%str], m0
+    mova    [tmpq+ 5*%%str], m1
+    mova    [tmpq+ 9*%%str], m2
+    mova    [tmpq+13*%%str], m3
+    mova    [tmpq+21*%%str], m5
+    mova    [tmpq+25*%%str], m6
+    mova    [tmpq+29*%%str], m7
+
+    mova                 m0, [tmpq+ 2*%%str]
+    mova                 m1, [tmpq+ 6*%%str]
+    mova                 m2, [tmpq+10*%%str]
+    mova                 m3, [tmpq+14*%%str]
+    mova                 m4, [tmpq+18*%%str]
+    mova                 m5, [tmpq+22*%%str]
+    mova                 m7, [tmpq+30*%%str]
+    TRANSPOSE8x8W         0, 1, 2, 3, 4, 5, 6, 7, [tmpq+26*%%str], [tmpq+18*%%str], 1
+    mova    [tmpq+ 2*%%str], m0
+    mova    [tmpq+ 6*%%str], m1
+    mova    [tmpq+10*%%str], m2
+    mova    [tmpq+14*%%str], m3
+    mova    [tmpq+22*%%str], m5
+    mova    [tmpq+26*%%str], m6
+    mova    [tmpq+30*%%str], m7
+%endif
+%else
     ; t0-7 is in [tmpq+{0,4,8,12,16,20,24,28}*%%str]
     ; t8-15 is in [tmpq+{2,6,10,14,18,22,26,30}*%%str]
     ; t16-19 and t23 is in [tmpq+{1,5,9,13,29}*%%str]
     ; t20-22 is in m4-6
     ; t24-31 is in m8-15
-    pxor                m7, m7
+
+%if cpuflag(ssse3)
+%define ROUND_REG [pw_512]
+%else
+%define ROUND_REG [pw_32]
+%endif
 
 %macro %%STORE_2X2 7-8 1 ; src[1-4], tmp[1-2], zero, inc_dst_ptrs
     SUMSUB_BA            w, %4, %1, %5
     SUMSUB_BA            w, %3, %2, %5
-    pmulhrsw           m%4, [pw_512]
-    pmulhrsw           m%3, [pw_512]
-    VP9_STORE_2X        %4, %3, %5, %6, %7
+    VP9_IDCT8_WRITEx2   %4, %3, %5, %6, %7, ROUND_REG, 6
 %if %8 == 1
     add               dstq, stride2q
 %endif
-    pmulhrsw           m%2, [pw_512]
-    pmulhrsw           m%1, [pw_512]
-    VP9_STORE_2X        %2, %1, %5, %6, %7, dst_endq
+    VP9_IDCT8_WRITEx2   %2, %1, %5, %6, %7, ROUND_REG, 6, dst_endq
 %if %8 == 1
     sub           dst_endq, stride2q
 %endif
 %endmacro
 
+%if ARCH_X86_64
+    pxor               m10, m10
+
     ; store t0-1 and t30-31
-    mova                m0, [tmpq+ 0*%%str]
-    mova                m1, [tmpq+ 4*%%str]
-    %%STORE_2X2          0,  1, 14, 15, 2, 3, 7
+    mova                m8, [tmpq+ 0*%%str]
+    mova                m9, [tmpq+ 4*%%str]
+    %%STORE_2X2          8,  9,  0,  6, 12, 11, 10
 
     ; store t2-3 and t28-29
-    mova                m0, [tmpq+ 8*%%str]
-    mova                m1, [tmpq+12*%%str]
-    %%STORE_2X2          0,  1, 12, 13, 2, 3, 7
+    mova                m8, [tmpq+ 8*%%str]
+    mova                m9, [tmpq+12*%%str]
+    %%STORE_2X2          8,  9, 14, 15, 12, 11, 10
 
     ; store t4-5 and t26-27
-    mova                m0, [tmpq+16*%%str]
-    mova                m1, [tmpq+20*%%str]
-    %%STORE_2X2          0,  1, 10, 11, 2, 3, 7
+    mova                m8, [tmpq+16*%%str]
+    mova                m9, [tmpq+20*%%str]
+    %%STORE_2X2          8,  9,  7,  1, 12, 11, 10
 
     ; store t6-7 and t24-25
-    mova                m0, [tmpq+24*%%str]
-    mova                m1, [tmpq+28*%%str]
-    %%STORE_2X2          0,  1,  8,  9, 2, 3, 7
+    mova                m8, [tmpq+24*%%str]
+    mova                m9, [tmpq+28*%%str]
+    %%STORE_2X2          8,  9,  4,  5, 12, 11, 10
 
     ; store t8-9 and t22-23
-    mova                m0, [tmpq+ 2*%%str]
-    mova                m1, [tmpq+ 6*%%str]
-    mova                m8, [tmpq+29*%%str]
-    %%STORE_2X2          0,  1,  6,  8, 2, 3, 7
+    mova                m8, [tmpq+30*%%str]
+    mova                m9, [tmpq+26*%%str]
+    mova                m0, [tmpq+ 5*%%str]
+    %%STORE_2X2          8,  9, 13,  0, 12, 11, 10
 
     ; store t10-11 and t20-21
-    mova                m0, [tmpq+10*%%str]
-    mova                m1, [tmpq+14*%%str]
-    %%STORE_2X2          0,  1,  4,  5, 2, 3, 7
+    mova                m8, [tmpq+22*%%str]
+    mova                m9, [tmpq+18*%%str]
+    %%STORE_2X2          8,  9,  2,  3, 12, 11, 10
 
     ; store t12-13 and t18-19
-    mova                m0, [tmpq+18*%%str]
-    mova                m1, [tmpq+22*%%str]
+    mova                m8, [tmpq+14*%%str]
+    mova                m9, [tmpq+10*%%str]
     mova                m5, [tmpq+13*%%str]
     mova                m4, [tmpq+ 9*%%str]
-    %%STORE_2X2          0,  1,  4,  5, 2, 3, 7
+    %%STORE_2X2          8,  9,  4,  5, 12, 11, 10
 
     ; store t14-17
-    mova                m0, [tmpq+26*%%str]
-    mova                m1, [tmpq+30*%%str]
-    mova                m5, [tmpq+ 5*%%str]
-    mova                m4, [tmpq+ 1*%%str]
-    %%STORE_2X2          0,  1,  4,  5, 2, 3, 7, 0
+    mova                m8, [tmpq+ 6*%%str]
+    mova                m9, [tmpq+ 2*%%str]
+    mova                m5, [tmpq+29*%%str]
+    mova                m4, [tmpq+21*%%str]
+    %%STORE_2X2          8,  9,  4,  5, 12, 11, 10, 0
+
+    SWAP                 1, 10 ; zero
+%else
+    mova   [tmpq+ 1*%%str], m1
+    mova   [tmpq+11*%%str], m2
+    mova   [tmpq+15*%%str], m3
+    mova   [tmpq+17*%%str], m4
+    mova   [tmpq+19*%%str], m5
+    pxor                m1, m1
+
+    ; store t0-1 and t30-31
+    mova                m2, [tmpq+ 0*%%str]
+    mova                m3, [tmpq+ 4*%%str]
+    %%STORE_2X2          2,  3,  0,  6, 4, 5, 1
+
+    ; store t2-3 and t28-29
+    mova                m2, [tmpq+ 8*%%str]
+    mova                m3, [tmpq+12*%%str]
+    mova                m0, [tmpq+ 3*%%str]
+    mova                m6, [tmpq+ 7*%%str]
+    %%STORE_2X2          2,  3,  0,  6, 4, 5, 1
+
+    ; store t4-5 and t26-27
+    mova                m2, [tmpq+16*%%str]
+    mova                m3, [tmpq+20*%%str]
+    mova                m0, [tmpq+ 1*%%str]
+    %%STORE_2X2          2,  3,  7,  0, 4, 5, 1
+
+    ; store t6-7 and t24-25
+    mova                m2, [tmpq+24*%%str]
+    mova                m3, [tmpq+28*%%str]
+    mova                m0, [tmpq+17*%%str]
+    mova                m6, [tmpq+19*%%str]
+    %%STORE_2X2          2,  3,  0,  6, 4, 5, 1
+
+    ; store t8-9 and t22-23
+    mova                m2, [tmpq+30*%%str]
+    mova                m3, [tmpq+26*%%str]
+    mova                m0, [tmpq+25*%%str]
+    mova                m6, [tmpq+ 5*%%str]
+    %%STORE_2X2          2,  3,  0,  6, 4, 5, 1
+
+    ; store t10-11 and t20-21
+    mova                m2, [tmpq+22*%%str]
+    mova                m3, [tmpq+18*%%str]
+    mova                m0, [tmpq+11*%%str]
+    mova                m6, [tmpq+15*%%str]
+    %%STORE_2X2          2,  3,  0,  6, 4, 5, 1
+
+    ; store t12-13 and t18-19
+    mova                m2, [tmpq+14*%%str]
+    mova                m3, [tmpq+10*%%str]
+    mova                m6, [tmpq+13*%%str]
+    mova                m0, [tmpq+ 9*%%str]
+    %%STORE_2X2          2,  3,  0,  6, 4, 5, 1
+
+    ; store t14-17
+    mova                m2, [tmpq+ 6*%%str]
+    mova                m3, [tmpq+ 2*%%str]
+    mova                m6, [tmpq+29*%%str]
+    mova                m0, [tmpq+21*%%str]
+    %%STORE_2X2          2,  3,  0,  6, 4, 5, 1, 0
+%endif
+%undef ROUND_REG
 %endif
 %endmacro
 
 %macro VP9_IDCT_IDCT_32x32_ADD_XMM 1
 INIT_XMM %1
-cglobal vp9_idct_idct_32x32_add, 4, 9, 16, 2048, dst, stride, block, eob
+cglobal vp9_idct_idct_32x32_add, 4, 7 + ARCH_X86_64 * 2, 16, 2048, dst, stride, block, eob
+%if cpuflag(ssse3)
     cmp eobd, 135
     jg .idctfull
     cmp eobd, 34
     jg .idct16x16
     cmp eobd, 1
     jg .idct8x8
+%else
+    cmp eobd, 1
+    jg .idctfull
+%endif
 
     ; dc-only case
+%if cpuflag(ssse3)
     movd                m0, [blockq]
     mova                m1, [pw_11585x2]
     pmulhrsw            m0, m1
     pmulhrsw            m0, m1
+%else
+    DEFINE_ARGS dst, stride, block, coef
+    movsx            coefd, word [blockq]
+    imul             coefd, 11585
+    add              coefd, 8192
+    sar              coefd, 14
+    imul             coefd, 11585
+    add              coefd, (32 << 14) + 8192
+    sar              coefd, 14 + 6
+    movd                m0, coefd
+%endif
     SPLATW              m0, m0, q0000
+%if cpuflag(ssse3)
     pmulhrsw            m0, [pw_512]
+%endif
     pxor                m5, m5
     movd          [blockq], m5
-    DEFINE_ARGS        dst, stride, block, cnt
 %rep 31
     VP9_STORE_2XFULL    0, 1, 2, 3, 4, 5, mmsize
     add               dstq, strideq
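
In the dc-only path above, the SSE2 branch replaces SSSE3's pmulhrsw
sequence with plain integer arithmetic: pmulhrsw against pw_11585x2 computes
(x*11585 + 8192) >> 14, and pmulhrsw against pw_512 is exactly the
(x + 32) >> 6 output rounding, so both 1D passes and the rounding fold into
one movsx/imul/add/sar chain. A self-contained scalar model (helper names
are hypothetical, not part of the patch):

    #include <stddef.h>
    #include <stdint.h>

    static uint8_t clip_uint8(int v)
    {
        return v < 0 ? 0 : v > 255 ? 255 : v;
    }

    /* mirrors the movsx/imul/add/sar sequence, then the splat/store loop */
    static void idct32x32_dc_add_model(uint8_t *dst, ptrdiff_t stride,
                                       const int16_t *block)
    {
        int dc = block[0];
        dc = (dc * 11585 + 8192) >> 14;                    /* 1st pass        */
        dc = (dc * 11585 + (32 << 14) + 8192) >> (14 + 6); /* 2nd + out-round */
        for (int y = 0; y < 32; y++)
            for (int x = 0; x < 32; x++)
                dst[y * stride + x] = clip_uint8(dst[y * stride + x] + dc);
    }
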
@@ -1577,7 +2569,14 @@ cglobal vp9_idct_idct_32x32_add, 4, 9, 16, 2048, dst, stride, block, eob
     VP9_STORE_2XFULL    0, 1, 2, 3, 4, 5, mmsize
     RET
 
+%if ARCH_X86_64
     DEFINE_ARGS dst_bak, stride, block, cnt, dst, stride30, dst_end, stride2, tmp
+%else
+    DEFINE_ARGS dst, stride, block, stride30, dst_end, stride2, tmp
+%define cntd dword r4m
+%define dst_bakq r0mp
+%endif
+%if cpuflag(ssse3)
 .idct8x8:
     mov               tmpq, rsp
     VP9_IDCT32_1D   blockq, 1, 8
@@ -1589,7 +2588,7 @@ cglobal vp9_idct_idct_32x32_add, 4, 9, 16, 2048, dst, stride, block, eob
     sub          stride30q, stride2q        ; stride*30
 .loop2_8x8:
     mov               dstq, dst_bakq
-    lea           dst_endq, [dst_bakq+stride30q]
+    lea           dst_endq, [dstq+stride30q]
     VP9_IDCT32_1D     tmpq, 2, 8
     add           dst_bakq, 8
     add               tmpq, 16
@@ -1598,7 +2597,7 @@ cglobal vp9_idct_idct_32x32_add, 4, 9, 16, 2048, dst, stride, block, eob
 
     ; at the end of the loop, m1 should still be zero
     ; use that to zero out block coefficients
-    ZERO_BLOCK      blockq, 64,  8, m7
+    ZERO_BLOCK      blockq, 64,  8, m1
     RET
 
 .idct16x16:
@@ -1620,7 +2619,7 @@ cglobal vp9_idct_idct_32x32_add, 4, 9, 16, 2048, dst, stride, block, eob
     sub          stride30q, stride2q        ; stride*30
 .loop2_16x16:
     mov               dstq, dst_bakq
-    lea           dst_endq, [dst_bakq+stride30q]
+    lea           dst_endq, [dstq+stride30q]
     VP9_IDCT32_1D     tmpq, 2, 16
     add           dst_bakq, 8
     add               tmpq, 16
@@ -1629,8 +2628,9 @@ cglobal vp9_idct_idct_32x32_add, 4, 9, 16, 2048, dst, stride, block, eob
 
     ; at the end of the loop, m1 should still be zero
     ; use that to zero out block coefficients
-    ZERO_BLOCK      blockq, 64, 16, m7
+    ZERO_BLOCK      blockq, 64, 16, m1
     RET
+%endif
 
 .idctfull:
     mov               cntd, 4
@@ -1651,7 +2651,7 @@ cglobal vp9_idct_idct_32x32_add, 4, 9, 16, 2048, dst, stride, block, eob
     sub          stride30q, stride2q        ; stride*30
 .loop2_full:
     mov               dstq, dst_bakq
-    lea           dst_endq, [dst_bakq+stride30q]
+    lea           dst_endq, [dstq+stride30q]
     VP9_IDCT32_1D     tmpq, 2
     add           dst_bakq, 8
     add               tmpq, 16
@@ -1660,11 +2660,10 @@ cglobal vp9_idct_idct_32x32_add, 4, 9, 16, 2048, dst, stride, block, eob
 
     ; at the end of the loop, m1 should still be zero
     ; use that to zero out block coefficients
-    ZERO_BLOCK      blockq, 64, 32, m7
+    ZERO_BLOCK      blockq, 64, 32, m1
     RET
 %endmacro
 
+VP9_IDCT_IDCT_32x32_ADD_XMM sse2
 VP9_IDCT_IDCT_32x32_ADD_XMM ssse3
 VP9_IDCT_IDCT_32x32_ADD_XMM avx
-
-%endif ; x86-64
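
For orientation, the non-dc paths above follow the file's usual two-pass
shape: pass 1 runs VP9_IDCT32_1D over 8-column slices of the coefficient
block into the 2048-byte scratch buffer on the stack, pass 2 runs it over
8-column slices of that buffer and accumulates the saturated result into
dst, and ZERO_BLOCK then clears the coefficients for the next block. A
hedged outline in C; the two helpers are hypothetical stand-ins for the
pass-1 and pass-2 invocations of VP9_IDCT32_1D:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    void idct32_1d_cols(const int16_t *in, int16_t *out);      /* pass 1 */
    void idct32_1d_add(const int16_t *in, uint8_t *dst,
                       ptrdiff_t stride);                      /* pass 2 */

    static void idct32x32_add_model(uint8_t *dst, ptrdiff_t stride,
                                    int16_t *block)
    {
        int16_t scratch[32 * 32];           /* the 2048-byte rsp buffer   */
        for (int i = 0; i < 4; i++)         /* cntd = 4 slices of 8 cols  */
            idct32_1d_cols(block + 8 * i, scratch + 8 * i);
        for (int i = 0; i < 4; i++)         /* the .loop2_* loops above   */
            idct32_1d_add(scratch + 8 * i, dst + 8 * i, stride);
        /* ZERO_BLOCK; the real code clears only as many rows as the
           eob-dependent path (8/16/32) actually touched */
        memset(block, 0, 32 * 32 * sizeof(*block));
    }
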


