[FFmpeg-devel] [PATCH 3/8] arm: Add NEON optimizations for 10 and 12 bit vp9 itxfm

Martin Storsjö martin at martin.st
Wed Jan 18 23:45:10 EET 2017


This work is sponsored by, and copyright, Google.

This is structured similarly to the 8 bit version, where the
coefficients are 16 bits and the intermediates are 32 bits.

Here, the coefficients are 32 bit. For the 4x4 transforms for 10 bit
content, the intermediates also fit in 32 bits, but for all other
transforms (4x4 for 12 bit content, and 8x8 and larger for both 10
and 12 bit) the intermediates are 64 bit.
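
As a rough scalar sketch of why the wider intermediates are needed
(illustration only, not part of the patch), the rounding butterfly that
the mbutterfly macros in this patch implement looks like this; the
products are computed at 64 bit width (vmull.s32/vmlal.s32) and only
narrowed back to 32 bit after rounding (vrshrn.s64):

    #include <stdint.h>

    static void butterfly(int32_t *out1, int32_t *out2,
                          int32_t in1, int32_t in2,
                          int32_t coef1, int32_t coef2)
    {
        int64_t t1 = (int64_t)in1 * coef1 - (int64_t)in2 * coef2;
        int64_t t2 = (int64_t)in1 * coef2 + (int64_t)in2 * coef1;
        /* round and narrow back to 32 bits */
        *out1 = (int32_t)((t1 + (1 << 13)) >> 14);
        *out2 = (int32_t)((t2 + (1 << 13)) >> 14);
    }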

For the existing 8 bit case, the 8x8 transform fits all coefficients in
registers; for 10/12 bit, where the coefficients are 32 bit, the 8x8
transform also has to be done in slices of 4 pixels (just as 16x16 and
32x32 are for 8 bit).

For the 16x16 and 32x32 cases, the slice width also shrinks from 4 to
2 elements processed in parallel.
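
As a conceptual outline of the two-pass slicing (not the actual code;
idct16_cols_2x16() and idct16_rows_add_2x16() are hypothetical helpers
standing in for the 1d pass functions in this patch), a 16x16 transform
with 2 element wide slices looks roughly like this:

    #include <stddef.h>
    #include <stdint.h>

    /* hypothetical helpers: transform two columns/rows at a time */
    void idct16_cols_2x16(int32_t *tmp, const int32_t *in);
    void idct16_rows_add_2x16(uint16_t *dst, ptrdiff_t stride,
                              const int32_t *tmp);

    void idct16x16_add_outline(uint16_t *dst, ptrdiff_t stride,
                               const int32_t *block)
    {
        int32_t tmp[16 * 16];
        /* pass 1: vertical 2x16 slices, written transposed to tmp */
        for (int i = 0; i < 16; i += 2)
            idct16_cols_2x16(tmp + i * 16, block + i);
        /* pass 2: 2x16 slices of tmp, added into the destination */
        for (int i = 0; i < 16; i += 2)
            idct16_rows_add_2x16(dst + i, stride, tmp + i);
    }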

The 16 bit coefficients from idct_coeffs and similar tables also need
to be lengthened to 32 bit in order to be multiplied with vectors of
32 bit elements. The widened coefficient vectors therefore take up more
register space, so there are more cases where they have to be reloaded
within the transform (in iadst16).
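
A minimal intrinsics sketch of that widening (illustration only; the
assembly does the same thing with vld1.16 followed by vmovl.s16):

    #include <arm_neon.h>
    #include <stdint.h>

    static int32x4_t load_coeffs_widened(const int16_t *coeffs)
    {
        int16x4_t c16 = vld1_s16(coeffs); /* four 16 bit coefficients */
        return vmovl_s16(c16);            /* sign extended to 32 bit lanes */
    }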

Strictly speaking, this would need checkasm tests for subpartitions in
increments of 2, but that would slow down normal checkasm runs
excessively.

Examples of relative speedup compared to the C version, from checkasm:
                                     Cortex    A7     A8     A9    A53
vp9_inv_adst_adst_4x4_sub4_add_10_neon:      4.83  11.36   5.22   6.77
vp9_inv_adst_adst_8x8_sub8_add_10_neon:      4.12   7.60   4.06   4.84
vp9_inv_adst_adst_16x16_sub16_add_10_neon:   3.93   8.16   4.52   5.35
vp9_inv_dct_dct_4x4_sub1_add_10_neon:        1.36   2.57   1.41   1.61
vp9_inv_dct_dct_4x4_sub4_add_10_neon:        4.24   8.66   5.06   5.81
vp9_inv_dct_dct_8x8_sub1_add_10_neon:        2.63   4.18   1.68   2.87
vp9_inv_dct_dct_8x8_sub4_add_10_neon:        4.52   9.47   4.24   5.39
vp9_inv_dct_dct_8x8_sub8_add_10_neon:        3.45   7.34   3.45   4.30
vp9_inv_dct_dct_16x16_sub1_add_10_neon:      3.56   6.21   2.47   4.32
vp9_inv_dct_dct_16x16_sub2_add_10_neon:      5.68  12.73   5.28   7.07
vp9_inv_dct_dct_16x16_sub8_add_10_neon:      4.42   9.28   4.24   5.45
vp9_inv_dct_dct_16x16_sub16_add_10_neon:     3.41   7.29   3.35   4.19
vp9_inv_dct_dct_32x32_sub1_add_10_neon:      4.52   8.35   3.83   6.40
vp9_inv_dct_dct_32x32_sub2_add_10_neon:      5.86  13.19   6.14   7.04
vp9_inv_dct_dct_32x32_sub16_add_10_neon:     4.29   8.11   4.59   5.06
vp9_inv_dct_dct_32x32_sub32_add_10_neon:     3.31   5.70   3.56   3.84
vp9_inv_wht_wht_4x4_sub4_add_10_neon:        1.89   2.80   1.82   1.97

The speedup compared to the C functions is around 1.3-7x for the full
transforms, and even higher for the smaller subpartitions.
---
 libavcodec/arm/Makefile                         |    3 +-
 libavcodec/arm/vp9dsp_init_16bpp_arm_template.c |   47 +
 libavcodec/arm/vp9itxfm_16bpp_neon.S            | 1515 +++++++++++++++++++++++
 3 files changed, 1564 insertions(+), 1 deletion(-)
 create mode 100644 libavcodec/arm/vp9itxfm_16bpp_neon.S

diff --git a/libavcodec/arm/Makefile b/libavcodec/arm/Makefile
index fb35d25..856c154 100644
--- a/libavcodec/arm/Makefile
+++ b/libavcodec/arm/Makefile
@@ -142,7 +142,8 @@ NEON-OBJS-$(CONFIG_RV40_DECODER)       += arm/rv34dsp_neon.o            \
                                           arm/rv40dsp_neon.o
 NEON-OBJS-$(CONFIG_VORBIS_DECODER)     += arm/vorbisdsp_neon.o
 NEON-OBJS-$(CONFIG_VP6_DECODER)        += arm/vp6dsp_neon.o
-NEON-OBJS-$(CONFIG_VP9_DECODER)        += arm/vp9itxfm_neon.o           \
+NEON-OBJS-$(CONFIG_VP9_DECODER)        += arm/vp9itxfm_16bpp_neon.o     \
+                                          arm/vp9itxfm_neon.o           \
                                           arm/vp9lpf_neon.o             \
                                           arm/vp9mc_16bpp_neon.o        \
                                           arm/vp9mc_neon.o
diff --git a/libavcodec/arm/vp9dsp_init_16bpp_arm_template.c b/libavcodec/arm/vp9dsp_init_16bpp_arm_template.c
index 05efd29..95f2bbc 100644
--- a/libavcodec/arm/vp9dsp_init_16bpp_arm_template.c
+++ b/libavcodec/arm/vp9dsp_init_16bpp_arm_template.c
@@ -141,7 +141,54 @@ static av_cold void vp9dsp_mc_init_arm(VP9DSPContext *dsp)
     }
 }
 
+#define define_itxfm2(type_a, type_b, sz, bpp)                                     \
+void ff_vp9_##type_a##_##type_b##_##sz##x##sz##_add_##bpp##_neon(uint8_t *_dst,    \
+                                                                 ptrdiff_t stride, \
+                                                                 int16_t *_block, int eob)
+#define define_itxfm(type_a, type_b, sz, bpp) define_itxfm2(type_a, type_b, sz, bpp)
+
+#define define_itxfm_funcs(sz, bpp)      \
+    define_itxfm(idct,  idct,  sz, bpp); \
+    define_itxfm(iadst, idct,  sz, bpp); \
+    define_itxfm(idct,  iadst, sz, bpp); \
+    define_itxfm(iadst, iadst, sz, bpp)
+
+define_itxfm_funcs(4,  BPP);
+define_itxfm_funcs(8,  BPP);
+define_itxfm_funcs(16, BPP);
+define_itxfm(idct, idct, 32, BPP);
+define_itxfm(iwht, iwht, 4,  BPP);
+
+
+static av_cold void vp9dsp_itxfm_init_arm(VP9DSPContext *dsp)
+{
+    int cpu_flags = av_get_cpu_flags();
+
+    if (have_neon(cpu_flags)) {
+#define init_itxfm2(tx, sz, bpp)                                               \
+    dsp->itxfm_add[tx][DCT_DCT]   = ff_vp9_idct_idct_##sz##_add_##bpp##_neon;  \
+    dsp->itxfm_add[tx][DCT_ADST]  = ff_vp9_iadst_idct_##sz##_add_##bpp##_neon; \
+    dsp->itxfm_add[tx][ADST_DCT]  = ff_vp9_idct_iadst_##sz##_add_##bpp##_neon; \
+    dsp->itxfm_add[tx][ADST_ADST] = ff_vp9_iadst_iadst_##sz##_add_##bpp##_neon
+#define init_itxfm(tx, sz, bpp) init_itxfm2(tx, sz, bpp)
+
+#define init_idct2(tx, nm, bpp)     \
+    dsp->itxfm_add[tx][DCT_DCT]   = \
+    dsp->itxfm_add[tx][ADST_DCT]  = \
+    dsp->itxfm_add[tx][DCT_ADST]  = \
+    dsp->itxfm_add[tx][ADST_ADST] = ff_vp9_##nm##_add_##bpp##_neon
+#define init_idct(tx, nm, bpp) init_idct2(tx, nm, bpp)
+
+        init_itxfm(TX_4X4,   4x4,   BPP);
+        init_itxfm(TX_8X8,   8x8,   BPP);
+        init_itxfm(TX_16X16, 16x16, BPP);
+        init_idct(TX_32X32, idct_idct_32x32, BPP);
+        init_idct(4,        iwht_iwht_4x4,   BPP);
+    }
+}
+
 av_cold void INIT_FUNC(VP9DSPContext *dsp)
 {
     vp9dsp_mc_init_arm(dsp);
+    vp9dsp_itxfm_init_arm(dsp);
 }
diff --git a/libavcodec/arm/vp9itxfm_16bpp_neon.S b/libavcodec/arm/vp9itxfm_16bpp_neon.S
new file mode 100644
index 0000000..e6e9440
--- /dev/null
+++ b/libavcodec/arm/vp9itxfm_16bpp_neon.S
@@ -0,0 +1,1515 @@
+/*
+ * Copyright (c) 2017 Google Inc.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/arm/asm.S"
+#include "neon.S"
+
+const itxfm4_coeffs, align=4
+        .short  11585, 0, 6270, 15137
+iadst4_coeffs:
+        .short  5283, 15212, 9929, 13377
+endconst
+
+const iadst8_coeffs, align=4
+        .short  16305, 1606, 14449, 7723, 10394, 12665, 4756, 15679
+idct_coeffs:
+        .short  11585, 0, 6270, 15137, 3196, 16069, 13623, 9102
+        .short  1606, 16305, 12665, 10394, 7723, 14449, 15679, 4756
+        .short  804, 16364, 12140, 11003, 7005, 14811, 15426, 5520
+        .short  3981, 15893, 14053, 8423, 9760, 13160, 16207, 2404
+endconst
+
+const iadst16_coeffs, align=4
+        .short  16364, 804, 15893, 3981, 11003, 12140, 8423, 14053
+        .short  14811, 7005, 13160, 9760, 5520, 15426, 2404, 16207
+endconst
+
+@ Do two 4x4 transposes, using q registers for the subtransposes that don't
+@ need to address the individual d registers.
+@ r0,r1 == rq0, r2,r3 == rq1, etc
+.macro transpose32_q_2x_4x4 rq0, rq1, rq2, rq3, rq4, rq5, rq6, rq7, r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15
+        vswp             \r1,  \r4  @ vtrn.64 \rq0, \rq2
+        vswp             \r3,  \r6  @ vtrn.64 \rq1, \rq3
+        vswp             \r9,  \r12 @ vtrn.64 \rq4, \rq6
+        vswp             \r11, \r14 @ vtrn.64 \rq5, \rq7
+        vtrn.32          \rq0, \rq1
+        vtrn.32          \rq2, \rq3
+        vtrn.32          \rq4, \rq5
+        vtrn.32          \rq6, \rq7
+.endm
+
+@ Do eight 2x2 transposes.
+.macro transpose32_8x_2x2 r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15
+        vtrn.32          \r0,  \r1
+        vtrn.32          \r2,  \r3
+        vtrn.32          \r4,  \r5
+        vtrn.32          \r6,  \r7
+        vtrn.32          \r8,  \r9
+        vtrn.32          \r10, \r11
+        vtrn.32          \r12, \r13
+        vtrn.32          \r14, \r15
+.endm
+
+@ out1 = ((in1 + in2) * d0[0] + (1 << 13)) >> 14
+@ out2 = ((in1 - in2) * d0[0] + (1 << 13)) >> 14
+@ in/out are d registers
+.macro mbutterfly0 out1, out2, in1, in2, tmpd1, tmpd2, tmpq3, tmpq4, neg=0
+        vadd.s32        \tmpd1, \in1,  \in2
+        vsub.s32        \tmpd2, \in1,  \in2
+.if \neg > 0
+        vneg.s32        \tmpd1, \tmpd1
+.endif
+        vmull.s32       \tmpq3, \tmpd1, d0[0]
+        vmull.s32       \tmpq4, \tmpd2, d0[0]
+        vrshrn.s64      \out1, \tmpq3, #14
+        vrshrn.s64      \out2, \tmpq4, #14
+.endm
+
+@ out1,out2 = ((in1 + in2) * d0[0] + (1 << 13)) >> 14
+@ out3,out4 = ((in1 - in2) * d0[0] + (1 << 13)) >> 14
+@ Same as mbutterfly0, but with input being 2 q registers, output
+@ being 4 d registers.
+@ This can do with either 4 or 6 temporary q registers.
+.macro dmbutterfly0 out1, out2, out3, out4, in1, in2, tmpq1, tmpq2, tmpd11, tmpd12, tmpd21, tmpd22, tmpq3, tmpq4, tmpq5, tmpq6
+        vadd.s32        \tmpq1, \in1,  \in2
+        vsub.s32        \tmpq2, \in1,  \in2
+        vmull.s32       \tmpq3, \tmpd11, d0[0]
+        vmull.s32       \tmpq4, \tmpd12, d0[0]
+.ifb \tmpq5
+        vrshrn.s64      \out1, \tmpq3, #14
+        vrshrn.s64      \out2, \tmpq4, #14
+        vmull.s32       \tmpq3, \tmpd21, d0[0]
+        vmull.s32       \tmpq4, \tmpd22, d0[0]
+        vrshrn.s64      \out3, \tmpq3, #14
+        vrshrn.s64      \out4, \tmpq4, #14
+.else
+        vmull.s32       \tmpq5, \tmpd21, d0[0]
+        vmull.s32       \tmpq6, \tmpd22, d0[0]
+        vrshrn.s64      \out1, \tmpq3, #14
+        vrshrn.s64      \out2, \tmpq4, #14
+        vrshrn.s64      \out3, \tmpq5, #14
+        vrshrn.s64      \out4, \tmpq6, #14
+.endif
+.endm
+
+@ out1 = in1 * coef1 - in2 * coef2
+@ out2 = in1 * coef2 + in2 * coef1
+@ out are 2 q registers, in are 2 d registers
+.macro mbutterfly_l out1, out2, in1, in2, coef1, coef2, neg=0
+        vmull.s32       \out1, \in1, \coef1
+        vmlsl.s32       \out1, \in2, \coef2
+.if \neg
+        vmov.s64        \out2, #0
+        vmlsl.s32       \out2, \in1, \coef2
+        vmlsl.s32       \out2, \in2, \coef1
+.else
+        vmull.s32       \out2, \in1, \coef2
+        vmlal.s32       \out2, \in2, \coef1
+.endif
+.endm
+
+@ out1,out2 = in1,in2 * coef1 - in3,in4 * coef2
+@ out3,out4 = in1,in2 * coef2 + in3,in4 * coef1
+@ out are 4 q registers, in are 4 d registers
+.macro dmbutterfly_l out1, out2, out3, out4, in1, in2, in3, in4, coef1, coef2
+        vmull.s32       \out1, \in1, \coef1
+        vmull.s32       \out2, \in2, \coef1
+        vmull.s32       \out3, \in1, \coef2
+        vmull.s32       \out4, \in2, \coef2
+        vmlsl.s32       \out1, \in3, \coef2
+        vmlsl.s32       \out2, \in4, \coef2
+        vmlal.s32       \out3, \in3, \coef1
+        vmlal.s32       \out4, \in4, \coef1
+.endm
+
+@ inout1 = (inout1 * coef1 - inout2 * coef2 + (1 << 13)) >> 14
+@ inout2 = (inout1 * coef2 + inout2 * coef1 + (1 << 13)) >> 14
+@ inout are 2 d registers, tmp are 2 q registers
+.macro mbutterfly inout1, inout2, coef1, coef2, tmp1, tmp2, neg=0
+        mbutterfly_l    \tmp1, \tmp2, \inout1, \inout2, \coef1, \coef2, \neg
+        vrshrn.s64      \inout1, \tmp1,  #14
+        vrshrn.s64      \inout2, \tmp2,  #14
+.endm
+
+@ inout1,inout2 = (inout1,inout2 * coef1 - inout3,inout4 * coef2 + (1 << 13)) >> 14
+@ inout3,inout4 = (inout1,inout2 * coef2 + inout3,inout4 * coef1 + (1 << 13)) >> 14
+@ inout are 4 d registers, tmp are 4 q registers
+.macro dmbutterfly inout1, inout2, inout3, inout4, coef1, coef2, tmp1, tmp2, tmp3, tmp4
+        dmbutterfly_l   \tmp1, \tmp2, \tmp3, \tmp4, \inout1, \inout2, \inout3, \inout4, \coef1, \coef2
+        vrshrn.s64      \inout1, \tmp1,  #14
+        vrshrn.s64      \inout2, \tmp2,  #14
+        vrshrn.s64      \inout3, \tmp3,  #14
+        vrshrn.s64      \inout4, \tmp4,  #14
+.endm
+
+@ out1 = in1 + in2
+@ out2 = in1 - in2
+.macro butterfly out1, out2, in1, in2
+        vadd.s32        \out1, \in1, \in2
+        vsub.s32        \out2, \in1, \in2
+.endm
+
+@ out1 = in1 - in2
+@ out2 = in1 + in2
+.macro butterfly_r out1, out2, in1, in2
+        vsub.s32        \out1, \in1, \in2
+        vadd.s32        \out2, \in1, \in2
+.endm
+
+@ out1 = (in1 + in2 + (1 << 13)) >> 14
+@ out2 = (in1 - in2 + (1 << 13)) >> 14
+@ out are 2 d registers, in are 2 q registers, tmp are 2 q registers
+.macro butterfly_n out1, out2, in1, in2, tmp1, tmp2
+        vadd.s64        \tmp1, \in1, \in2
+        vsub.s64        \tmp2, \in1, \in2
+        vrshrn.s64      \out1, \tmp1,  #14
+        vrshrn.s64      \out2, \tmp2,  #14
+.endm
+
+@ out1,out2 = (in1,in2 + in3,in4 + (1 << 13)) >> 14
+@ out3,out4 = (in1,in2 - in3,in4 + (1 << 13)) >> 14
+@ out are 4 d registers, in are 4 q registers, tmp are 4 q registers
+.macro dbutterfly_n out1, out2, out3, out4, in1, in2, in3, in4, tmp1, tmp2, tmp3, tmp4
+        vadd.s64        \tmp1, \in1, \in3
+        vadd.s64        \tmp2, \in2, \in4
+        vsub.s64        \tmp3, \in1, \in3
+        vsub.s64        \tmp4, \in2, \in4
+        vrshrn.s64      \out1, \tmp1,  #14
+        vrshrn.s64      \out2, \tmp2,  #14
+        vrshrn.s64      \out3, \tmp3,  #14
+        vrshrn.s64      \out4, \tmp4,  #14
+.endm
+
+
+.macro iwht4_10 c0, c1, c2, c3, cd0, cd1, cd2, cd3, cd4, cd5, cd6, cd7
+        vadd.i32        \c0,  \c0,  \c1
+        vsub.i32        q11,  \c2,  \c3
+        vsub.i32        q10,  \c0,  q11
+        vshr.s32        q10,  q10,  #1
+        vsub.i32        \c2,  q10,  \c1
+        vsub.i32        \c1,  q10,  \c3
+        vadd.i32        \c3,  q11,  \c2
+        vsub.i32        \c0,  \c0,  \c1
+.endm
+
+.macro iwht4_12 c0, c1, c2, c3, cd0, cd1, cd2, cd3, cd4, cd5, cd6, cd7
+        iwht4_10        \c0, \c1, \c2, \c3, \cd0, \cd1, \cd2, \cd3, \cd4, \cd5, \cd6, \cd7
+.endm
+
+@ c0 == cd0,cd1, c1 == cd2,cd3
+.macro idct4_10 c0, c1, c2, c3, cd0, cd1, cd2, cd3, cd4, cd5, cd6, cd7
+        vmul.s32        q13,  \c1,  d1[1]
+        vmul.s32        q11,  \c1,  d1[0]
+        vadd.i32        q14,  \c0,  \c2
+        vsub.i32        q15,  \c0,  \c2
+        vmla.s32        q13,  \c3,  d1[0]
+        vmul.s32        q12,  q14,  d0[0]
+        vmul.s32        q10,  q15,  d0[0]
+        vmls.s32        q11,  \c3,  d1[1]
+        vrshr.s32       q13,  q13,  #14
+        vrshr.s32       q12,  q12,  #14
+        vrshr.s32       q10,  q10,  #14
+        vrshr.s32       q11,  q11,  #14
+        vadd.i32        \c0,  q12,  q13
+        vsub.i32        \c3,  q12,  q13
+        vadd.i32        \c1,  q10,  q11
+        vsub.i32        \c2,  q10,  q11
+.endm
+
+.macro idct4_12 c0, c1, c2, c3, cd0, cd1, cd2, cd3, cd4, cd5, cd6, cd7
+        vmull.s32       q13,  \cd2, d1[1]
+        vmull.s32       q15,  \cd3, d1[1]
+        vmull.s32       q11,  \cd2, d1[0]
+        vmull.s32       q3,   \cd3, d1[0]
+        vadd.i32        q14,  \c0,  \c2
+        vsub.i32        q2,   \c0,  \c2
+        vmlal.s32       q13,  \cd6, d1[0]
+        vmlal.s32       q15,  \cd7, d1[0]
+        vmull.s32       q12,  d28,  d0[0]
+        vmull.s32       q14,  d29,  d0[0]
+        vmull.s32       q10,  d4,   d0[0]
+        vmull.s32       q8,   d5,   d0[0]
+        vmlsl.s32       q11,  \cd6, d1[1]
+        vmlsl.s32       q3,   \cd7, d1[1]
+        vrshrn.s64      d26,  q13,  #14
+        vrshrn.s64      d27,  q15,  #14
+        vrshrn.s64      d24,  q12,  #14
+        vrshrn.s64      d25,  q14,  #14
+        vrshrn.s64      d20,  q10,  #14
+        vrshrn.s64      d21,  q8,   #14
+        vrshrn.s64      d22,  q11,  #14
+        vrshrn.s64      d23,  q3,   #14
+        vadd.i32        \c0,  q12,  q13
+        vsub.i32        \c3,  q12,  q13
+        vadd.i32        \c1,  q10,  q11
+        vsub.i32        \c2,  q10,  q11
+.endm
+
+.macro iadst4_10 c0, c1, c2, c3, cd0, cd1, cd2, cd3, cd4, cd5, cd6, cd7
+        vmul.s32        q10,  \c0,  d2[0]
+        vmla.s32        q10,  \c2,  d2[1]
+        vmla.s32        q10,  \c3,  d3[0]
+        vmul.s32        q11,  \c0,  d3[0]
+        vmls.s32        q11,  \c2,  d2[0]
+        vsub.s32        \c0,  \c0,  \c2
+        vmls.s32        q11,  \c3,  d2[1]
+        vadd.s32        \c0,  \c0,  \c3
+        vmul.s32        q13,  \c1,  d3[1]
+        vmul.s32        q12,  \c0,  d3[1]
+        vadd.s32        q14,  q10,  q13
+        vadd.s32        q15,  q11,  q13
+        vrshr.s32       \c0,  q14,  #14
+        vadd.s32        q10,  q10,  q11
+        vrshr.s32       \c1,  q15,  #14
+        vsub.s32        q10,  q10,  q13
+        vrshr.s32       \c2,  q12,  #14
+        vrshr.s32       \c3,  q10,  #14
+.endm
+
+.macro iadst4_12 c0, c1, c2, c3, cd0, cd1, cd2, cd3, cd4, cd5, cd6, cd7
+        vmull.s32       q10,  \cd0, d2[0]
+        vmull.s32       q4,   \cd1, d2[0]
+        vmlal.s32       q10,  \cd4, d2[1]
+        vmlal.s32       q4,   \cd5, d2[1]
+        vmlal.s32       q10,  \cd6, d3[0]
+        vmlal.s32       q4,   \cd7, d3[0]
+        vmull.s32       q11,  \cd0, d3[0]
+        vmull.s32       q5,   \cd1, d3[0]
+        vmlsl.s32       q11,  \cd4, d2[0]
+        vmlsl.s32       q5,   \cd5, d2[0]
+        vsub.s32        \c0,  \c0,  \c2
+        vmlsl.s32       q11,  \cd6, d2[1]
+        vmlsl.s32       q5,   \cd7, d2[1]
+        vadd.s32        \c0,  \c0,  \c3
+        vmull.s32       q13,  \cd2, d3[1]
+        vmull.s32       q6,   \cd3, d3[1]
+        vmull.s32       q12,  \cd0, d3[1]
+        vmull.s32       q7,   \cd1, d3[1]
+        vadd.s64        q14,  q10,  q13
+        vadd.s64        q2,   q4,   q6
+        vadd.s64        q15,  q11,  q13
+        vadd.s64        q3,   q5,   q6
+        vrshrn.s64      \cd1, q2,   #14
+        vrshrn.s64      \cd0, q14,  #14
+        vadd.s64        q10,  q10,  q11
+        vadd.s64        q4,   q4,   q5
+        vrshrn.s64      \cd3, q3,   #14
+        vrshrn.s64      \cd2, q15,  #14
+        vsub.s64        q10,  q10,  q13
+        vsub.s64        q4,   q4,   q6
+        vrshrn.s64      \cd4, q12,  #14
+        vrshrn.s64      \cd5, q7,   #14
+        vrshrn.s64      \cd6, q10,  #14
+        vrshrn.s64      \cd7, q4,   #14
+.endm
+
+@ The public functions in this file have the following signature:
+@ void itxfm_add(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
+
+.macro itxfm_func4x4 txfm1, txfm2, bpp
+function ff_vp9_\txfm1\()_\txfm2\()_4x4_add_\bpp\()_neon, export=1
+.ifc \txfm1,\txfm2
+.ifc \txfm1,idct
+        movrel          r12, itxfm4_coeffs
+        vld1.16         {d0}, [r12,:64]
+        vmovl.s16       q0,  d0
+.endif
+.ifc \txfm1,iadst
+        movrel          r12, iadst4_coeffs
+        vld1.16         {d1}, [r12,:64]
+        vmovl.s16       q1,  d1
+.endif
+.else
+        movrel          r12, itxfm4_coeffs
+        vld1.16         {q0}, [r12,:128]
+        vmovl.s16       q1,  d1
+        vmovl.s16       q0,  d0
+.endif
+.if \bpp > 10
+.ifnc \txfm1\()_\txfm2,idct_idct
+        @ iadst4_12 needs q4-q7
+        vpush           {q4-q7}
+.endif
+.endif
+
+        vmov.i32        q14, #0
+        vmov.i32        q15, #0
+.ifc \txfm1\()_\txfm2,idct_idct
+        cmp             r3,  #1
+        bne             1f
+        @ DC-only for idct/idct
+        vld1.32         {d4[]},   [r2,:32]
+        vmull.s32       q2,  d4,  d0[0]
+        vrshrn.s64      d4,  q2,  #14
+        vmull.s32       q2,  d4,  d0[0]
+        vrshrn.s64      d4,  q2,  #14
+        vst1.32         {d30[0]}, [r2,:32]
+        vdup.32         q2,  d4[0]
+        vmov            q3,  q2
+        vmov            q8,  q2
+        vmov            q9,  q2
+        b               2f
+.endif
+
+1:
+        vld1.32         {q2-q3},   [r2,:128]
+        vst1.32         {q14-q15}, [r2,:128]!
+        vld1.32         {q8-q9},   [r2,:128]
+
+.ifc \txfm1,iwht
+        vshr.s32        q2,  q2,  #2
+        vshr.s32        q3,  q3,  #2
+        vshr.s32        q8,  q8,  #2
+        vshr.s32        q9,  q9,  #2
+.endif
+
+        vst1.16         {q14-q15}, [r2,:128]!
+        \txfm1\()4_\bpp q2,  q3,  q8,  q9,  d4,  d5,  d6,  d7,  d16, d17, d18, d19
+
+        @ Transpose 4x4 with 32 bit elements
+        vtrn.32         q2,  q3
+        vtrn.32         q8,  q9
+        vswp            d5,  d16
+        vswp            d7,  d18
+
+        \txfm2\()4_\bpp q2,  q3,  q8,  q9,  d4,  d5,  d6,  d7,  d16, d17, d18, d19
+2:
+        vmvn.u16        q15, #((0xffff << \bpp) & 0xffff)
+        vld1.16         {d0},  [r0,:64], r1
+        vld1.16         {d1},  [r0,:64], r1
+.ifnc \txfm1,iwht
+        vrshr.s32       q2,  q2,  #4
+        vrshr.s32       q3,  q3,  #4
+        vrshr.s32       q8,  q8,  #4
+        vrshr.s32       q9,  q9,  #4
+.endif
+        vaddw.u16       q2,  q2,  d0
+        vaddw.u16       q3,  q3,  d1
+        vld1.16         {d2},  [r0,:64], r1
+        vld1.16         {d3},  [r0,:64], r1
+        vqmovun.s32     d0,  q2
+        vqmovun.s32     d1,  q3
+        sub             r0,  r0,  r1, lsl #2
+
+        vaddw.u16       q8,  q8,  d2
+        vmin.u16        q0,  q0,  q15
+        vaddw.u16       q9,  q9,  d3
+        vst1.16         {d0},  [r0,:64], r1
+        vqmovun.s32     d2,  q8
+        vqmovun.s32     d3,  q9
+        vmin.u16        q1,  q1,  q15
+
+        vst1.16         {d1},  [r0,:64], r1
+        vst1.16         {d2},  [r0,:64], r1
+        vst1.16         {d3},  [r0,:64], r1
+
+.if \bpp > 10
+.ifnc \txfm1\()_\txfm2,idct_idct
+        vpop            {q4-q7}
+.endif
+.endif
+        bx              lr
+endfunc
+.endm
+
+.macro itxfm_funcs4x4 bpp
+itxfm_func4x4 idct,  idct,  \bpp
+itxfm_func4x4 iadst, idct,  \bpp
+itxfm_func4x4 idct,  iadst, \bpp
+itxfm_func4x4 iadst, iadst, \bpp
+itxfm_func4x4 iwht,  iwht,  \bpp
+.endm
+
+itxfm_funcs4x4 10
+itxfm_funcs4x4 12
+
+.macro idct8
+        dmbutterfly0    d16, d17, d24, d25, q8,  q12, q2, q4, d4, d5, d8, d9, q3, q2, q5, q4 @ q8 = t0a, q12 = t1a
+        dmbutterfly     d20, d21, d28, d29, d1[0], d1[1], q2,  q3,  q4,  q5 @ q10 = t2a, q14 = t3a
+        dmbutterfly     d18, d19, d30, d31, d2[0], d2[1], q2,  q3,  q4,  q5 @ q9  = t4a, q15 = t7a
+        dmbutterfly     d26, d27, d22, d23, d3[0], d3[1], q2,  q3,  q4,  q5 @ q13 = t5a, q11 = t6a
+
+        butterfly       q2,  q14, q8,  q14 @ q2 = t0, q14 = t3
+        butterfly       q3,  q10, q12, q10 @ q3 = t1, q10 = t2
+        butterfly       q4,  q13, q9,  q13 @ q4 = t4, q13 = t5a
+        butterfly       q5,  q11, q15, q11 @ q5 = t7, q11 = t6a
+
+        butterfly       q8,  q15, q2,  q5  @ q8 = out[0], q15 = out[7]
+
+        dmbutterfly0    d4,  d5,  d10, d11, q11, q13, q9,  q13, d18, d19, d26, d27, q2,  q5, q11, q12 @ q2 = t6, q5 = t5
+
+        butterfly       q11, q12, q14, q4  @ q11 = out[3], q12 = out[4]
+        butterfly       q9,  q14, q3,  q2  @ q9 = out[1],  q14 = out[6]
+        butterfly_r     q13, q10, q10, q5  @ q13 = out[5], q10 = out[2]
+.endm
+
+.macro iadst8
+        movrel          r12, iadst8_coeffs
+        vld1.16         {q1}, [r12,:128]!
+        vmovl.s16       q0,  d2
+        vmovl.s16       q1,  d3
+
+        dmbutterfly_l   q4,  q5,  q2,  q3,  d30, d31, d16, d17, d0[1], d0[0] @ q4,q5  = t1a, q2,q3 = t0a
+        dmbutterfly_l   q8,  q15, q6,  q7,  d22, d23, d24, d25, d2[1], d2[0] @ q8,q15 = t5a, q6,q7 = t4a
+
+        dbutterfly_n    d22, d23, d4,  d5,  q2,  q3,  q6,  q7,  q11, q12, q2,  q3 @ q11 = t0, q2 = t4
+
+        dbutterfly_n    d24, d25, d6,  d7,  q4,  q5,  q8,  q15, q12, q3,  q6,  q7 @ q12 = t1, q3 = t5
+
+        dmbutterfly_l   q6,  q7,  q4,  q5,  d26, d27, d20, d21, d1[1], d1[0] @ q6,q7 = t3a, q4,q5 = t2a
+        dmbutterfly_l   q10, q13, q8,  q15, d18, d19, d28, d29, d3[1], d3[0] @ q10,q13 = t7a, q8,q15 = t6a
+
+        dbutterfly_n    d18, d19, d8,  d9,  q4,  q5,  q8,  q15, q9,  q14, q4, q5 @ q9 = t2, q4 = t6
+        dbutterfly_n    d16, d17, d12, d13, q6,  q7,  q10, q13, q8,  q15, q6, q7 @ q8 = t3, q6 = t7
+
+        movrel          r12, idct_coeffs
+        vld1.16         {q0}, [r12,:128]
+        vmovl.s16       q1,  d1
+        vmovl.s16       q0,  d0
+
+        butterfly       q15, q12, q12, q8 @ q15 = -out[7], q12 = t3
+        vneg.s32        q15, q15          @ q15 = out[7]
+        butterfly       q8,  q9,  q11, q9 @ q8 = out[0], q9 = t2
+
+        dmbutterfly_l   q10, q11, q5,  q7,  d4,  d5,  d6,  d7,  d1[0], d1[1] @ q10,q11 = t5a, q5,q7 = t4a
+        dmbutterfly_l   q2,  q3,  q13, q14, d12, d13, d8,  d9,  d1[1], d1[0] @ q2,q3 = t6a, q13,q14 = t7a
+
+        dbutterfly_n    d28, d29, d8,  d9,  q10, q11, q13, q14, q4,  q6,  q10, q11 @ q14 = out[6], q4 = t7
+
+        dmbutterfly0    d22, d23, d24, d25, q9,  q12, q6, q13, d12, d13, d26, d27, q9, q10 @ q11 = -out[3], q12 = out[4]
+        vneg.s32        q11, q11      @ q11 = out[3]
+
+        dbutterfly_n    d18, d19, d4,  d5,  q5,  q7,  q2,  q3,  q9, q10, q2,  q3 @ q9 = -out[1], q2 = t6
+        vneg.s32        q9,  q9       @ q9 = out[1]
+
+        dmbutterfly0    d20, d21, d26, d27, q2,  q4,  q3, q5,  d6,  d7,  d10, d11, q6,  q7 @ q10 = out[2], q13 = -out[5]
+        vneg.s32        q13, q13      @ q13 = out[5]
+.endm
+
+function idct8x8_dc_add_neon
+        movrel          r12, idct_coeffs
+        vld1.16         {d0}, [r12,:64]
+
+        vmov.i32        q2,  #0
+        vmovl.s16       q0,  d0
+
+        vld1.32         {d16[]}, [r2,:32]
+        vmull.s32       q8,  d16, d0[0]
+        vrshrn.s64      d16, q8,  #14
+        vmull.s32       q8,  d16, d0[0]
+        vrshrn.s64      d16, q8,  #14
+        vdup.32         q8,  d16[0]
+        vst1.32         {d4[0]}, [r2,:32]
+
+        vrshr.s32       q8,  q8,  #5
+        vdup.s16        q15, r8
+
+        mov             r3,  r0
+        mov             r12, #8
+1:
+        @ Loop to add the constant from q8 into all 8x8 outputs
+        subs            r12, r12, #2
+        vld1.16         {q2},  [r0,:128], r1
+        vaddw.u16       q10, q8,  d4
+        vld1.16         {q3},  [r0,:128], r1
+        vaddw.u16       q11, q8,  d5
+        vaddw.u16       q12, q8,  d6
+        vaddw.u16       q13, q8,  d7
+        vqmovun.s32     d4,  q10
+        vqmovun.s32     d5,  q11
+        vqmovun.s32     d6,  q12
+        vqmovun.s32     d7,  q13
+        vmin.u16        q2,  q2,  q15
+        vst1.16         {q2},  [r3,:128], r1
+        vmin.u16        q3,  q3,  q15
+        vst1.16         {q3},  [r3,:128], r1
+        bne             1b
+
+        pop             {r4-r8,pc}
+endfunc
+.ltorg
+
+.macro itxfm8_1d_funcs txfm
+@ Read a vertical 4x8 slice out of a 8x8 matrix, do a transform on it,
+@ transpose into a horizontal 8x4 slice and store.
+@ r0 = dst (temp buffer)
+@ r1 = slice offset
+@ r2 = src
+function \txfm\()8_1d_4x8_pass1_neon
+        mov             r12, #32
+        vmov.s32        q2,  #0
+.irp i, 8, 9, 10, 11, 12, 13, 14, 15
+        vld1.32         {q\i}, [r2,:128]
+        vst1.32         {q2},  [r2,:128], r12
+.endr
+
+        \txfm\()8
+
+        @ Do two 4x4 transposes. Originally, q8-q15 contain the
+        @ 8 rows. Afterwards, q8-q11, q12-q15 contain the transposed
+        @ 4x4 blocks.
+        transpose32_q_2x_4x4 q8,  q9,  q10, q11, q12, q13, q14, q15, d16, d17, d18, d19, d20, d21, d22, d23, d24, d25, d26, d27, d28, d29, d30, d31
+
+        @ Store the transposed 4x4 blocks horizontally.
+        cmp             r1,  #4
+        beq             1f
+.irp i, 8, 12, 9, 13, 10, 14, 11, 15
+        vst1.32         {q\i}, [r0,:128]!
+.endr
+        bx              lr
+1:
+        @ Special case: For the last input column (r1 == 4),
+        @ which would be stored as the last row in the temp buffer,
+        @ don't store the first 4x4 block, but keep it in registers
+        @ for the first slice of the second pass (where it is the
+        @ last 4x4 block).
+.irp i, 12, 13, 14, 15
+        add             r0,  r0,  #16
+        vst1.32         {q\i}, [r0,:128]!
+.endr
+        vmov            q12, q8
+        vmov            q13, q9
+        vmov            q14, q10
+        vmov            q15, q11
+        bx              lr
+endfunc
+
+@ Read a vertical 4x8 slice out of an 8x8 matrix, do a transform on it,
+@ load the destination pixels (from a similar 4x8 slice), add and store back.
+@ r0 = dst
+@ r1 = dst stride
+@ r2 = src (temp buffer)
+@ r3 = slice offset
+function \txfm\()8_1d_4x8_pass2_neon
+        mov             r12, #32
+.irp i, 8, 9, 10, 11
+        vld1.32         {q\i}, [r2,:128], r12
+.endr
+        cmp             r3,  #0
+        beq             1f
+.irp i, 12, 13, 14, 15
+        vld1.32         {q\i}, [r2,:128], r12
+.endr
+1:
+
+        add             r3,  r0,  r1
+        lsl             r1,  r1,  #1
+        \txfm\()8
+
+        vdup.s16        q4,  r8
+.macro load_add_store coef0, coef1, coef2, coef3
+        vld1.16         {d4},   [r0,:64], r1
+        vld1.16         {d5},   [r3,:64], r1
+        vld1.16         {d6},   [r0,:64], r1
+        vld1.16         {d7},   [r3,:64], r1
+
+        vrshr.s32       \coef0, \coef0, #5
+        vrshr.s32       \coef1, \coef1, #5
+        vrshr.s32       \coef2, \coef2, #5
+        vrshr.s32       \coef3, \coef3, #5
+
+        vaddw.u16       \coef0, \coef0, d4
+        vaddw.u16       \coef1, \coef1, d5
+        vaddw.u16       \coef2, \coef2, d6
+        vaddw.u16       \coef3, \coef3, d7
+
+        sub             r0,  r0,  r1, lsl #1
+        sub             r3,  r3,  r1, lsl #1
+
+        vqmovun.s32     d4,  \coef0
+        vqmovun.s32     d5,  \coef1
+        vqmovun.s32     d6,  \coef2
+        vqmovun.s32     d7,  \coef3
+
+        vmin.u16        q2,  q2,  q4
+        vmin.u16        q3,  q3,  q4
+
+        vst1.16         {d4},  [r0,:64], r1
+        vst1.16         {d5},  [r3,:64], r1
+        vst1.16         {d6},  [r0,:64], r1
+        vst1.16         {d7},  [r3,:64], r1
+.endm
+        load_add_store  q8,  q9,  q10, q11
+        load_add_store  q12, q13, q14, q15
+.purgem load_add_store
+
+        bx              lr
+endfunc
+.endm
+
+itxfm8_1d_funcs idct
+itxfm8_1d_funcs iadst
+
+.macro itxfm_func8x8 txfm1, txfm2
+function vp9_\txfm1\()_\txfm2\()_8x8_add_16_neon
+.ifc \txfm1\()_\txfm2,idct_idct
+        cmp             r3,  #1
+        beq             idct8x8_dc_add_neon
+.endif
+.ifnc \txfm1\()_\txfm2,idct_idct
+        vpush           {q4-q7}
+.else
+        vpush           {q4-q5}
+.endif
+
+        @ Align the stack, allocate a temp buffer
+T       mov             r7,  sp
+T       and             r7,  r7,  #15
+A       and             r7,  sp,  #15
+        add             r7,  r7,  #256
+        sub             sp,  sp,  r7
+
+        mov             r4,  r0
+        mov             r5,  r1
+        mov             r6,  r2
+
+.ifc \txfm1,idct
+        movrel          r12, idct_coeffs
+        vld1.16         {q0}, [r12,:128]
+        vmovl.s16       q1,  d1
+        vmovl.s16       q0,  d0
+.endif
+
+.irp i, 0, 4
+        add             r0,  sp,  #(\i*32)
+.ifc \txfm1\()_\txfm2,idct_idct
+.if \i == 4
+        cmp             r3,  #12
+        ble             1f
+.endif
+.endif
+        mov             r1,  #\i
+        add             r2,  r6,  #(\i*4)
+        bl              \txfm1\()8_1d_4x8_pass1_neon
+.endr
+.ifc \txfm1\()_\txfm2,idct_idct
+        b               3f
+1:
+        @ For all-zero slices in pass 1, set q12-q15 to zero for the in-register
+        @ passthrough of coefficients to pass 2, and clear the end of the temp buffer
+        vmov.i32        q12, #0
+        vmov.i32        q13, #0
+        vmov.i32        q14, #0
+        vmov.i32        q15, #0
+.rept 4
+        vst1.32         {q12-q13}, [r0,:128]!
+.endr
+3:
+.endif
+.ifc \txfm1\()_\txfm2,iadst_idct
+        movrel          r12, idct_coeffs
+        vld1.16         {q0}, [r12,:128]
+        vmovl.s16       q1,  d1
+        vmovl.s16       q0,  d0
+.endif
+.irp i, 0, 4
+        add             r0,  r4,  #(\i*2)
+        mov             r1,  r5
+        add             r2,  sp,  #(\i*4)
+        mov             r3,  #\i
+        bl              \txfm2\()8_1d_4x8_pass2_neon
+.endr
+
+        add             sp,  sp,  r7
+.ifnc \txfm1\()_\txfm2,idct_idct
+        vpop            {q4-q7}
+.else
+        vpop            {q4-q5}
+.endif
+        pop             {r4-r8,pc}
+endfunc
+
+function ff_vp9_\txfm1\()_\txfm2\()_8x8_add_10_neon, export=1
+        push            {r4-r8,lr}
+        movw            r8,  #0x03ff
+        b               vp9_\txfm1\()_\txfm2\()_8x8_add_16_neon
+endfunc
+
+function ff_vp9_\txfm1\()_\txfm2\()_8x8_add_12_neon, export=1
+        push            {r4-r8,lr}
+        movw            r8,  #0x0fff
+        b               vp9_\txfm1\()_\txfm2\()_8x8_add_16_neon
+endfunc
+.endm
+
+itxfm_func8x8 idct,  idct
+itxfm_func8x8 iadst, idct
+itxfm_func8x8 idct,  iadst
+itxfm_func8x8 iadst, iadst
+
+function idct16x16_dc_add_neon
+        movrel          r12, idct_coeffs
+        vld1.16         {d0}, [r12,:64]
+
+        vmov.i32        q2,  #0
+        vmovl.s16       q0,  d0
+
+        vld1.32         {d16[]}, [r2,:32]
+        vmull.s32       q8,  d16, d0[0]
+        vrshrn.s64      d16, q8,  #14
+        vmull.s32       q8,  d16, d0[0]
+        vrshrn.s64      d16, q8,  #14
+        vdup.32         q8,  d16[0]
+        vst1.32         {d4[0]}, [r2,:32]
+
+        vrshr.s32       q8,  q8,  #6
+        vdup.s16        q15, r9
+
+        mov             r3,  r0
+        mov             r12, #16
+1:
+        @ Loop to add the constant from q8 into all 16x16 outputs
+        subs            r12, r12, #2
+        vld1.16         {q0-q1},  [r0,:128], r1
+        vaddw.u16       q9,  q8,  d0
+        vaddw.u16       q10, q8,  d1
+        vld1.16         {q2-q3},  [r0,:128], r1
+        vaddw.u16       q11, q8,  d2
+        vaddw.u16       q12, q8,  d3
+        vaddw.u16       q13, q8,  d4
+        vaddw.u16       q14, q8,  d5
+        vqmovun.s32     d0,  q9
+        vaddw.u16       q9,  q8,  d6
+        vqmovun.s32     d1,  q10
+        vaddw.u16       q10, q8,  d7
+        vqmovun.s32     d2,  q11
+        vqmovun.s32     d3,  q12
+        vqmovun.s32     d4,  q13
+        vqmovun.s32     d5,  q14
+        vmin.u16        q0,  q0,  q15
+        vmin.u16        q1,  q1,  q15
+        vqmovun.s32     d6,  q9
+        vqmovun.s32     d7,  q10
+        vst1.16         {q0-q1},  [r3,:128], r1
+        vmin.u16        q2,  q2,  q15
+        vmin.u16        q3,  q3,  q15
+        vst1.16         {q2-q3},  [r3,:128], r1
+        bne             1b
+
+        pop             {r4-r9,pc}
+endfunc
+.ltorg
+
+.macro idct16
+        mbutterfly0     d16, d24, d16, d24, d8, d10, q4,  q5 @ d16 = t0a,  d24 = t1a
+        mbutterfly      d20, d28, d1[0], d1[1], q4,  q5  @ d20 = t2a,  d28 = t3a
+        mbutterfly      d18, d30, d2[0], d2[1], q4,  q5  @ d18 = t4a,  d30 = t7a
+        mbutterfly      d26, d22, d3[0], d3[1], q4,  q5  @ d26 = t5a,  d22 = t6a
+        mbutterfly      d17, d31, d4[0], d4[1], q4,  q5  @ d17 = t8a,  d31 = t15a
+        mbutterfly      d25, d23, d5[0], d5[1], q4,  q5  @ d25 = t9a,  d23 = t14a
+        mbutterfly      d21, d27, d6[0], d6[1], q4,  q5  @ d21 = t10a, d27 = t13a
+        mbutterfly      d29, d19, d7[0], d7[1], q4,  q5  @ d29 = t11a, d19 = t12a
+
+        butterfly       d8,  d28, d16, d28               @ d8  = t0,   d28 = t3
+        butterfly       d9,  d20, d24, d20               @ d9  = t1,   d20 = t2
+        butterfly       d10, d26, d18, d26               @ d10 = t4,   d26 = t5
+        butterfly       d11, d22, d30, d22               @ d11 = t7,   d22 = t6
+        butterfly       d16, d25, d17, d25               @ d16 = t8,   d25 = t9
+        butterfly       d24, d21, d29, d21               @ d24 = t11,  d21 = t10
+        butterfly       d17, d27, d19, d27               @ d17 = t12,  d27 = t13
+        butterfly       d29, d23, d31, d23               @ d29 = t15,  d23 = t14
+
+        mbutterfly0     d22, d26, d22, d26, d18, d30, q9,  q15  @ d22 = t6a, d26 = t5a
+        mbutterfly      d23, d25, d1[0], d1[1], q9,  q15        @ d23 = t9a,  d25 = t14a
+        mbutterfly      d27, d21, d1[0], d1[1], q9,  q15, neg=1 @ d27 = t13a, d21 = t10a
+
+        butterfly       d18, d11, d8,  d11               @ d18 = t0a,  d11 = t7a
+        butterfly       d19, d22, d9,  d22               @ d19 = t1a,  d22 = t6
+        butterfly       d8,  d26, d20, d26               @ d8  = t2a,  d26 = t5
+        butterfly       d9,  d10, d28, d10               @ d9  = t3a,  d10 = t4
+        butterfly       d20, d28, d16, d24               @ d20 = t8a,  d28 = t11a
+        butterfly       d24, d21, d23, d21               @ d24 = t9,   d21 = t10
+        butterfly       d23, d27, d25, d27               @ d23 = t14,  d27 = t13
+        butterfly       d25, d29, d29, d17               @ d25 = t15a, d29 = t12a
+
+        mbutterfly0     d27, d21, d27, d21, d16, d30, q8, q15 @ d27 = t13a, d21 = t10a
+        mbutterfly0     d29, d28, d29, d28, d16, d30, q8, q15 @ d29 = t12,  d28 = t11
+
+        vswp            d27, d29                         @ d27 = t12, d29 = t13a
+        vswp            d28, d27                         @ d28 = t12, d27 = t11
+        butterfly       d16, d31, d18, d25               @ d16 = out[0], d31 = out[15]
+        butterfly       d17, d30, d19, d23               @ d17 = out[1], d30 = out[14]
+        butterfly_r     d25, d22, d22, d24               @ d25 = out[9], d22 = out[6]
+        butterfly       d23, d24, d11, d20               @ d23 = out[7], d24 = out[8]
+        butterfly       d18, d29, d8,  d29               @ d18 = out[2], d29 = out[13]
+        butterfly       d19, d28, d9,  d28               @ d19 = out[3], d28 = out[12]
+        vmov            d8,  d21                         @ d8  = t10a
+        butterfly       d20, d27, d10, d27               @ d20 = out[4], d27 = out[11]
+        butterfly       d21, d26, d26, d8                @ d21 = out[5], d26 = out[10]
+.endm
+
+.macro iadst16
+        movrel          r12, iadst16_coeffs
+        vld1.16         {q0},  [r12,:128]!
+        vmovl.s16       q1,  d1
+        vmovl.s16       q0,  d0
+
+        mbutterfly_l    q3,  q2,  d31, d16, d0[1], d0[0] @ q3  = t1,   q2  = t0
+        mbutterfly_l    q5,  q4,  d23, d24, d2[1], d2[0] @ q5  = t9,   q4  = t8
+        butterfly_n     d31, d24, q3,  q5,  q6,  q5      @ d31 = t1a,  d24 = t9a
+        mbutterfly_l    q7,  q6,  d29, d18, d1[1], d1[0] @ q7  = t3,   q6  = t2
+        butterfly_n     d16, d23, q2,  q4,  q3,  q4      @ d16 = t0a,  d23 = t8a
+        mbutterfly_l    q3,  q2,  d21, d26, d3[1], d3[0] @ q3  = t11,  q2  = t10
+
+        vld1.16         {q0},  [r12,:128]!
+        butterfly_n     d29, d26, q7,  q3,  q4,  q3      @ d29 = t3a,  d26 = t11a
+        vmovl.s16       q1,  d1
+        vmovl.s16       q0,  d0
+        mbutterfly_l    q5,  q4,  d27, d20, d0[1], d0[0] @ q5  = t5,   q4  = t4
+        butterfly_n     d18, d21, q6,  q2,  q3,  q2      @ d18 = t2a,  d21 = t10a
+
+        mbutterfly_l    q7,  q6,  d19, d28, d2[1], d2[0] @ q7  = t13,  q6  = t12
+        butterfly_n     d20, d28, q5,  q7,  q2,  q7      @ d20 = t5a,  d28 = t13a
+        mbutterfly_l    q3,  q2,  d25, d22, d1[1], d1[0] @ q3  = t7,   q2  = t6
+        butterfly_n     d27, d19, q4,  q6,  q5,  q6      @ d27 = t4a,  d19 = t12a
+
+        mbutterfly_l    q5,  q4,  d17, d30, d3[1], d3[0] @ q5  = t15,  q4  = t14
+        movrel          r12, idct_coeffs
+        vld1.16         {q0}, [r12,:128]
+        vmovl.s16       q1,  d1
+        vmovl.s16       q0,  d0
+        butterfly_n     d22, d30, q3,  q5,  q6,  q5      @ d22 = t7a,  d30 = t15a
+        mbutterfly_l    q7,  q6,  d23, d24, d2[0], d2[1] @ q7  = t9,   q6  = t8
+        butterfly_n     d25, d17, q2,  q4,  q3,  q4      @ d25 = t6a,  d17 = t14a
+
+        mbutterfly_l    q2,  q3,  d28, d19, d2[1], d2[0] @ q2  = t12,  q3  = t13
+        butterfly_n     d23, d19, q6,  q2,  q4,  q2      @ d23 = t8a,  d19 = t12a
+        mbutterfly_l    q5,  q4,  d21, d26, d3[0], d3[1] @ q5  = t11,  q4  = t10
+        butterfly_r     d4,  d27, d16, d27               @ d4  = t4,   d27 = t0
+        butterfly_n     d24, d28, q7,  q3,  q6,  q3      @ d24 = t9a,  d28 = t13a
+
+        mbutterfly_l    q6,  q7,  d30, d17, d3[1], d3[0] @ q6  = t14,  q7  = t15
+        butterfly_r     d5,  d20, d31, d20               @ d5  = t5,   d20 = t1
+        butterfly_n     d21, d17, q4,  q6,  q3,  q6      @ d21 = t10a, d17 = t14a
+        butterfly_n     d26, d30, q5,  q7,  q4,  q7      @ d26 = t11a, d30 = t15a
+
+        butterfly_r     d6,  d25, d18, d25               @ d6  = t6,   d25 = t2
+        butterfly_r     d7,  d22, d29, d22               @ d7  = t7,   d22 = t3
+
+        mbutterfly_l    q5,  q4,  d19, d28, d1[0], d1[1] @ q5  = t13,  q4  = t12
+        mbutterfly_l    q6,  q7,  d30, d17, d1[1], d1[0] @ q6  = t14,  q7  = t15
+
+        butterfly_n     d18, d30, q4,  q6,  q8,  q6      @ d18 = out[2],   d30 = t14a
+        butterfly_n     d29, d17, q5,  q7,  q6,  q7      @ d29 = -out[13], d17 = t15a
+        vneg.s32        d29, d29                         @ d29 = out[13]
+
+        mbutterfly_l    q5,  q4,  d4,  d5,  d1[0], d1[1] @ q5  = t5a,  q4  = t4a
+        mbutterfly_l    q6,  q7,  d7,  d6,  d1[1], d1[0] @ q6  = t6a,  q7  = t7a
+
+        butterfly       d2,  d6,  d27, d25               @ d2 = out[0], d6 = t2a
+        butterfly       d3,  d7,  d23, d21               @ d3 =-out[1], d7 = t10
+
+        butterfly_n     d19, d31, q4,  q6,  q2,  q4      @ d19 = -out[3],  d31 = t6
+        vneg.s32        d19, d19                         @ d19 = out[3]
+        butterfly_n     d28, d16, q5,  q7,  q2,  q5      @ d28 = out[12],  d16 = t7
+
+        butterfly       d5,  d8,  d20, d22               @ d5 =-out[15],d8 = t3a
+        butterfly       d4,  d9,  d24, d26               @ d4 = out[14],d9 = t11
+
+        mbutterfly0     d23, d24, d6,  d8,  d10, d11, q6,  q7, 1 @ d23 = out[7], d24 = out[8]
+        mbutterfly0     d20, d27, d16, d31, d10, d11, q6,  q7    @ d20 = out[4], d27 = out[11]
+        mbutterfly0     d22, d25, d9,  d7,  d10, d11, q6,  q7    @ d22 = out[6], d25 = out[9]
+        mbutterfly0     d21, d26, d30, d17, d10, d11, q6,  q7, 1 @ d21 = out[5], d26 = out[10]
+
+        vneg.s32        d31, d5                          @ d31 = out[15]
+        vneg.s32        d17, d3                          @ d17 = out[1]
+
+        vmov            d16, d2
+        vmov            d30, d4
+.endm
+
+.macro itxfm16_1d_funcs txfm
+@ Read a vertical 2x16 slice out of a 16x16 matrix, do a transform on it,
+@ transpose into a horizontal 16x2 slice and store.
+@ r0 = dst (temp buffer)
+@ r2 = src
+function \txfm\()16_1d_2x16_pass1_neon
+        mov             r12, #64
+        vmov.s32        q4,  #0
+.irp i, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
+        vld1.32         {d\i}, [r2,:64]
+        vst1.32         {d8},  [r2,:64], r12
+.endr
+
+        \txfm\()16
+
+        @ Do eight 2x2 transposes. Originally, d16-d31 contain the
+        @ 16 rows. Afterwards, d16-d17, d18-d19 etc contain the eight
+        @ transposed 2x2 blocks.
+        transpose32_8x_2x2 d16, d17, d18, d19, d20, d21, d22, d23, d24, d25, d26, d27, d28, d29, d30, d31
+
+        @ Store the transposed 2x2 blocks horizontally.
+.irp i, 16, 18, 20, 22, 24, 26, 28, 30, 17, 19, 21, 23, 25, 27, 29, 31
+        vst1.32         {d\i}, [r0,:64]!
+.endr
+        bx              lr
+endfunc
+
+@ Read a vertical 2x16 slice out of a 16x16 matrix, do a transform on it,
+@ load the destination pixels (from a similar 2x16 slice), add and store back.
+@ r0 = dst
+@ r1 = dst stride
+@ r2 = src (temp buffer)
+function \txfm\()16_1d_2x16_pass2_neon
+        mov             r12, #64
+.irp i, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
+        vld1.16         {d\i}, [r2,:64], r12
+.endr
+
+        add             r3,  r0,  r1
+        lsl             r1,  r1,  #1
+        \txfm\()16
+
+.macro load_add_store coef0, coef1, coef2, coef3
+        vrshr.s32       \coef0, \coef0, #6
+        vrshr.s32       \coef1, \coef1, #6
+
+        vld1.32         {d8[]},   [r0,:32], r1
+        vld1.32         {d8[1]},  [r3,:32], r1
+        vrshr.s32       \coef2, \coef2, #6
+        vrshr.s32       \coef3, \coef3, #6
+        vld1.32         {d9[]},   [r0,:32], r1
+        vld1.32         {d9[1]},  [r3,:32], r1
+        vaddw.u16       \coef0, \coef0, d8
+        vld1.32         {d10[]},  [r0,:32], r1
+        vld1.32         {d10[1]}, [r3,:32], r1
+        vaddw.u16       \coef1, \coef1, d9
+        vld1.32         {d11[]},  [r0,:32], r1
+        vld1.32         {d11[1]}, [r3,:32], r1
+
+        vqmovun.s32     d8,  \coef0
+        vdup.s16        q8,  r9
+        vqmovun.s32     d9,  \coef1
+        sub             r0,  r0,  r1, lsl #2
+        sub             r3,  r3,  r1, lsl #2
+        vaddw.u16       \coef2, \coef2, d10
+        vaddw.u16       \coef3, \coef3, d11
+        vmin.u16        q4,  q4,  q8
+        vst1.32         {d8[0]},  [r0,:32], r1
+        vst1.32         {d8[1]},  [r3,:32], r1
+        vqmovun.s32     d10, \coef2
+        vst1.32         {d9[0]},  [r0,:32], r1
+        vst1.32         {d9[1]},  [r3,:32], r1
+        vqmovun.s32     d11, \coef3
+        vmin.u16        q5,  q5,  q8
+
+        vst1.32         {d10[0]}, [r0,:32], r1
+        vst1.32         {d10[1]}, [r3,:32], r1
+        vst1.32         {d11[0]}, [r0,:32], r1
+        vst1.32         {d11[1]}, [r3,:32], r1
+.endm
+        load_add_store  q8,  q9,  q10, q11
+        load_add_store  q12, q13, q14, q15
+.purgem load_add_store
+
+        bx              lr
+endfunc
+.endm
+
+itxfm16_1d_funcs idct
+itxfm16_1d_funcs iadst
+
+@ This is the minimum eob value for each subpartition, in increments of 2
+const min_eob_idct_idct_16, align=4
+        .short  0, 3, 10, 22, 38, 62, 89, 121
+endconst
+
+.macro itxfm_func16x16 txfm1, txfm2
+function vp9_\txfm1\()_\txfm2\()_16x16_add_16_neon
+.ifc \txfm1\()_\txfm2,idct_idct
+        cmp             r3,  #1
+        beq             idct16x16_dc_add_neon
+.endif
+.ifnc \txfm1\()_\txfm2,idct_idct
+        vpush           {q4-q7}
+.else
+        vpush           {q4-q5}
+        movrel          r8,  min_eob_idct_idct_16 + 2
+.endif
+
+        @ Align the stack, allocate a temp buffer
+T       mov             r7,  sp
+T       and             r7,  r7,  #15
+A       and             r7,  sp,  #15
+        add             r7,  r7,  #1024
+        sub             sp,  sp,  r7
+
+        mov             r4,  r0
+        mov             r5,  r1
+        mov             r6,  r2
+
+.ifc \txfm1,idct
+        movrel          r12, idct_coeffs
+        vld1.16         {q0-q1}, [r12,:128]
+        vmovl.s16       q2,  d2
+        vmovl.s16       q3,  d3
+        vmovl.s16       q1,  d1
+        vmovl.s16       q0,  d0
+.endif
+
+.irp i, 0, 2, 4, 6, 8, 10, 12, 14
+        add             r0,  sp,  #(\i*64)
+.ifc \txfm1\()_\txfm2,idct_idct
+.if \i > 0
+        ldrh_post       r1,  r8,  #2
+        cmp             r3,  r1
+        it              le
+        movle           r1,  #(16 - \i)/2
+        ble             1f
+.endif
+.endif
+        add             r2,  r6,  #(\i*4)
+        bl              \txfm1\()16_1d_2x16_pass1_neon
+.endr
+
+.ifc \txfm1\()_\txfm2,idct_idct
+        b               3f
+1:
+        vmov.i16        q14, #0
+        vmov.i16        q15, #0
+2:
+        subs            r1,  r1,  #1
+        @ Unroll for 2 lines
+.rept 2
+        @ Fill one line with zeros
+        vst1.32         {q14-q15}, [r0,:128]!
+        vst1.32         {q14-q15}, [r0,:128]!
+.endr
+        bne             2b
+3:
+.endif
+
+.ifc \txfm1\()_\txfm2,iadst_idct
+        movrel          r12, idct_coeffs
+        vld1.16         {q0-q1}, [r12,:128]
+        vmovl.s16       q2,  d2
+        vmovl.s16       q3,  d3
+        vmovl.s16       q1,  d1
+        vmovl.s16       q0,  d0
+.endif
+.irp i, 0, 2, 4, 6, 8, 10, 12, 14
+        add             r0,  r4,  #(\i*2)
+        mov             r1,  r5
+        add             r2,  sp,  #(\i*4)
+        bl              \txfm2\()16_1d_2x16_pass2_neon
+.endr
+
+        add             sp,  sp,  r7
+.ifnc \txfm1\()_\txfm2,idct_idct
+        vpop            {q4-q7}
+.else
+        vpop            {q4-q5}
+.endif
+        pop             {r4-r9,pc}
+endfunc
+
+function ff_vp9_\txfm1\()_\txfm2\()_16x16_add_10_neon, export=1
+        push            {r4-r9,lr}
+        movw            r9,  #0x03ff
+        b               vp9_\txfm1\()_\txfm2\()_16x16_add_16_neon
+endfunc
+
+function ff_vp9_\txfm1\()_\txfm2\()_16x16_add_12_neon, export=1
+        push            {r4-r9,lr}
+        movw            r9,  #0x0fff
+        b               vp9_\txfm1\()_\txfm2\()_16x16_add_16_neon
+endfunc
+.endm
+
+itxfm_func16x16 idct,  idct
+itxfm_func16x16 iadst, idct
+itxfm_func16x16 idct,  iadst
+itxfm_func16x16 iadst, iadst
+.ltorg
+
+
+function idct32x32_dc_add_neon
+        movrel          r12, idct_coeffs
+        vld1.16         {d0}, [r12,:64]
+
+        vmov.i32        q2,  #0
+        vmovl.s16       q0,  d0
+
+        vld1.32         {d16[]}, [r2,:32]
+        vmull.s32       q8,  d16, d0[0]
+        vrshrn.s64      d16, q8,  #14
+        vmull.s32       q8,  d16, d0[0]
+        vrshrn.s64      d16, q8,  #14
+        vdup.32         q8,  d16[0]
+        vst1.32         {d4[0]}, [r2,:32]
+
+        vrshr.s32       q8,  q8,  #6
+        vdup.s16        q15, r9
+
+        mov             r3,  r0
+        mov             r12, #32
+        sub             r1,  r1,  #32
+1:
+        @ Loop to add the constant from q8 into all 32x32 outputs
+        subs            r12, r12, #1
+        vld1.16         {q0-q1},  [r0,:128]!
+        vaddw.u16       q9,  q8,  d0
+        vaddw.u16       q10, q8,  d1
+        vld1.16         {q2-q3},  [r0,:128], r1
+        vaddw.u16       q11, q8,  d2
+        vaddw.u16       q12, q8,  d3
+        vaddw.u16       q13, q8,  d4
+        vaddw.u16       q14, q8,  d5
+        vqmovun.s32     d0,  q9
+        vaddw.u16       q9,  q8,  d6
+        vqmovun.s32     d1,  q10
+        vaddw.u16       q10, q8,  d7
+        vqmovun.s32     d2,  q11
+        vqmovun.s32     d3,  q12
+        vqmovun.s32     d4,  q13
+        vqmovun.s32     d5,  q14
+        vmin.u16        q0,  q0,  q15
+        vmin.u16        q1,  q1,  q15
+        vqmovun.s32     d6,  q9
+        vqmovun.s32     d7,  q10
+        vst1.16         {q0-q1},  [r3,:128]!
+        vmin.u16        q2,  q2,  q15
+        vmin.u16        q3,  q3,  q15
+        vst1.16         {q2-q3},  [r3,:128], r1
+        bne             1b
+
+        pop             {r4-r9,pc}
+endfunc
+
+.macro idct32_odd
+        movrel          r12, idct_coeffs
+        add             r12, r12, #32
+        vld1.16         {q0-q1}, [r12,:128]
+        vmovl.s16       q2,  d2
+        vmovl.s16       q3,  d3
+        vmovl.s16       q1,  d1
+        vmovl.s16       q0,  d0
+
+        mbutterfly      d16, d31, d0[0], d0[1], q4, q5 @ d16 = t16a, d31 = t31a
+        mbutterfly      d24, d23, d1[0], d1[1], q4, q5 @ d24 = t17a, d23 = t30a
+        mbutterfly      d20, d27, d2[0], d2[1], q4, q5 @ d20 = t18a, d27 = t29a
+        mbutterfly      d28, d19, d3[0], d3[1], q4, q5 @ d28 = t19a, d19 = t28a
+        mbutterfly      d18, d29, d4[0], d4[1], q4, q5 @ d18 = t20a, d29 = t27a
+        mbutterfly      d26, d21, d5[0], d5[1], q4, q5 @ d26 = t21a, d21 = t26a
+        mbutterfly      d22, d25, d6[0], d6[1], q4, q5 @ d22 = t22a, d25 = t25a
+        mbutterfly      d30, d17, d7[0], d7[1], q4, q5 @ d30 = t23a, d17 = t24a
+
+        sub             r12, r12, #32
+        vld1.16         {q0}, [r12,:128]
+        vmovl.s16       q1,  d1
+        vmovl.s16       q0,  d0
+
+        butterfly       d4,  d24, d16, d24 @ d4  = t16, d24 = t17
+        butterfly       d5,  d20, d28, d20 @ d5  = t19, d20 = t18
+        butterfly       d6,  d26, d18, d26 @ d6  = t20, d26 = t21
+        butterfly       d7,  d22, d30, d22 @ d7  = t23, d22 = t22
+        butterfly       d28, d25, d17, d25 @ d28 = t24, d25 = t25
+        butterfly       d30, d21, d29, d21 @ d30 = t27, d21 = t26
+        butterfly       d29, d23, d31, d23 @ d29 = t31, d23 = t30
+        butterfly       d31, d27, d19, d27 @ d31 = t28, d27 = t29
+
+        mbutterfly      d23, d24, d2[0], d2[1], q8, q9        @ d23 = t17a, d24 = t30a
+        mbutterfly      d27, d20, d2[0], d2[1], q8, q9, neg=1 @ d27 = t29a, d20 = t18a
+        mbutterfly      d21, d26, d3[0], d3[1], q8, q9        @ d21 = t21a, d26 = t26a
+        mbutterfly      d25, d22, d3[0], d3[1], q8, q9, neg=1 @ d25 = t25a, d22 = t22a
+
+        butterfly       d16, d5,  d4,  d5  @ d16 = t16a, d5  = t19a
+        butterfly       d17, d20, d23, d20 @ d17 = t17,  d20 = t18
+        butterfly       d18, d6,  d7,  d6  @ d18 = t23a, d6  = t20a
+        butterfly       d19, d21, d22, d21 @ d19 = t22,  d21 = t21
+        butterfly       d4,  d28, d28, d30 @ d4  = t24a, d28 = t27a
+        butterfly       d23, d26, d25, d26 @ d23 = t25,  d26 = t26
+        butterfly       d7,  d29, d29, d31 @ d7  = t31a, d29 = t28a
+        butterfly       d22, d27, d24, d27 @ d22 = t30,  d27 = t29
+
+        mbutterfly      d27, d20, d1[0], d1[1], q12, q15        @ d27 = t18a, d20 = t29a
+        mbutterfly      d29, d5,  d1[0], d1[1], q12, q15        @ d29 = t19,  d5  = t28
+        mbutterfly      d28, d6,  d1[0], d1[1], q12, q15, neg=1 @ d28 = t27,  d6  = t20
+        mbutterfly      d26, d21, d1[0], d1[1], q12, q15, neg=1 @ d26 = t26a, d21 = t21a
+
+        butterfly       d31, d24, d7,  d4  @ d31 = t31,  d24 = t24
+        butterfly       d30, d25, d22, d23 @ d30 = t30a, d25 = t25a
+        butterfly_r     d23, d16, d16, d18 @ d23 = t23,  d16 = t16
+        butterfly_r     d22, d17, d17, d19 @ d22 = t22a, d17 = t17a
+        butterfly       d18, d21, d27, d21 @ d18 = t18,  d21 = t21
+        butterfly_r     d27, d28, d5,  d28 @ d27 = t27a, d28 = t28a
+        butterfly       d4,  d26, d20, d26 @ d4  = t29,  d26 = t26
+        butterfly       d19, d20, d29, d6  @ d19 = t19a, d20 = t20
+        vmov            d29, d4            @ d29 = t29
+
+        mbutterfly0     d27, d20, d27, d20, d4, d6, q2, q3 @ d27 = t27,  d20 = t20
+        mbutterfly0     d26, d21, d26, d21, d4, d6, q2, q3 @ d26 = t26a, d21 = t21a
+        mbutterfly0     d25, d22, d25, d22, d4, d6, q2, q3 @ d25 = t25,  d22 = t22
+        mbutterfly0     d24, d23, d24, d23, d4, d6, q2, q3 @ d24 = t24a, d23 = t23a
+.endm
+
+@ Do a 32-point IDCT of a 2x32 slice out of a 32x32 matrix.
+@ We don't have register space to do a single pass IDCT of 2x32 though;
+@ instead, the 32-point IDCT is decomposed into two 16-point IDCTs:
+@ a normal IDCT16 with every other input component (the even ones, with
+@ each output written twice), followed by a separate 16-point IDCT
+@ of the odd inputs, added/subtracted onto the outputs of the first idct16.
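+@ As a rough scalar illustration of the decomposition (not actual code):
+@ with even[0..15] produced by the idct16 macro from in[0], in[2], ...,
+@ in[30], and odd[0..15] produced by idct32_odd from in[1], in[3], ...,
+@ in[31], the final outputs are
+@   out[i]      = even[i] + odd[i]
+@   out[31 - i] = even[i] - odd[i],   for i = 0..15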
+@ r0 = dst (temp buffer)
+@ r1 = unused
+@ r2 = src
+function idct32_1d_2x32_pass1_neon
+        movrel          r12, idct_coeffs
+        vld1.16         {q0-q1}, [r12,:128]
+        vmovl.s16       q2,  d2
+        vmovl.s16       q3,  d3
+        vmovl.s16       q1,  d1
+        vmovl.s16       q0,  d0
+
+        @ Double stride of the input, since we only read every other line
+        mov             r12, #256
+        vmov.s32        d8,  #0
+
+        @ d16 = IN(0), d17 = IN(2) ... d31 = IN(30)
+.irp i, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
+        vld1.32         {d\i}, [r2,:64]
+        vst1.32         {d8},  [r2,:64], r12
+.endr
+
+        idct16
+
+        @ Do eight 2x2 transposes. Originally, d16-d31 contain the
+        @ 16 rows. Afterwards, d16-d17, d18-d19 etc contain the eight
+        @ transposed 2x2 blocks.
+        transpose32_8x_2x2 d16, d17, d18, d19, d20, d21, d22, d23, d24, d25, d26, d27, d28, d29, d30, d31
+
+        @ Store the registers a, b, c, d, e, f, g, h horizontally, followed
+        @ by the same registers h, g, f, e, d, c, b, a mirrored.
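+        @ (This lays out each 32-element row so that the odd-input IDCT
+        @ results later can be added to the first half and subtracted,
+        @ mirrored, from the second half.)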
+.macro store_rev a, b, c, d, e, f, g, h
+.irp i, \a, \b, \c, \d, \e, \f, \g, \h
+        vst1.32         {d\i}, [r0,:64]!
+        vrev64.32       d\i, d\i
+.endr
+.irp i, \h, \g, \f, \e, \d, \c, \b, \a
+        vst1.32         {d\i}, [r0,:64]!
+.endr
+.endm
+        store_rev       16, 18, 20, 22, 24, 26, 28, 30
+        store_rev       17, 19, 21, 23, 25, 27, 29, 31
+        sub             r0,  r0,  #256
+.purgem store_rev
+
+        @ Move r2 back to the start of the input, and move
+        @ to the first odd row
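+        @ (16 rows were read with a 256 byte stride, 4096 bytes in total;
+        @ row 1 starts 128 bytes into the input.)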
+        sub             r2,  r2,  r12, lsl #4
+        add             r2,  r2,  #128
+
+        vmov.s32        d4,  #0
+        @ d16 = IN(1), d17 = IN(3) ... d31 = IN(31)
+.irp i, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
+        vld1.32         {d\i}, [r2,:64]
+        vst1.32         {d4},  [r2,:64], r12
+.endr
+
+        idct32_odd
+
+        transpose32_8x_2x2 d31, d30, d29, d28, d27, d26, d25, d24, d23, d22, d21, d20, d19, d18, d17, d16
+
+        @ Store the registers a, b, c, d, e, f, g, h horizontally,
+        @ adding into the output first, and then mirrored, subtracted
+        @ from the output.
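+        @ (This is the even/odd recombination outlined above.)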
+.macro store_rev a, b, c, d, e, f, g, h
+.irp i, \a, \b, \c, \d, \e, \f, \g, \h
+        vld1.32         {d4},  [r0,:64]
+        vadd.s32        d4, d4, d\i
+        vst1.32         {d4},  [r0,:64]!
+        vrev64.32       d\i, d\i
+.endr
+.irp i, \h, \g, \f, \e, \d, \c, \b, \a
+        vld1.32         {d4},  [r0,:64]
+        vsub.s32        d4, d4, d\i
+        vst1.32         {d4},  [r0,:64]!
+.endr
+.endm
+
+        store_rev       31, 29, 27, 25, 23, 21, 19, 17
+        store_rev       30, 28, 26, 24, 22, 20, 18, 16
+.purgem store_rev
+        bx              lr
+endfunc
+.ltorg
+
+@ This is mostly the same as 2x32_pass1, but without the transpose;
+@ it uses the source as a temp buffer between the two idct passes,
+@ and adds into the destination.
+@ r0 = dst
+@ r1 = dst stride
+@ r2 = src (temp buffer)
+function idct32_1d_2x32_pass2_neon
+        movrel          r12, idct_coeffs
+        vld1.16         {q0-q1}, [r12,:128]
+        vmovl.s16       q2,  d2
+        vmovl.s16       q3,  d3
+        vmovl.s16       q1,  d1
+        vmovl.s16       q0,  d0
+
+        mov             r12, #256
+        @ d16 = IN(0), d17 = IN(2) ... d31 = IN(30)
+.irp i, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
+        vld1.32         {d\i}, [r2,:64], r12
+.endr
+        sub             r2,  r2,  r12, lsl #4
+
+        idct16
+
+.irp i, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
+        vst1.32         {d\i}, [r2,:64], r12
+.endr
+
+        sub             r2,  r2,  r12, lsl #4
+        add             r2,  r2,  #128
+
+        @ d16 = IN(1), d17 = IN(3) ... d31 = IN(31)
+.irp i, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
+        vld1.32         {d\i}, [r2,:64], r12
+.endr
+        sub             r2,  r2,  r12, lsl #4
+        sub             r2,  r2,  #128
+
+        idct32_odd
+
+        mov             r12, #256
+        vdup.s16        q4,  r9
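+        @ load_acc_store below adds the odd-input IDCT outputs to (or
+        @ subtracts them from) the idct16 results stored above, rounds,
+        @ adds the destination pixels and clamps the result to the
+        @ range [0, pixel_max] (q4).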
+.macro load_acc_store a, b, c, d, neg=0
+        vld1.32         {d4},  [r2,:64], r12
+        vld1.32         {d5},  [r2,:64], r12
+.if \neg == 0
+        vadd.s32        d4, d4, d\a
+        vld1.32         {d6},  [r2,:64], r12
+        vadd.s32        d5, d5, d\b
+        vld1.32         {d7},  [r2,:64], r12
+        vadd.s32        d6, d6, d\c
+        vadd.s32        d7, d7, d\d
+.else
+        vsub.s32        d4, d4, d\a
+        vld1.32         {d6},  [r2,:64], r12
+        vsub.s32        d5, d5, d\b
+        vld1.32         {d7},  [r2,:64], r12
+        vsub.s32        d6, d6, d\c
+        vsub.s32        d7, d7, d\d
+.endif
+        vld1.32         {d2[]},   [r0,:32], r1
+        vld1.32         {d2[1]},  [r0,:32], r1
+        vrshr.s32       q2, q2, #6
+        vld1.32         {d3[]},   [r0,:32], r1
+        vrshr.s32       q3, q3, #6
+        vld1.32         {d3[1]},  [r0,:32], r1
+        sub             r0,  r0,  r1, lsl #2
+        vaddw.u16       q2,  q2,  d2
+        vaddw.u16       q3,  q3,  d3
+        vqmovun.s32     d4,  q2
+        vqmovun.s32     d5,  q3
+        vmin.u16        q2,  q2,  q4
+        vst1.32         {d4[0]},  [r0,:32], r1
+        vst1.32         {d4[1]},  [r0,:32], r1
+        vst1.32         {d5[0]},  [r0,:32], r1
+        vst1.32         {d5[1]},  [r0,:32], r1
+.endm
+        load_acc_store  31, 30, 29, 28
+        load_acc_store  27, 26, 25, 24
+        load_acc_store  23, 22, 21, 20
+        load_acc_store  19, 18, 17, 16
+        sub             r2,  r2,  r12
+        neg             r12, r12
+        load_acc_store  16, 17, 18, 19, 1
+        load_acc_store  20, 21, 22, 23, 1
+        load_acc_store  24, 25, 26, 27, 1
+        load_acc_store  28, 29, 30, 31, 1
+.purgem load_acc_store
+        bx              lr
+endfunc
+
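+@ Per 2-column slice of the input, the eob threshold at or below which
+@ that slice and all following slices only contain zero coefficients,
+@ so that pass 1 can be skipped for them.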
+const min_eob_idct_idct_32, align=4
+        .short  0, 3, 9, 21, 34, 51, 70, 98, 135, 176, 240, 258, 336, 357, 448, 472
+endconst
+
+function vp9_idct_idct_32x32_add_16_neon
+        cmp             r3,  #1
+        beq             idct32x32_dc_add_neon
+        vpush           {q4-q5}
+        movrel          r8,  min_eob_idct_idct_32 + 2
+
+        @ Align the stack, allocate a temp buffer
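+        @ (32 * 32 coefficients of 4 bytes each = 4096 bytes, plus the
+        @ 0-15 bytes needed to realign the stack pointer to 16 bytes.)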
+T       mov             r7,  sp
+T       and             r7,  r7,  #15
+A       and             r7,  sp,  #15
+        add             r7,  r7,  #4096
+        sub             sp,  sp,  r7
+
+        mov             r4,  r0
+        mov             r5,  r1
+        mov             r6,  r2
+
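+        @ First pass: process the input in slices of 2 columns; slices
+        @ past the eob are skipped and zero-filled in the temp buffer.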
+.irp i, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
+        add             r0,  sp,  #(\i*128)
+.if \i > 0
+        ldrh_post       r1,  r8,  #2
+        cmp             r3,  r1
+        it              le
+        movle           r1,  #(32 - \i)/2
+        ble             1f
+.endif
+        add             r2,  r6,  #(\i*4)
+        bl              idct32_1d_2x32_pass1_neon
+.endr
+        b               3f
+
+1:
+        @ Write zeros to the temp buffer for pass 2
+        vmov.i16        q14, #0
+        vmov.i16        q15, #0
+2:
+        subs            r1,  r1,  #1
+.rept 2
+        @ Fill one line with zeros
+        vst1.16         {q14-q15}, [r0,:128]!
+        vst1.16         {q14-q15}, [r0,:128]!
+        vst1.16         {q14-q15}, [r0,:128]!
+        vst1.16         {q14-q15}, [r0,:128]!
+.endr
+        bne             2b
+3:
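+        @ Second pass: transform the temp buffer contents, 2 columns at
+        @ a time, and add the result into the destination.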
+.irp i, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
+        add             r0,  r4,  #(\i*2)
+        mov             r1,  r5
+        add             r2,  sp,  #(\i*4)
+        bl              idct32_1d_2x32_pass2_neon
+.endr
+
+        add             sp,  sp,  r7
+        vpop            {q4-q5}
+        pop             {r4-r9,pc}
+endfunc
+
+function ff_vp9_idct_idct_32x32_add_10_neon, export=1
+        push            {r4-r9,lr}
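+        @ r9 = pixel max for 10 bit content, (1 << 10) - 1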
+        movw            r9,  #0x03ff
+        b               vp9_idct_idct_32x32_add_16_neon
+endfunc
+
+function ff_vp9_idct_idct_32x32_add_12_neon, export=1
+        push            {r4-r9,lr}
+        movw            r9,  #0x0fff
+        b               vp9_idct_idct_32x32_add_16_neon
+endfunc
-- 
2.7.4


