[FFmpeg-devel] [PATCH 6/8] aarch64: Add NEON optimizations for 10 and 12 bit vp9 MC

Martin Storsjö martin at martin.st
Wed Jan 18 23:45:13 EET 2017


This work is sponsored by, and copyright, Google.

This mostly has the same differences from the 8 bit version as
the arm version does. For the horizontal filters, we process 16
pixels in parallel as well. For the 8 pixel wide vertical filters,
we can accumulate 4 rows before storing, just as in the 8 bit
version.
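
For reference, the per-output-pixel operation being vectorized is
roughly the following scalar model (an illustrative sketch only, not
the actual C template code in FFmpeg; the function name and layout
here are made up):

    #include <stdint.h>

    /* 8-tap horizontal "put" for one row of 16 bit pixels: multiply-
     * accumulate the 8 taps, round and shift by 7, then clamp to
     * [0, (1 << bpp) - 1]. This matches the smull/smlal, sqrshrun #7
     * and umin sequence in the NEON code below. */
    static void put_8tap_h_row(uint16_t *dst, const uint16_t *src,
                               const int16_t *filter, int w, int bpp)
    {
        const int max = (1 << bpp) - 1;
        for (int x = 0; x < w; x++) {
            int sum = 0;
            for (int k = 0; k < 8; k++)
                sum += filter[k] * src[x + k - 3];
            sum = (sum + 64) >> 7;
            dst[x] = sum < 0 ? 0 : (sum > max ? max : sum);
        }
    }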

Examples of runtimes vs the 32 bit version, on a Cortex A53:
                                           ARM   AArch64
vp9_avg4_10bpp_neon:                      35.7      30.7
vp9_avg8_10bpp_neon:                      93.5      84.7
vp9_avg16_10bpp_neon:                    324.4     296.6
vp9_avg32_10bpp_neon:                   1236.5    1148.2
vp9_avg64_10bpp_neon:                   4639.6    4571.1
vp9_avg_8tap_smooth_4h_10bpp_neon:       130.0     128.0
vp9_avg_8tap_smooth_4hv_10bpp_neon:      440.0     440.5
vp9_avg_8tap_smooth_4v_10bpp_neon:       114.0     105.5
vp9_avg_8tap_smooth_8h_10bpp_neon:       327.0     314.0
vp9_avg_8tap_smooth_8hv_10bpp_neon:      918.7     865.4
vp9_avg_8tap_smooth_8v_10bpp_neon:       330.0     300.2
vp9_avg_8tap_smooth_16h_10bpp_neon:     1187.5    1155.5
vp9_avg_8tap_smooth_16hv_10bpp_neon:    2663.1    2591.0
vp9_avg_8tap_smooth_16v_10bpp_neon:     1107.4    1078.3
vp9_avg_8tap_smooth_64h_10bpp_neon:    17754.6   17454.7
vp9_avg_8tap_smooth_64hv_10bpp_neon:   33285.2   33001.5
vp9_avg_8tap_smooth_64v_10bpp_neon:    16066.9   16048.6
vp9_put4_10bpp_neon:                      25.5      21.7
vp9_put8_10bpp_neon:                      56.0      52.0
vp9_put16_10bpp_neon/armv8:              183.0     163.1
vp9_put32_10bpp_neon/armv8:              678.6     563.1
vp9_put64_10bpp_neon/armv8:             2679.9    2195.8
vp9_put_8tap_smooth_4h_10bpp_neon:       120.0     118.0
vp9_put_8tap_smooth_4hv_10bpp_neon:      435.2     435.0
vp9_put_8tap_smooth_4v_10bpp_neon:       107.0      98.2
vp9_put_8tap_smooth_8h_10bpp_neon:       303.0     290.0
vp9_put_8tap_smooth_8hv_10bpp_neon:      893.7     828.7
vp9_put_8tap_smooth_8v_10bpp_neon:       305.5     263.5
vp9_put_8tap_smooth_16h_10bpp_neon:     1089.1    1059.2
vp9_put_8tap_smooth_16hv_10bpp_neon:    2578.8    2452.4
vp9_put_8tap_smooth_16v_10bpp_neon:     1009.5     933.5
vp9_put_8tap_smooth_64h_10bpp_neon:    16223.4   15918.6
vp9_put_8tap_smooth_64hv_10bpp_neon:   32153.0   31016.2
vp9_put_8tap_smooth_64v_10bpp_neon:    14516.5   13748.1

These are generally about as fast as the corresponding ARM
routines on the same CPU (at least on the A53), and in most
cases marginally faster.

The speedup vs C code is around 4-9x.
---
 libavcodec/aarch64/Makefile                        |   5 +-
 libavcodec/aarch64/vp9dsp_init.h                   |  29 +
 libavcodec/aarch64/vp9dsp_init_10bpp_aarch64.c     |  23 +
 libavcodec/aarch64/vp9dsp_init_12bpp_aarch64.c     |  23 +
 .../aarch64/vp9dsp_init_16bpp_aarch64_template.c   | 163 ++++++
 libavcodec/aarch64/vp9dsp_init_aarch64.c           |   9 +-
 libavcodec/aarch64/vp9mc_16bpp_neon.S              | 631 +++++++++++++++++++++
 7 files changed, 881 insertions(+), 2 deletions(-)
 create mode 100644 libavcodec/aarch64/vp9dsp_init.h
 create mode 100644 libavcodec/aarch64/vp9dsp_init_10bpp_aarch64.c
 create mode 100644 libavcodec/aarch64/vp9dsp_init_12bpp_aarch64.c
 create mode 100644 libavcodec/aarch64/vp9dsp_init_16bpp_aarch64_template.c
 create mode 100644 libavcodec/aarch64/vp9mc_16bpp_neon.S

diff --git a/libavcodec/aarch64/Makefile b/libavcodec/aarch64/Makefile
index 5593863..0766e90 100644
--- a/libavcodec/aarch64/Makefile
+++ b/libavcodec/aarch64/Makefile
@@ -15,7 +15,9 @@ OBJS-$(CONFIG_DCA_DECODER)              += aarch64/synth_filter_init.o
 OBJS-$(CONFIG_RV40_DECODER)             += aarch64/rv40dsp_init_aarch64.o
 OBJS-$(CONFIG_VC1DSP)                   += aarch64/vc1dsp_init_aarch64.o
 OBJS-$(CONFIG_VORBIS_DECODER)           += aarch64/vorbisdsp_init.o
-OBJS-$(CONFIG_VP9_DECODER)              += aarch64/vp9dsp_init_aarch64.o
+OBJS-$(CONFIG_VP9_DECODER)              += aarch64/vp9dsp_init_10bpp_aarch64.o \
+                                           aarch64/vp9dsp_init_12bpp_aarch64.o \
+                                           aarch64/vp9dsp_init_aarch64.o
 
 # ARMv8 optimizations
 
@@ -42,4 +44,5 @@ NEON-OBJS-$(CONFIG_DCA_DECODER)         += aarch64/synth_filter_neon.o
 NEON-OBJS-$(CONFIG_VORBIS_DECODER)      += aarch64/vorbisdsp_neon.o
 NEON-OBJS-$(CONFIG_VP9_DECODER)         += aarch64/vp9itxfm_neon.o             \
                                            aarch64/vp9lpf_neon.o               \
+                                           aarch64/vp9mc_16bpp_neon.o          \
                                            aarch64/vp9mc_neon.o
diff --git a/libavcodec/aarch64/vp9dsp_init.h b/libavcodec/aarch64/vp9dsp_init.h
new file mode 100644
index 0000000..9df1752
--- /dev/null
+++ b/libavcodec/aarch64/vp9dsp_init.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2017 Google Inc.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_AARCH64_VP9DSP_INIT_H
+#define AVCODEC_AARCH64_VP9DSP_INIT_H
+
+#include "libavcodec/vp9dsp.h"
+
+void ff_vp9dsp_init_10bpp_aarch64(VP9DSPContext *dsp);
+void ff_vp9dsp_init_12bpp_aarch64(VP9DSPContext *dsp);
+
+#endif /* AVCODEC_AARCH64_VP9DSP_INIT_H */
diff --git a/libavcodec/aarch64/vp9dsp_init_10bpp_aarch64.c b/libavcodec/aarch64/vp9dsp_init_10bpp_aarch64.c
new file mode 100644
index 0000000..0fa0d7f
--- /dev/null
+++ b/libavcodec/aarch64/vp9dsp_init_10bpp_aarch64.c
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2017 Google Inc.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#define BPP 10
+#define INIT_FUNC ff_vp9dsp_init_10bpp_aarch64
+#include "vp9dsp_init_16bpp_aarch64_template.c"
diff --git a/libavcodec/aarch64/vp9dsp_init_12bpp_aarch64.c b/libavcodec/aarch64/vp9dsp_init_12bpp_aarch64.c
new file mode 100644
index 0000000..dae2232
--- /dev/null
+++ b/libavcodec/aarch64/vp9dsp_init_12bpp_aarch64.c
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2017 Google Inc.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#define BPP 12
+#define INIT_FUNC ff_vp9dsp_init_12bpp_aarch64
+#include "vp9dsp_init_16bpp_aarch64_template.c"
diff --git a/libavcodec/aarch64/vp9dsp_init_16bpp_aarch64_template.c b/libavcodec/aarch64/vp9dsp_init_16bpp_aarch64_template.c
new file mode 100644
index 0000000..4719ea3
--- /dev/null
+++ b/libavcodec/aarch64/vp9dsp_init_16bpp_aarch64_template.c
@@ -0,0 +1,163 @@
+/*
+ * Copyright (c) 2017 Google Inc.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdint.h>
+
+#include "libavutil/attributes.h"
+#include "libavutil/aarch64/cpu.h"
+#include "vp9dsp_init.h"
+
+#define declare_fpel(type, sz, suffix)                                          \
+void ff_vp9_##type##sz##suffix##_neon(uint8_t *dst, ptrdiff_t dst_stride,       \
+                                      const uint8_t *src, ptrdiff_t src_stride, \
+                                      int h, int mx, int my)
+
+#define decl_mc_func(op, filter, dir, sz, bpp)                                                   \
+void ff_vp9_##op##_##filter##sz##_##dir##_##bpp##_neon(uint8_t *dst, ptrdiff_t dst_stride,       \
+                                                       const uint8_t *src, ptrdiff_t src_stride, \
+                                                       int h, int mx, int my)
+
+#define define_8tap_2d_fn(op, filter, sz, bpp)                                      \
+static void op##_##filter##sz##_hv_##bpp##_neon(uint8_t *dst, ptrdiff_t dst_stride, \
+                                                const uint8_t *src,                 \
+                                                ptrdiff_t src_stride,               \
+                                                int h, int mx, int my)              \
+{                                                                                   \
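+    /* temp holds (h + 8) rows of sz pixels, 2 bytes each; h is at most     \
+     * 2 * sz for sz < 64 and sz for sz == 64, which gives the size         \
+     * expression below. */                                                 \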
+    LOCAL_ALIGNED_16(uint8_t, temp, [((1 + (sz < 64)) * sz + 8) * sz * 2]);         \
+    /* We only need h + 7 lines, but the horizontal filter assumes an               \
+     * even number of rows, so filter h + 8 lines here. */                          \
+    ff_vp9_put_##filter##sz##_h_##bpp##_neon(temp, 2 * sz,                          \
+                                             src - 3 * src_stride, src_stride,      \
+                                             h + 8, mx, 0);                         \
+    ff_vp9_##op##_##filter##sz##_v_##bpp##_neon(dst, dst_stride,                    \
+                                                temp + 3 * 2 * sz, 2 * sz,          \
+                                                h, 0, my);                          \
+}
+
+#define decl_filter_funcs(op, dir, sz, bpp)  \
+    decl_mc_func(op, regular, dir, sz, bpp); \
+    decl_mc_func(op, sharp,   dir, sz, bpp); \
+    decl_mc_func(op, smooth,  dir, sz, bpp)
+
+#define decl_mc_funcs(sz, bpp)           \
+    decl_filter_funcs(put, h,  sz, bpp); \
+    decl_filter_funcs(avg, h,  sz, bpp); \
+    decl_filter_funcs(put, v,  sz, bpp); \
+    decl_filter_funcs(avg, v,  sz, bpp); \
+    decl_filter_funcs(put, hv, sz, bpp); \
+    decl_filter_funcs(avg, hv, sz, bpp)
+
+#define ff_vp9_copy32_neon  ff_vp9_copy32_aarch64
+#define ff_vp9_copy64_neon  ff_vp9_copy64_aarch64
+#define ff_vp9_copy128_neon ff_vp9_copy128_aarch64
+
+declare_fpel(copy, 128, );
+declare_fpel(copy, 64,  );
+declare_fpel(copy, 32,  );
+declare_fpel(copy, 16,  );
+declare_fpel(copy, 8,   );
+declare_fpel(avg, 64, _16);
+declare_fpel(avg, 32, _16);
+declare_fpel(avg, 16, _16);
+declare_fpel(avg, 8,  _16);
+declare_fpel(avg, 4,  _16);
+
+decl_mc_funcs(64, BPP);
+decl_mc_funcs(32, BPP);
+decl_mc_funcs(16, BPP);
+decl_mc_funcs(8, BPP);
+decl_mc_funcs(4, BPP);
+
+#define define_8tap_2d_funcs(sz, bpp)        \
+    define_8tap_2d_fn(put, regular, sz, bpp) \
+    define_8tap_2d_fn(put, sharp,   sz, bpp) \
+    define_8tap_2d_fn(put, smooth,  sz, bpp) \
+    define_8tap_2d_fn(avg, regular, sz, bpp) \
+    define_8tap_2d_fn(avg, sharp,   sz, bpp) \
+    define_8tap_2d_fn(avg, smooth,  sz, bpp)
+
+define_8tap_2d_funcs(64, BPP)
+define_8tap_2d_funcs(32, BPP)
+define_8tap_2d_funcs(16, BPP)
+define_8tap_2d_funcs(8,  BPP)
+define_8tap_2d_funcs(4,  BPP)
+
+static av_cold void vp9dsp_mc_init_aarch64(VP9DSPContext *dsp)
+{
+    int cpu_flags = av_get_cpu_flags();
+
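+/* At the full-pel position ([0][0]), all four filter types resolve to
+ * the same plain copy or avg, so one function is assigned to each. */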
+#define init_fpel(idx1, idx2, sz, type, suffix)      \
+    dsp->mc[idx1][FILTER_8TAP_SMOOTH ][idx2][0][0] = \
+    dsp->mc[idx1][FILTER_8TAP_REGULAR][idx2][0][0] = \
+    dsp->mc[idx1][FILTER_8TAP_SHARP  ][idx2][0][0] = \
+    dsp->mc[idx1][FILTER_BILINEAR    ][idx2][0][0] = ff_vp9_##type##sz##suffix
+
+#define init_copy(idx, sz, suffix) \
+    init_fpel(idx, 0, sz, copy, suffix)
+
+#define init_avg(idx, sz, suffix) \
+    init_fpel(idx, 1, sz, avg,  suffix)
+
+#define init_copy_avg(idx, sz1, sz2) \
+    init_copy(idx, sz2, _neon);      \
+    init_avg (idx, sz1, _16_neon)
+
+    if (have_armv8(cpu_flags)) {
+        init_copy(0, 128, _aarch64);
+        init_copy(1, 64,  _aarch64);
+        init_copy(2, 32,  _aarch64);
+    }
+
+    if (have_neon(cpu_flags)) {
+#define init_mc_func(idx1, idx2, op, filter, fname, dir, mx, my, sz, pfx, bpp) \
+    dsp->mc[idx1][filter][idx2][mx][my] = pfx##op##_##fname##sz##_##dir##_##bpp##_neon
+
+#define init_mc_funcs(idx, dir, mx, my, sz, pfx, bpp)                                   \
+    init_mc_func(idx, 0, put, FILTER_8TAP_REGULAR, regular, dir, mx, my, sz, pfx, bpp); \
+    init_mc_func(idx, 0, put, FILTER_8TAP_SHARP,   sharp,   dir, mx, my, sz, pfx, bpp); \
+    init_mc_func(idx, 0, put, FILTER_8TAP_SMOOTH,  smooth,  dir, mx, my, sz, pfx, bpp); \
+    init_mc_func(idx, 1, avg, FILTER_8TAP_REGULAR, regular, dir, mx, my, sz, pfx, bpp); \
+    init_mc_func(idx, 1, avg, FILTER_8TAP_SHARP,   sharp,   dir, mx, my, sz, pfx, bpp); \
+    init_mc_func(idx, 1, avg, FILTER_8TAP_SMOOTH,  smooth,  dir, mx, my, sz, pfx, bpp)
+
+#define init_mc_funcs_dirs(idx, sz, bpp)            \
+    init_mc_funcs(idx, v,  0, 1, sz, ff_vp9_, bpp); \
+    init_mc_funcs(idx, h,  1, 0, sz, ff_vp9_, bpp); \
+    init_mc_funcs(idx, hv, 1, 1, sz,        , bpp)
+
+        init_avg(0, 64, _16_neon);
+        init_avg(1, 32, _16_neon);
+        init_avg(2, 16, _16_neon);
+        init_copy_avg(3, 8, 16);
+        init_copy_avg(4, 4, 8);
+
+        init_mc_funcs_dirs(0, 64, BPP);
+        init_mc_funcs_dirs(1, 32, BPP);
+        init_mc_funcs_dirs(2, 16, BPP);
+        init_mc_funcs_dirs(3, 8,  BPP);
+        init_mc_funcs_dirs(4, 4,  BPP);
+    }
+}
+
+av_cold void INIT_FUNC(VP9DSPContext *dsp)
+{
+    vp9dsp_mc_init_aarch64(dsp);
+}
diff --git a/libavcodec/aarch64/vp9dsp_init_aarch64.c b/libavcodec/aarch64/vp9dsp_init_aarch64.c
index 7b50540..91a82d8 100644
--- a/libavcodec/aarch64/vp9dsp_init_aarch64.c
+++ b/libavcodec/aarch64/vp9dsp_init_aarch64.c
@@ -23,6 +23,7 @@
 #include "libavutil/attributes.h"
 #include "libavutil/aarch64/cpu.h"
 #include "libavcodec/vp9dsp.h"
+#include "vp9dsp_init.h"
 
 #define declare_fpel(type, sz)                                          \
 void ff_vp9_##type##sz##_neon(uint8_t *dst, ptrdiff_t dst_stride,       \
@@ -241,7 +242,13 @@ static av_cold void vp9dsp_loopfilter_init_aarch64(VP9DSPContext *dsp)
 
 av_cold void ff_vp9dsp_init_aarch64(VP9DSPContext *dsp, int bpp)
 {
-    if (bpp != 8)
+    if (bpp == 10) {
+        ff_vp9dsp_init_10bpp_aarch64(dsp);
+        return;
+    } else if (bpp == 12) {
+        ff_vp9dsp_init_12bpp_aarch64(dsp);
+        return;
+    } else if (bpp != 8)
         return;
 
     vp9dsp_mc_init_aarch64(dsp);
diff --git a/libavcodec/aarch64/vp9mc_16bpp_neon.S b/libavcodec/aarch64/vp9mc_16bpp_neon.S
new file mode 100644
index 0000000..98ffd2e
--- /dev/null
+++ b/libavcodec/aarch64/vp9mc_16bpp_neon.S
@@ -0,0 +1,631 @@
+/*
+ * Copyright (c) 2017 Google Inc.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/aarch64/asm.S"
+
+// All public functions in this file have the following signature:
+// typedef void (*vp9_mc_func)(uint8_t *dst, ptrdiff_t dst_stride,
+//                            const uint8_t *ref, ptrdiff_t ref_stride,
+//                            int h, int mx, int my);
+
+function ff_vp9_copy128_aarch64, export=1
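+        // Each row is 128 bytes (64 16-bit pixels); plain ldp/stp of
+        // core registers is used, with loads interleaved ahead of the
+        // corresponding stores.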
+1:
+        ldp             x5,  x6,  [x2]
+        ldp             x7,  x8,  [x2, #16]
+        stp             x5,  x6,  [x0]
+        ldp             x9,  x10, [x2, #32]
+        stp             x7,  x8,  [x0, #16]
+        subs            w4,  w4,  #1
+        ldp             x11, x12, [x2, #48]
+        stp             x9,  x10, [x0, #32]
+        stp             x11, x12, [x0, #48]
+        ldp             x5,  x6,  [x2, #64]
+        ldp             x7,  x8,  [x2, #80]
+        stp             x5,  x6,  [x0, #64]
+        ldp             x9,  x10, [x2, #96]
+        stp             x7,  x8,  [x0, #80]
+        ldp             x11, x12, [x2, #112]
+        stp             x9,  x10, [x0, #96]
+        stp             x11, x12, [x0, #112]
+        add             x2,  x2,  x3
+        add             x0,  x0,  x1
+        b.ne            1b
+        ret
+endfunc
+
+function ff_vp9_avg64_16_neon, export=1
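+        // The averaging in this and the following avg functions uses
+        // urhadd, i.e. (a + b + 1) >> 1 per 16 bit lane.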
+        mov             x5,  x0
+        sub             x1,  x1,  #64
+        sub             x3,  x3,  #64
+1:
+        ld1             {v4.8h,  v5.8h,  v6.8h,  v7.8h},  [x2], #64
+        ld1             {v0.8h,  v1.8h,  v2.8h,  v3.8h},  [x0], #64
+        ld1             {v20.8h, v21.8h, v22.8h, v23.8h}, [x2], x3
+        urhadd          v0.8h,  v0.8h,  v4.8h
+        urhadd          v1.8h,  v1.8h,  v5.8h
+        ld1             {v16.8h, v17.8h, v18.8h, v19.8h}, [x0], x1
+        urhadd          v2.8h,  v2.8h,  v6.8h
+        urhadd          v3.8h,  v3.8h,  v7.8h
+        subs            w4,  w4,  #1
+        urhadd          v16.8h, v16.8h, v20.8h
+        urhadd          v17.8h, v17.8h, v21.8h
+        st1             {v0.8h,  v1.8h,  v2.8h,  v3.8h},  [x5], #64
+        urhadd          v18.8h, v18.8h, v22.8h
+        urhadd          v19.8h, v19.8h, v23.8h
+        st1             {v16.8h, v17.8h, v18.8h, v19.8h}, [x5], x1
+        b.ne            1b
+        ret
+endfunc
+
+function ff_vp9_avg32_16_neon, export=1
+        mov             x5,  x0
+1:
+        ld1             {v4.8h,  v5.8h,  v6.8h,  v7.8h},  [x2], x3
+        ld1             {v0.8h,  v1.8h,  v2.8h,  v3.8h},  [x0], x1
+        ld1             {v20.8h, v21.8h, v22.8h, v23.8h}, [x2], x3
+        urhadd          v0.8h,  v0.8h,  v4.8h
+        urhadd          v1.8h,  v1.8h,  v5.8h
+        ld1             {v16.8h, v17.8h, v18.8h, v19.8h}, [x0], x1
+        urhadd          v2.8h,  v2.8h,  v6.8h
+        urhadd          v3.8h,  v3.8h,  v7.8h
+        subs            w4,  w4,  #2
+        urhadd          v16.8h, v16.8h, v20.8h
+        urhadd          v17.8h, v17.8h, v21.8h
+        st1             {v0.8h,  v1.8h,  v2.8h,  v3.8h},  [x5], x1
+        urhadd          v18.8h, v18.8h, v22.8h
+        urhadd          v19.8h, v19.8h, v23.8h
+        st1             {v16.8h, v17.8h, v18.8h, v19.8h}, [x5], x1
+        b.ne            1b
+        ret
+endfunc
+
+function ff_vp9_avg16_16_neon, export=1
+1:
+        ld1             {v2.8h, v3.8h},  [x2], x3
+        ld1             {v0.8h, v1.8h},  [x0]
+        urhadd          v0.8h,  v0.8h,  v2.8h
+        urhadd          v1.8h,  v1.8h,  v3.8h
+        subs            w4,  w4,  #1
+        st1             {v0.8h, v1.8h},  [x0], x1
+        b.ne            1b
+        ret
+endfunc
+
+function ff_vp9_avg8_16_neon, export=1
+        mov             x5,  x0
+1:
+        ld1             {v2.8h},  [x2], x3
+        ld1             {v0.8h},  [x0], x1
+        ld1             {v3.8h},  [x2], x3
+        urhadd          v0.8h,  v0.8h,  v2.8h
+        ld1             {v1.8h},  [x0], x1
+        urhadd          v1.8h,  v1.8h,  v3.8h
+        subs            w4,  w4,  #2
+        st1             {v0.8h},  [x5], x1
+        st1             {v1.8h},  [x5], x1
+        b.ne            1b
+        ret
+endfunc
+
+function ff_vp9_avg4_16_neon, export=1
+        mov             x5,  x0
+1:
+        ld1             {v2.4h},  [x2], x3
+        ld1             {v0.4h},  [x0], x1
+        ld1             {v3.4h},  [x2], x3
+        urhadd          v0.4h,  v0.4h,  v2.4h
+        ld1             {v1.4h},  [x0], x1
+        urhadd          v1.4h,  v1.4h,  v3.4h
+        subs            w4,  w4,  #2
+        st1             {v0.4h},  [x5], x1
+        st1             {v1.4h},  [x5], x1
+        b.ne            1b
+        ret
+endfunc
+
+
+// Extract a vector from src1-src2 and src4-src5 (src1-src3 and src4-src6
+// for size >= 16), and multiply-accumulate into dst1 and dst5 (or
+// dst1-dst2 and dst5-dst6 for size >= 8 and dst1-dst4 and dst5-dst8
+// for size >= 16)
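+// The ext instructions form the source window shifted by \offset pixels
+// (2 bytes each) from adjacent source vectors, avoiding extra loads.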
+.macro extmlal dst1, dst2, dst3, dst4, dst5, dst6, dst7, dst8, src1, src2, src3, src4, src5, src6, offset, size
+        ext             v20.16b, \src1\().16b, \src2\().16b, #(2*\offset)
+        ext             v22.16b, \src4\().16b, \src5\().16b, #(2*\offset)
+        smlal           \dst1\().4s, v20.4h, v0.h[\offset]
+        smlal           \dst5\().4s, v22.4h, v0.h[\offset]
+.if \size >= 16
+        ext             v21.16b, \src2\().16b, \src3\().16b, #(2*\offset)
+        ext             v23.16b, \src5\().16b, \src6\().16b, #(2*\offset)
+.endif
+.if \size >= 8
+        smlal2          \dst2\().4s, v20.8h, v0.h[\offset]
+        smlal2          \dst6\().4s, v22.8h, v0.h[\offset]
+.endif
+.if \size >= 16
+        smlal           \dst3\().4s, v21.4h, v0.h[\offset]
+        smlal           \dst7\().4s, v23.4h, v0.h[\offset]
+        smlal2          \dst4\().4s, v21.8h, v0.h[\offset]
+        smlal2          \dst8\().4s, v23.8h, v0.h[\offset]
+.endif
+.endm
+
+
+// Instantiate a horizontal filter function for the given size.
+// This can work on 4, 8 or 16 pixels in parallel; for larger
+// widths it will do 16 pixels at a time and loop horizontally.
+// The actual width (in bytes) is passed in x5, the height in w4 and
+// the filter coefficients in x9.
+.macro do_8tap_h type, size
+function \type\()_8tap_\size\()h
+        sub             x2,  x2,  #6
+        add             x6,  x0,  x1
+        add             x7,  x2,  x3
+        add             x1,  x1,  x1
+        add             x3,  x3,  x3
+        // Only size >= 16 loops horizontally and needs
+        // reduced dst stride
+.if \size >= 16
+        sub             x1,  x1,  x5
+.endif
+        // size >= 16 loads three qwords and increments x2,
+        // for size 4/8 it's enough with two qwords and no
+        // postincrement
+.if \size >= 16
+        sub             x3,  x3,  x5
+        sub             x3,  x3,  #16
+.endif
+        // Load the filter vector
+        ld1             {v0.8h},  [x9]
+1:
+.if \size >= 16
+        mov             x9,  x5
+.endif
+        // Load src
+.if \size >= 16
+        ld1             {v5.8h,  v6.8h,  v7.8h},  [x2], #48
+        ld1             {v16.8h, v17.8h, v18.8h}, [x7], #48
+.else
+        ld1             {v5.8h,  v6.8h},  [x2]
+        ld1             {v16.8h, v17.8h}, [x7]
+.endif
+2:
+
+        smull           v1.4s,  v5.4h,  v0.h[0]
+        smull           v24.4s, v16.4h, v0.h[0]
+.if \size >= 8
+        smull2          v2.4s,  v5.8h,  v0.h[0]
+        smull2          v25.4s, v16.8h, v0.h[0]
+.endif
+.if \size >= 16
+        smull           v3.4s,  v6.4h,  v0.h[0]
+        smull           v26.4s, v17.4h, v0.h[0]
+        smull2          v4.4s,  v6.8h,  v0.h[0]
+        smull2          v27.4s, v17.8h, v0.h[0]
+.endif
+        extmlal         v1,  v2,  v3,  v4,  v24, v25, v26, v27, v5,  v6,  v7,  v16, v17, v18, 1, \size
+        extmlal         v1,  v2,  v3,  v4,  v24, v25, v26, v27, v5,  v6,  v7,  v16, v17, v18, 2, \size
+        extmlal         v1,  v2,  v3,  v4,  v24, v25, v26, v27, v5,  v6,  v7,  v16, v17, v18, 3, \size
+        extmlal         v1,  v2,  v3,  v4,  v24, v25, v26, v27, v5,  v6,  v7,  v16, v17, v18, 4, \size
+        extmlal         v1,  v2,  v3,  v4,  v24, v25, v26, v27, v5,  v6,  v7,  v16, v17, v18, 5, \size
+        extmlal         v1,  v2,  v3,  v4,  v24, v25, v26, v27, v5,  v6,  v7,  v16, v17, v18, 6, \size
+        extmlal         v1,  v2,  v3,  v4,  v24, v25, v26, v27, v5,  v6,  v7,  v16, v17, v18, 7, \size
+
+        // Round, shift and saturate.
+        // The sqrshrun takes care of clamping negative values to zero, but
+        // we need to manually clamp against the max pixel value with umin.
+        sqrshrun        v1.4h,  v1.4s,  #7
+        sqrshrun        v24.4h, v24.4s, #7
+.if \size >= 8
+        sqrshrun2       v1.8h,  v2.4s,  #7
+        sqrshrun2       v24.8h, v25.4s, #7
+        umin            v1.8h,  v1.8h,  v31.8h
+        umin            v24.8h, v24.8h, v31.8h
+.if \size >= 16
+        sqrshrun        v2.4h,  v3.4s,  #7
+        sqrshrun        v25.4h, v26.4s, #7
+        sqrshrun2       v2.8h,  v4.4s,  #7
+        sqrshrun2       v25.8h, v27.4s, #7
+        umin            v2.8h,  v2.8h,  v31.8h
+        umin            v25.8h, v25.8h, v31.8h
+.endif
+.else
+        umin            v1.4h,  v1.4h,  v31.4h
+        umin            v24.4h, v24.4h, v31.4h
+.endif
+        // Average
+.ifc \type,avg
+.if \size >= 16
+        ld1             {v3.8h,  v4.8h},  [x0]
+        ld1             {v29.8h, v30.8h}, [x6]
+        urhadd          v1.8h,  v1.8h,  v3.8h
+        urhadd          v2.8h,  v2.8h,  v4.8h
+        urhadd          v24.8h, v24.8h, v29.8h
+        urhadd          v25.8h, v25.8h, v30.8h
+.elseif \size >= 8
+        ld1             {v3.8h},  [x0]
+        ld1             {v4.8h},  [x6]
+        urhadd          v1.8h,  v1.8h,  v3.8h
+        urhadd          v24.8h, v24.8h, v4.8h
+.else
+        ld1             {v3.4h},  [x0]
+        ld1             {v4.4h},  [x6]
+        urhadd          v1.4h,  v1.4h,  v3.4h
+        urhadd          v24.4h, v24.4h, v4.4h
+.endif
+.endif
+        // Store and loop horizontally (for size >= 16)
+.if \size >= 16
+        subs            x9,  x9,  #32
+        st1             {v1.8h,  v2.8h},  [x0], #32
+        st1             {v24.8h, v25.8h}, [x6], #32
+        beq             3f
+        mov             v5.16b,  v7.16b
+        mov             v16.16b, v18.16b
+        ld1             {v6.8h,  v7.8h},  [x2], #32
+        ld1             {v17.8h, v18.8h}, [x7], #32
+        b               2b
+.elseif \size == 8
+        st1             {v1.8h},  [x0]
+        st1             {v24.8h}, [x6]
+.else // \size == 4
+        st1             {v1.4h},  [x0]
+        st1             {v24.4h}, [x6]
+.endif
+3:
+        // Loop vertically
+        add             x0,  x0,  x1
+        add             x6,  x6,  x1
+        add             x2,  x2,  x3
+        add             x7,  x7,  x3
+        subs            w4,  w4,  #2
+        b.ne            1b
+        ret
+endfunc
+.endm
+
+.macro do_8tap_h_size size
+do_8tap_h put, \size
+do_8tap_h avg, \size
+.endm
+
+do_8tap_h_size 4
+do_8tap_h_size 8
+do_8tap_h_size 16
+
+.macro do_8tap_h_func type, filter, offset, size, bpp
+function ff_vp9_\type\()_\filter\()\size\()_h_\bpp\()_neon, export=1
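+        // Materialize the max pixel value (1 << bpp) - 1 in each lane of
+        // v31; mvni writes the bitwise NOT of (imm8 << 8), i.e. 0x03ff
+        // for bpp=10 and 0x0fff for bpp=12.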
+        mvni            v31.8h, #((0xff << (\bpp - 8)) & 0xff), lsl #8
+        movrel          x6,  X(ff_vp9_subpel_filters), 256*\offset
+        cmp             w5,  #8
+        add             x9,  x6,  w5, uxtw #4
+        mov             x5,  #2*\size
+.if \size >= 16
+        b               \type\()_8tap_16h
+.else
+        b               \type\()_8tap_\size\()h
+.endif
+endfunc
+.endm
+
+.macro do_8tap_h_filters size, bpp
+do_8tap_h_func put, regular, 1, \size, \bpp
+do_8tap_h_func avg, regular, 1, \size, \bpp
+do_8tap_h_func put, sharp,   2, \size, \bpp
+do_8tap_h_func avg, sharp,   2, \size, \bpp
+do_8tap_h_func put, smooth,  0, \size, \bpp
+do_8tap_h_func avg, smooth,  0, \size, \bpp
+.endm
+
+.macro do_8tap_h_filters_bpp bpp
+do_8tap_h_filters 64, \bpp
+do_8tap_h_filters 32, \bpp
+do_8tap_h_filters 16, \bpp
+do_8tap_h_filters 8,  \bpp
+do_8tap_h_filters 4,  \bpp
+.endm
+
+do_8tap_h_filters_bpp 10
+do_8tap_h_filters_bpp 12
+
+
+// Vertical filters
+
+// Round, shift and saturate and store reg1-reg4
+.macro do_store4 reg1, reg2, reg3, reg4, tmp1, tmp2, tmp3, tmp4, minreg, type
+        sqrshrun        \reg1\().4h,  \reg1\().4s, #7
+        sqrshrun        \reg2\().4h,  \reg2\().4s, #7
+        sqrshrun        \reg3\().4h,  \reg3\().4s, #7
+        sqrshrun        \reg4\().4h,  \reg4\().4s, #7
+.ifc \type,avg
+        ld1             {\tmp1\().4h},  [x7], x1
+        ld1             {\tmp2\().4h},  [x7], x1
+        ld1             {\tmp3\().4h},  [x7], x1
+        ld1             {\tmp4\().4h},  [x7], x1
+.endif
+        umin            \reg1\().4h,  \reg1\().4h,  \minreg\().4h
+        umin            \reg2\().4h,  \reg2\().4h,  \minreg\().4h
+        umin            \reg3\().4h,  \reg3\().4h,  \minreg\().4h
+        umin            \reg4\().4h,  \reg4\().4h,  \minreg\().4h
+.ifc \type,avg
+        urhadd          \reg1\().4h,  \reg1\().4h,  \tmp1\().4h
+        urhadd          \reg2\().4h,  \reg2\().4h,  \tmp2\().4h
+        urhadd          \reg3\().4h,  \reg3\().4h,  \tmp3\().4h
+        urhadd          \reg4\().4h,  \reg4\().4h,  \tmp4\().4h
+.endif
+        st1             {\reg1\().4h},  [x0], x1
+        st1             {\reg2\().4h},  [x0], x1
+        st1             {\reg3\().4h},  [x0], x1
+        st1             {\reg4\().4h},  [x0], x1
+.endm
+
+// Round, shift and saturate and store reg1-8, where
+// reg1-2, reg3-4 etc pairwise correspond to 4 rows.
+.macro do_store8 reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8, minreg, type
+        sqrshrun        \reg1\().4h,  \reg1\().4s, #7
+        sqrshrun2       \reg1\().8h,  \reg2\().4s, #7
+        sqrshrun        \reg2\().4h,  \reg3\().4s, #7
+        sqrshrun2       \reg2\().8h,  \reg4\().4s, #7
+        sqrshrun        \reg3\().4h,  \reg5\().4s, #7
+        sqrshrun2       \reg3\().8h,  \reg6\().4s, #7
+        sqrshrun        \reg4\().4h,  \reg7\().4s, #7
+        sqrshrun2       \reg4\().8h,  \reg8\().4s, #7
+.ifc \type,avg
+        ld1             {\reg5\().8h},  [x7], x1
+        ld1             {\reg6\().8h},  [x7], x1
+        ld1             {\reg7\().8h},  [x7], x1
+        ld1             {\reg8\().8h},  [x7], x1
+.endif
+        umin            \reg1\().8h,  \reg1\().8h,  \minreg\().8h
+        umin            \reg2\().8h,  \reg2\().8h,  \minreg\().8h
+        umin            \reg3\().8h,  \reg3\().8h,  \minreg\().8h
+        umin            \reg4\().8h,  \reg4\().8h,  \minreg\().8h
+.ifc \type,avg
+        urhadd          \reg1\().8h,  \reg1\().8h,  \reg5\().8h
+        urhadd          \reg2\().8h,  \reg2\().8h,  \reg6\().8h
+        urhadd          \reg3\().8h,  \reg3\().8h,  \reg7\().8h
+        urhadd          \reg4\().8h,  \reg4\().8h,  \reg8\().8h
+.endif
+        st1             {\reg1\().8h},  [x0], x1
+        st1             {\reg2\().8h},  [x0], x1
+        st1             {\reg3\().8h},  [x0], x1
+        st1             {\reg4\().8h},  [x0], x1
+.endm
+
+// Evaluate the filter twice in parallel, from the inputs src1-src9 into dst1-dst2
+// (src1-src8 into dst1, src2-src9 into dst2).
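+// Even-numbered taps accumulate into dst1/dst2 and odd-numbered taps
+// into tmp1/tmp2, giving two independent dependency chains per output;
+// the two halves are summed at the end.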
+.macro convolve4 dst1, dst2, src1, src2, src3, src4, src5, src6, src7, src8, src9, tmp1, tmp2
+        smull           \dst1\().4s, \src1\().4h, v0.h[0]
+        smull           \dst2\().4s, \src2\().4h, v0.h[0]
+        smull           \tmp1\().4s, \src2\().4h, v0.h[1]
+        smull           \tmp2\().4s, \src3\().4h, v0.h[1]
+        smlal           \dst1\().4s, \src3\().4h, v0.h[2]
+        smlal           \dst2\().4s, \src4\().4h, v0.h[2]
+        smlal           \tmp1\().4s, \src4\().4h, v0.h[3]
+        smlal           \tmp2\().4s, \src5\().4h, v0.h[3]
+        smlal           \dst1\().4s, \src5\().4h, v0.h[4]
+        smlal           \dst2\().4s, \src6\().4h, v0.h[4]
+        smlal           \tmp1\().4s, \src6\().4h, v0.h[5]
+        smlal           \tmp2\().4s, \src7\().4h, v0.h[5]
+        smlal           \dst1\().4s, \src7\().4h, v0.h[6]
+        smlal           \dst2\().4s, \src8\().4h, v0.h[6]
+        smlal           \tmp1\().4s, \src8\().4h, v0.h[7]
+        smlal           \tmp2\().4s, \src9\().4h, v0.h[7]
+        add             \dst1\().4s, \dst1\().4s, \tmp1\().4s
+        add             \dst2\().4s, \dst2\().4s, \tmp2\().4s
+.endm
+
+// Evaluate the filter twice in parallel, from the inputs src1-src9 into dst1-dst4
+// (src1-src8 into dst1-dst2, src2-src9 into dst3-dst4).
+.macro convolve8 dst1, dst2, dst3, dst4, src1, src2, src3, src4, src5, src6, src7, src8, src9
+        smull           \dst1\().4s, \src1\().4h, v0.h[0]
+        smull2          \dst2\().4s, \src1\().8h, v0.h[0]
+        smull           \dst3\().4s, \src2\().4h, v0.h[0]
+        smull2          \dst4\().4s, \src2\().8h, v0.h[0]
+        smlal           \dst1\().4s, \src2\().4h, v0.h[1]
+        smlal2          \dst2\().4s, \src2\().8h, v0.h[1]
+        smlal           \dst3\().4s, \src3\().4h, v0.h[1]
+        smlal2          \dst4\().4s, \src3\().8h, v0.h[1]
+        smlal           \dst1\().4s, \src3\().4h, v0.h[2]
+        smlal2          \dst2\().4s, \src3\().8h, v0.h[2]
+        smlal           \dst3\().4s, \src4\().4h, v0.h[2]
+        smlal2          \dst4\().4s, \src4\().8h, v0.h[2]
+        smlal           \dst1\().4s, \src4\().4h, v0.h[3]
+        smlal2          \dst2\().4s, \src4\().8h, v0.h[3]
+        smlal           \dst3\().4s, \src5\().4h, v0.h[3]
+        smlal2          \dst4\().4s, \src5\().8h, v0.h[3]
+        smlal           \dst1\().4s, \src5\().4h, v0.h[4]
+        smlal2          \dst2\().4s, \src5\().8h, v0.h[4]
+        smlal           \dst3\().4s, \src6\().4h, v0.h[4]
+        smlal2          \dst4\().4s, \src6\().8h, v0.h[4]
+        smlal           \dst1\().4s, \src6\().4h, v0.h[5]
+        smlal2          \dst2\().4s, \src6\().8h, v0.h[5]
+        smlal           \dst3\().4s, \src7\().4h, v0.h[5]
+        smlal2          \dst4\().4s, \src7\().8h, v0.h[5]
+        smlal           \dst1\().4s, \src7\().4h, v0.h[6]
+        smlal2          \dst2\().4s, \src7\().8h, v0.h[6]
+        smlal           \dst3\().4s, \src8\().4h, v0.h[6]
+        smlal2          \dst4\().4s, \src8\().8h, v0.h[6]
+        smlal           \dst1\().4s, \src8\().4h, v0.h[7]
+        smlal2          \dst2\().4s, \src8\().8h, v0.h[7]
+        smlal           \dst3\().4s, \src9\().4h, v0.h[7]
+        smlal2          \dst4\().4s, \src9\().8h, v0.h[7]
+.endm
+
+// Instantiate a vertical filter function for filtering 8 pixels at a time.
+// The height is passed in x4, the width in x5 and the filter coefficients
+// in x6.
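+// The loop is unrolled into three 4-row blocks with rotated source
+// register assignments, so no register moves are needed between the
+// blocks.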
+.macro do_8tap_8v type
+function \type\()_8tap_8v
+        sub             x2,  x2,  x3, lsl #1
+        sub             x2,  x2,  x3
+        ld1             {v0.8h},  [x6]
+1:
+.ifc \type,avg
+        mov             x7,  x0
+.endif
+        mov             x6,  x4
+
+        ld1             {v17.8h}, [x2], x3
+        ld1             {v18.8h}, [x2], x3
+        ld1             {v19.8h}, [x2], x3
+        ld1             {v20.8h}, [x2], x3
+        ld1             {v21.8h}, [x2], x3
+        ld1             {v22.8h}, [x2], x3
+        ld1             {v23.8h}, [x2], x3
+2:
+        ld1             {v24.8h}, [x2], x3
+        ld1             {v25.8h}, [x2], x3
+        ld1             {v26.8h}, [x2], x3
+        ld1             {v27.8h}, [x2], x3
+
+        convolve8       v2,  v3,  v4,  v5,  v17, v18, v19, v20, v21, v22, v23, v24, v25
+        convolve8       v6,  v7,  v30, v31, v19, v20, v21, v22, v23, v24, v25, v26, v27
+        do_store8       v2,  v3,  v4,  v5,  v6,  v7,  v30, v31, v1,  \type
+
+        subs            x6,  x6,  #4
+        b.eq            8f
+
+        ld1             {v16.8h}, [x2], x3
+        ld1             {v17.8h}, [x2], x3
+        ld1             {v18.8h}, [x2], x3
+        ld1             {v19.8h}, [x2], x3
+        convolve8       v2,  v3,  v4,  v5,  v21, v22, v23, v24, v25, v26, v27, v16, v17
+        convolve8       v6,  v7,  v20, v21, v23, v24, v25, v26, v27, v16, v17, v18, v19
+        do_store8       v2,  v3,  v4,  v5,  v6,  v7,  v20, v21, v1,  \type
+
+        subs            x6,  x6,  #4
+        b.eq            8f
+
+        ld1             {v20.8h}, [x2], x3
+        ld1             {v21.8h}, [x2], x3
+        ld1             {v22.8h}, [x2], x3
+        ld1             {v23.8h}, [x2], x3
+        convolve8       v2,  v3,  v4,  v5,  v25, v26, v27, v16, v17, v18, v19, v20, v21
+        convolve8       v6,  v7,  v24, v25, v27, v16, v17, v18, v19, v20, v21, v22, v23
+        do_store8       v2,  v3,  v4,  v5,  v6,  v7,  v24, v25, v1,  \type
+
+        subs            x6,  x6,  #4
+        b.ne            2b
+
+8:
+        subs            x5,  x5,  #8
+        b.eq            9f
+        // x0 -= h * dst_stride
+        msub            x0,  x1,  x4, x0
+        // x2 -= h * src_stride
+        msub            x2,  x3,  x4, x2
+        // x2 -= 8 * src_stride
+        sub             x2,  x2,  x3, lsl #3
+        // x2 += 1 * src_stride
+        add             x2,  x2,  x3
+        add             x2,  x2,  #16
+        add             x0,  x0,  #16
+        b               1b
+9:
+        ret
+endfunc
+.endm
+
+do_8tap_8v put
+do_8tap_8v avg
+
+
+// Instantiate a vertical filter function for filtering a 4 pixel wide
+// slice. This is only designed to work for 4 or 8 output lines, since
+// 4 pixel wide blocks are at most 8 pixels tall.
+.macro do_8tap_4v type
+function \type\()_8tap_4v
+        sub             x2,  x2,  x3, lsl #1
+        sub             x2,  x2,  x3
+        ld1             {v0.8h},  [x6]
+.ifc \type,avg
+        mov             x7,  x0
+.endif
+
+        ld1             {v16.4h}, [x2], x3
+        ld1             {v17.4h}, [x2], x3
+        ld1             {v18.4h}, [x2], x3
+        ld1             {v19.4h}, [x2], x3
+        ld1             {v20.4h}, [x2], x3
+        ld1             {v21.4h}, [x2], x3
+        ld1             {v22.4h}, [x2], x3
+        ld1             {v23.4h}, [x2], x3
+        ld1             {v24.4h}, [x2], x3
+        ld1             {v25.4h}, [x2], x3
+        ld1             {v26.4h}, [x2], x3
+
+        convolve4       v2,  v3,  v16, v17, v18, v19, v20, v21, v22, v23, v24, v30, v31
+        convolve4       v4,  v5,  v18, v19, v20, v21, v22, v23, v24, v25, v26, v30, v31
+        do_store4       v2,  v3,  v4,  v5,  v28, v29, v30, v31, v1,  \type
+
+        subs            x4,  x4,  #4
+        b.eq            9f
+
+        ld1             {v27.4h}, [x2], x3
+        ld1             {v28.4h}, [x2], x3
+        ld1             {v29.4h}, [x2], x3
+        ld1             {v30.4h}, [x2], x3
+
+        convolve4       v2,  v3,  v20, v21, v22, v23, v24, v25, v26, v27, v28, v16, v17
+        convolve4       v4,  v5,  v22, v23, v24, v25, v26, v27, v28, v29, v30, v16, v17
+        do_store4       v2,  v3,  v4,  v5,  v16, v17, v18, v19, v1,  \type
+
+9:
+        ret
+endfunc
+.endm
+
+do_8tap_4v put
+do_8tap_4v avg
+
+
+.macro do_8tap_v_func type, filter, offset, size, bpp
+function ff_vp9_\type\()_\filter\()\size\()_v_\bpp\()_neon, export=1
+        uxtw            x4,  w4
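+        // As in the horizontal functions, set v1 to the max pixel value
+        // (1 << bpp) - 1.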
+        mvni            v1.8h, #((0xff << (\bpp - 8)) & 0xff), lsl #8
+        movrel          x5,  X(ff_vp9_subpel_filters), 256*\offset
+        add             x6,  x5,  w6, uxtw #4
+        mov             x5,  #\size
+.if \size >= 8
+        b               \type\()_8tap_8v
+.else
+        b               \type\()_8tap_4v
+.endif
+endfunc
+.endm
+
+.macro do_8tap_v_filters size, bpp
+do_8tap_v_func put, regular, 1, \size, \bpp
+do_8tap_v_func avg, regular, 1, \size, \bpp
+do_8tap_v_func put, sharp,   2, \size, \bpp
+do_8tap_v_func avg, sharp,   2, \size, \bpp
+do_8tap_v_func put, smooth,  0, \size, \bpp
+do_8tap_v_func avg, smooth,  0, \size, \bpp
+.endm
+
+.macro do_8tap_v_filters_bpp bpp
+do_8tap_v_filters 64, \bpp
+do_8tap_v_filters 32, \bpp
+do_8tap_v_filters 16, \bpp
+do_8tap_v_filters 8,  \bpp
+do_8tap_v_filters 4,  \bpp
+.endm
+
+do_8tap_v_filters_bpp 10
+do_8tap_v_filters_bpp 12
-- 
2.7.4


