[FFmpeg-devel] [PATCH 1/5] all: fix -Wextra-semi reported on clang

Ganesh Ajjanagadde gajjanag at mit.edu
Sat Oct 24 15:33:22 CEST 2015


Hi all,

Apologies for the copy/pasted patch; I can't send it via git send-email.
The patch is pasted inline below for review, and also attached as a
non-clobbered version.

--------------------------------------------------------------------------

From 82b889a359925d1f9f7496c916cf24146b44e648 Mon Sep 17 00:00:00 2001
From: Ganesh Ajjanagadde <gajjanagadde at gmail.com>
Date: Fri, 23 Oct 2015 11:23:42 -0400
Subject: [PATCH 1/5] all: fix -Wextra-semi reported on clang

This fixes extra semicolons that clang 3.7 on GNU/Linux warns about.
The warnings were triggered by building with -Wpedantic, which essentially
checks for strict ISO compliance in numerous ways.

Signed-off-by: Ganesh Ajjanagadde <gajjanagadde at gmail.com>
---
 ffprobe.c                                   |  20 +-
 libavcodec/x86/hevcdsp_init.c               | 637 ++++++++++++++--------------
 libavcodec/x86/rv40dsp_init.c               |   2 +-
 libavcodec/x86/vp9dsp_init.c                |  14 +-
 libavcodec/x86/vp9dsp_init.h                |   6 +-
 libavcodec/x86/vp9dsp_init_16bpp_template.c |  36 +-
 libavdevice/alsa.c                          |   6 +-
 7 files changed, 358 insertions(+), 363 deletions(-)

diff --git a/ffprobe.c b/ffprobe.c
index ac03689..00584d2 100644
--- a/ffprobe.c
+++ b/ffprobe.c
@@ -3062,16 +3062,16 @@ static int opt_show_versions(const char *opt, const char *arg)
         return 0;                                                       \
     }

-DEFINE_OPT_SHOW_SECTION(chapters,         CHAPTERS);
-DEFINE_OPT_SHOW_SECTION(error,            ERROR);
-DEFINE_OPT_SHOW_SECTION(format,           FORMAT);
-DEFINE_OPT_SHOW_SECTION(frames,           FRAMES);
-DEFINE_OPT_SHOW_SECTION(library_versions, LIBRARY_VERSIONS);
-DEFINE_OPT_SHOW_SECTION(packets,          PACKETS);
-DEFINE_OPT_SHOW_SECTION(pixel_formats,    PIXEL_FORMATS);
-DEFINE_OPT_SHOW_SECTION(program_version,  PROGRAM_VERSION);
-DEFINE_OPT_SHOW_SECTION(streams,          STREAMS);
-DEFINE_OPT_SHOW_SECTION(programs,         PROGRAMS);
+DEFINE_OPT_SHOW_SECTION(chapters,         CHAPTERS)
+DEFINE_OPT_SHOW_SECTION(error,            ERROR)
+DEFINE_OPT_SHOW_SECTION(format,           FORMAT)
+DEFINE_OPT_SHOW_SECTION(frames,           FRAMES)
+DEFINE_OPT_SHOW_SECTION(library_versions, LIBRARY_VERSIONS)
+DEFINE_OPT_SHOW_SECTION(packets,          PACKETS)
+DEFINE_OPT_SHOW_SECTION(pixel_formats,    PIXEL_FORMATS)
+DEFINE_OPT_SHOW_SECTION(program_version,  PROGRAM_VERSION)
+DEFINE_OPT_SHOW_SECTION(streams,          STREAMS)
+DEFINE_OPT_SHOW_SECTION(programs,         PROGRAMS)

 static const OptionDef real_options[] = {
 #include "cmdutils_common_opts.h"
diff --git a/libavcodec/x86/hevcdsp_init.c b/libavcodec/x86/hevcdsp_init.c
index ddc876d..2181f6d 100644
--- a/libavcodec/x86/hevcdsp_init.c
+++ b/libavcodec/x86/hevcdsp_init.c
@@ -119,8 +119,8 @@ void ff_hevc_put_hevc_bi_##name##W##_##bitd##_##opt(uint8_t *_dst, ptrdiff_t dst
 }

 #define mc_rep_funcs(name, bitd, step, W, opt)        \
-    mc_rep_func(name, bitd, step, W, opt);            \
-    mc_rep_uni_func(name, bitd, step, W, opt);        \
+    mc_rep_func(name, bitd, step, W, opt)            \
+    mc_rep_uni_func(name, bitd, step, W, opt)        \
     mc_rep_bi_func(name, bitd, step, W, opt)

 #define mc_rep_func2(name, bitd, step1, step2, W, opt) \
@@ -153,14 +153,9 @@ void ff_hevc_put_hevc_bi_##name##W##_##bitd##_##opt(uint8_t *dst, ptrdiff_t dsts
                                                        src2 + step1, height, mx, my, width);                    \
 }

-#define mc_rep_funcs(name, bitd, step, W, opt)        \
-    mc_rep_func(name, bitd, step, W, opt);            \
-    mc_rep_uni_func(name, bitd, step, W, opt);        \
-    mc_rep_bi_func(name, bitd, step, W, opt)
-
 #define mc_rep_funcs2(name, bitd, step1, step2, W, opt) \
-    mc_rep_func2(name, bitd, step1, step2, W, opt);     \
-    mc_rep_uni_func2(name, bitd, step1, step2, W, opt); \
+    mc_rep_func2(name, bitd, step1, step2, W, opt)      \
+    mc_rep_uni_func2(name, bitd, step1, step2, W, opt)  \
     mc_rep_bi_func2(name, bitd, step1, step2, W, opt)

 #if ARCH_X86_64 && HAVE_SSE4_EXTERNAL
@@ -196,9 +191,9 @@ void ff_hevc_put_hevc_uni_##name##width1##_10_##opt1(uint8_t *dst, ptrdiff_t dst
                                                      height, mx, my, width);                                 \
 }

-#define mc_rep_mixs_10(name, width1, width2, width3, opt1, opt2, width4)    \
-mc_rep_mix_10(name, width1, width2, width3, opt1, opt2, width4);            \
-mc_bi_rep_mix_10(name, width1, width2, width3, opt1, opt2, width4);         \
+#define mc_rep_mixs_10(name, width1, width2, width3, opt1, opt2, width4)   \
+mc_rep_mix_10(name, width1, width2, width3, opt1, opt2, width4)            \
+mc_bi_rep_mix_10(name, width1, width2, width3, opt1, opt2, width4)         \
 mc_uni_rep_mix_10(name, width1, width2, width3, opt1, opt2, width4)

 #define mc_rep_mix_8(name, width1, width2, width3, opt1, opt2)                                        \
@@ -232,199 +227,199 @@ void ff_hevc_put_hevc_uni_##name##width1##_8_##opt1(uint8_t *dst, ptrdiff_t dsts
                                                    height, mx, my, width);                                    \
 }

-#define mc_rep_mixs_8(name, width1, width2, width3, opt1, opt2)    \
-mc_rep_mix_8(name, width1, width2, width3, opt1, opt2);            \
-mc_bi_rep_mix_8(name, width1, width2, width3, opt1, opt2);         \
+#define mc_rep_mixs_8(name, width1, width2, width3, opt1, opt2)   \
+mc_rep_mix_8(name, width1, width2, width3, opt1, opt2)            \
+mc_bi_rep_mix_8(name, width1, width2, width3, opt1, opt2)         \
 mc_uni_rep_mix_8(name, width1, width2, width3, opt1, opt2)

 #if HAVE_AVX2_EXTERNAL

-mc_rep_mixs_8(pel_pixels, 48, 32, 16, avx2, sse4);
-mc_rep_mixs_8(epel_hv,    48, 32, 16, avx2, sse4);
-mc_rep_mixs_8(epel_h ,    48, 32, 16, avx2, sse4);
-mc_rep_mixs_8(epel_v ,    48, 32, 16, avx2, sse4);
+mc_rep_mixs_8(pel_pixels, 48, 32, 16, avx2, sse4)
+mc_rep_mixs_8(epel_hv,    48, 32, 16, avx2, sse4)
+mc_rep_mixs_8(epel_h ,    48, 32, 16, avx2, sse4)
+mc_rep_mixs_8(epel_v ,    48, 32, 16, avx2, sse4)

-mc_rep_mix_10(pel_pixels, 24, 16, 8, avx2, sse4, 32);
-mc_bi_rep_mix_10(pel_pixels,24, 16, 8, avx2, sse4, 32);
-mc_rep_mixs_10(epel_hv,   24, 16, 8, avx2, sse4, 32);
-mc_rep_mixs_10(epel_h ,   24, 16, 8, avx2, sse4, 32);
-mc_rep_mixs_10(epel_v ,   24, 16, 8, avx2, sse4, 32);
+mc_rep_mix_10(pel_pixels, 24, 16, 8, avx2, sse4, 32)
+mc_bi_rep_mix_10(pel_pixels,24, 16, 8, avx2, sse4, 32)
+mc_rep_mixs_10(epel_hv,   24, 16, 8, avx2, sse4, 32)
+mc_rep_mixs_10(epel_h ,   24, 16, 8, avx2, sse4, 32)
+mc_rep_mixs_10(epel_v ,   24, 16, 8, avx2, sse4, 32)


-mc_rep_mixs_10(qpel_h ,   24, 16, 8, avx2, sse4, 32);
-mc_rep_mixs_10(qpel_v ,   24, 16, 8, avx2, sse4, 32);
-mc_rep_mixs_10(qpel_hv,   24, 16, 8, avx2, sse4, 32);
+mc_rep_mixs_10(qpel_h ,   24, 16, 8, avx2, sse4, 32)
+mc_rep_mixs_10(qpel_v ,   24, 16, 8, avx2, sse4, 32)
+mc_rep_mixs_10(qpel_hv,   24, 16, 8, avx2, sse4, 32)


-mc_rep_uni_func(pel_pixels, 8, 64, 128, avx2);//used for 10bit
-mc_rep_uni_func(pel_pixels, 8, 32, 96, avx2); //used for 10bit
+mc_rep_uni_func(pel_pixels, 8, 64, 128, avx2)//used for 10bit
+mc_rep_uni_func(pel_pixels, 8, 32, 96, avx2) //used for 10bit

-mc_rep_funcs(pel_pixels, 8, 32, 64, avx2);
+mc_rep_funcs(pel_pixels, 8, 32, 64, avx2)

-mc_rep_func(pel_pixels, 10, 16, 32, avx2);
-mc_rep_func(pel_pixels, 10, 16, 48, avx2);
-mc_rep_func(pel_pixels, 10, 32, 64, avx2);
+mc_rep_func(pel_pixels, 10, 16, 32, avx2)
+mc_rep_func(pel_pixels, 10, 16, 48, avx2)
+mc_rep_func(pel_pixels, 10, 32, 64, avx2)

-mc_rep_bi_func(pel_pixels, 10, 16, 32, avx2);
-mc_rep_bi_func(pel_pixels, 10, 16, 48, avx2);
-mc_rep_bi_func(pel_pixels, 10, 32, 64, avx2);
+mc_rep_bi_func(pel_pixels, 10, 16, 32, avx2)
+mc_rep_bi_func(pel_pixels, 10, 16, 48, avx2)
+mc_rep_bi_func(pel_pixels, 10, 32, 64, avx2)

-mc_rep_funcs(epel_h, 8, 32, 64, avx2);
+mc_rep_funcs(epel_h, 8, 32, 64, avx2)

-mc_rep_funcs(epel_v, 8, 32, 64, avx2);
+mc_rep_funcs(epel_v, 8, 32, 64, avx2)

-mc_rep_funcs(epel_h, 10, 16, 32, avx2);
-mc_rep_funcs(epel_h, 10, 16, 48, avx2);
-mc_rep_funcs(epel_h, 10, 32, 64, avx2);
+mc_rep_funcs(epel_h, 10, 16, 32, avx2)
+mc_rep_funcs(epel_h, 10, 16, 48, avx2)
+mc_rep_funcs(epel_h, 10, 32, 64, avx2)

-mc_rep_funcs(epel_v, 10, 16, 32, avx2);
-mc_rep_funcs(epel_v, 10, 16, 48, avx2);
-mc_rep_funcs(epel_v, 10, 32, 64, avx2);
+mc_rep_funcs(epel_v, 10, 16, 32, avx2)
+mc_rep_funcs(epel_v, 10, 16, 48, avx2)
+mc_rep_funcs(epel_v, 10, 32, 64, avx2)


-mc_rep_funcs(epel_hv,  8, 32, 64, avx2);
+mc_rep_funcs(epel_hv,  8, 32, 64, avx2)

-mc_rep_funcs(epel_hv, 10, 16, 32, avx2);
-mc_rep_funcs(epel_hv, 10, 16, 48, avx2);
-mc_rep_funcs(epel_hv, 10, 32, 64, avx2);
+mc_rep_funcs(epel_hv, 10, 16, 32, avx2)
+mc_rep_funcs(epel_hv, 10, 16, 48, avx2)
+mc_rep_funcs(epel_hv, 10, 32, 64, avx2)

-mc_rep_funcs(qpel_h, 8, 32, 64, avx2);
-mc_rep_mixs_8(qpel_h ,  48, 32, 16, avx2, sse4);
+mc_rep_funcs(qpel_h, 8, 32, 64, avx2)
+mc_rep_mixs_8(qpel_h ,  48, 32, 16, avx2, sse4)

-mc_rep_funcs(qpel_v, 8, 32, 64, avx2);
-mc_rep_mixs_8(qpel_v,  48, 32, 16, avx2, sse4);
+mc_rep_funcs(qpel_v, 8, 32, 64, avx2)
+mc_rep_mixs_8(qpel_v,  48, 32, 16, avx2, sse4)

-mc_rep_funcs(qpel_h, 10, 16, 32, avx2);
-mc_rep_funcs(qpel_h, 10, 16, 48, avx2);
-mc_rep_funcs(qpel_h, 10, 32, 64, avx2);
+mc_rep_funcs(qpel_h, 10, 16, 32, avx2)
+mc_rep_funcs(qpel_h, 10, 16, 48, avx2)
+mc_rep_funcs(qpel_h, 10, 32, 64, avx2)

-mc_rep_funcs(qpel_v, 10, 16, 32, avx2);
-mc_rep_funcs(qpel_v, 10, 16, 48, avx2);
-mc_rep_funcs(qpel_v, 10, 32, 64, avx2);
+mc_rep_funcs(qpel_v, 10, 16, 32, avx2)
+mc_rep_funcs(qpel_v, 10, 16, 48, avx2)
+mc_rep_funcs(qpel_v, 10, 32, 64, avx2)

-mc_rep_funcs(qpel_hv, 10, 16, 32, avx2);
-mc_rep_funcs(qpel_hv, 10, 16, 48, avx2);
-mc_rep_funcs(qpel_hv, 10, 32, 64, avx2);
+mc_rep_funcs(qpel_hv, 10, 16, 32, avx2)
+mc_rep_funcs(qpel_hv, 10, 16, 48, avx2)
+mc_rep_funcs(qpel_hv, 10, 32, 64, avx2)

 #endif //AVX2

-mc_rep_funcs(pel_pixels, 8, 16, 64, sse4);
-mc_rep_funcs(pel_pixels, 8, 16, 48, sse4);
-mc_rep_funcs(pel_pixels, 8, 16, 32, sse4);
-mc_rep_funcs(pel_pixels, 8,  8, 24, sse4);
-mc_rep_funcs(pel_pixels,10,  8, 64, sse4);
-mc_rep_funcs(pel_pixels,10,  8, 48, sse4);
-mc_rep_funcs(pel_pixels,10,  8, 32, sse4);
-mc_rep_funcs(pel_pixels,10,  8, 24, sse4);
-mc_rep_funcs(pel_pixels,10,  8, 16, sse4);
-mc_rep_funcs(pel_pixels,10,  4, 12, sse4);
-mc_rep_funcs(pel_pixels,12,  8, 64, sse4);
-mc_rep_funcs(pel_pixels,12,  8, 48, sse4);
-mc_rep_funcs(pel_pixels,12,  8, 32, sse4);
-mc_rep_funcs(pel_pixels,12,  8, 24, sse4);
-mc_rep_funcs(pel_pixels,12,  8, 16, sse4);
-mc_rep_funcs(pel_pixels,12,  4, 12, sse4);
-
-mc_rep_funcs(epel_h, 8, 16, 64, sse4);
-mc_rep_funcs(epel_h, 8, 16, 48, sse4);
-mc_rep_funcs(epel_h, 8, 16, 32, sse4);
-mc_rep_funcs(epel_h, 8,  8, 24, sse4);
-mc_rep_funcs(epel_h,10,  8, 64, sse4);
-mc_rep_funcs(epel_h,10,  8, 48, sse4);
-mc_rep_funcs(epel_h,10,  8, 32, sse4);
-mc_rep_funcs(epel_h,10,  8, 24, sse4);
-mc_rep_funcs(epel_h,10,  8, 16, sse4);
-mc_rep_funcs(epel_h,10,  4, 12, sse4);
-mc_rep_funcs(epel_h,12,  8, 64, sse4);
-mc_rep_funcs(epel_h,12,  8, 48, sse4);
-mc_rep_funcs(epel_h,12,  8, 32, sse4);
-mc_rep_funcs(epel_h,12,  8, 24, sse4);
-mc_rep_funcs(epel_h,12,  8, 16, sse4);
-mc_rep_funcs(epel_h,12,  4, 12, sse4);
-mc_rep_funcs(epel_v, 8, 16, 64, sse4);
-mc_rep_funcs(epel_v, 8, 16, 48, sse4);
-mc_rep_funcs(epel_v, 8, 16, 32, sse4);
-mc_rep_funcs(epel_v, 8,  8, 24, sse4);
-mc_rep_funcs(epel_v,10,  8, 64, sse4);
-mc_rep_funcs(epel_v,10,  8, 48, sse4);
-mc_rep_funcs(epel_v,10,  8, 32, sse4);
-mc_rep_funcs(epel_v,10,  8, 24, sse4);
-mc_rep_funcs(epel_v,10,  8, 16, sse4);
-mc_rep_funcs(epel_v,10,  4, 12, sse4);
-mc_rep_funcs(epel_v,12,  8, 64, sse4);
-mc_rep_funcs(epel_v,12,  8, 48, sse4);
-mc_rep_funcs(epel_v,12,  8, 32, sse4);
-mc_rep_funcs(epel_v,12,  8, 24, sse4);
-mc_rep_funcs(epel_v,12,  8, 16, sse4);
-mc_rep_funcs(epel_v,12,  4, 12, sse4);
-mc_rep_funcs(epel_hv, 8, 16, 64, sse4);
-mc_rep_funcs(epel_hv, 8, 16, 48, sse4);
-mc_rep_funcs(epel_hv, 8, 16, 32, sse4);
-mc_rep_funcs(epel_hv, 8,  8, 24, sse4);
-mc_rep_funcs2(epel_hv,8,  8,  4, 12, sse4);
-mc_rep_funcs(epel_hv,10,  8, 64, sse4);
-mc_rep_funcs(epel_hv,10,  8, 48, sse4);
-mc_rep_funcs(epel_hv,10,  8, 32, sse4);
-mc_rep_funcs(epel_hv,10,  8, 24, sse4);
-mc_rep_funcs(epel_hv,10,  8, 16, sse4);
-mc_rep_funcs(epel_hv,10,  4, 12, sse4);
-mc_rep_funcs(epel_hv,12,  8, 64, sse4);
-mc_rep_funcs(epel_hv,12,  8, 48, sse4);
-mc_rep_funcs(epel_hv,12,  8, 32, sse4);
-mc_rep_funcs(epel_hv,12,  8, 24, sse4);
-mc_rep_funcs(epel_hv,12,  8, 16, sse4);
-mc_rep_funcs(epel_hv,12,  4, 12, sse4);
-
-mc_rep_funcs(qpel_h, 8, 16, 64, sse4);
-mc_rep_funcs(qpel_h, 8, 16, 48, sse4);
-mc_rep_funcs(qpel_h, 8, 16, 32, sse4);
-mc_rep_funcs(qpel_h, 8,  8, 24, sse4);
-mc_rep_funcs(qpel_h,10,  8, 64, sse4);
-mc_rep_funcs(qpel_h,10,  8, 48, sse4);
-mc_rep_funcs(qpel_h,10,  8, 32, sse4);
-mc_rep_funcs(qpel_h,10,  8, 24, sse4);
-mc_rep_funcs(qpel_h,10,  8, 16, sse4);
-mc_rep_funcs(qpel_h,10,  4, 12, sse4);
-mc_rep_funcs(qpel_h,12,  8, 64, sse4);
-mc_rep_funcs(qpel_h,12,  8, 48, sse4);
-mc_rep_funcs(qpel_h,12,  8, 32, sse4);
-mc_rep_funcs(qpel_h,12,  8, 24, sse4);
-mc_rep_funcs(qpel_h,12,  8, 16, sse4);
-mc_rep_funcs(qpel_h,12,  4, 12, sse4);
-mc_rep_funcs(qpel_v, 8, 16, 64, sse4);
-mc_rep_funcs(qpel_v, 8, 16, 48, sse4);
-mc_rep_funcs(qpel_v, 8, 16, 32, sse4);
-mc_rep_funcs(qpel_v, 8,  8, 24, sse4);
-mc_rep_funcs(qpel_v,10,  8, 64, sse4);
-mc_rep_funcs(qpel_v,10,  8, 48, sse4);
-mc_rep_funcs(qpel_v,10,  8, 32, sse4);
-mc_rep_funcs(qpel_v,10,  8, 24, sse4);
-mc_rep_funcs(qpel_v,10,  8, 16, sse4);
-mc_rep_funcs(qpel_v,10,  4, 12, sse4);
-mc_rep_funcs(qpel_v,12,  8, 64, sse4);
-mc_rep_funcs(qpel_v,12,  8, 48, sse4);
-mc_rep_funcs(qpel_v,12,  8, 32, sse4);
-mc_rep_funcs(qpel_v,12,  8, 24, sse4);
-mc_rep_funcs(qpel_v,12,  8, 16, sse4);
-mc_rep_funcs(qpel_v,12,  4, 12, sse4);
-mc_rep_funcs(qpel_hv, 8,  8, 64, sse4);
-mc_rep_funcs(qpel_hv, 8,  8, 48, sse4);
-mc_rep_funcs(qpel_hv, 8,  8, 32, sse4);
-mc_rep_funcs(qpel_hv, 8,  8, 24, sse4);
-mc_rep_funcs(qpel_hv, 8,  8, 16, sse4);
-mc_rep_funcs2(qpel_hv,8,  8,  4, 12, sse4);
-mc_rep_funcs(qpel_hv,10,  8, 64, sse4);
-mc_rep_funcs(qpel_hv,10,  8, 48, sse4);
-mc_rep_funcs(qpel_hv,10,  8, 32, sse4);
-mc_rep_funcs(qpel_hv,10,  8, 24, sse4);
-mc_rep_funcs(qpel_hv,10,  8, 16, sse4);
-mc_rep_funcs(qpel_hv,10,  4, 12, sse4);
-mc_rep_funcs(qpel_hv,12,  8, 64, sse4);
-mc_rep_funcs(qpel_hv,12,  8, 48, sse4);
-mc_rep_funcs(qpel_hv,12,  8, 32, sse4);
-mc_rep_funcs(qpel_hv,12,  8, 24, sse4);
-mc_rep_funcs(qpel_hv,12,  8, 16, sse4);
-mc_rep_funcs(qpel_hv,12,  4, 12, sse4);
+mc_rep_funcs(pel_pixels, 8, 16, 64, sse4)
+mc_rep_funcs(pel_pixels, 8, 16, 48, sse4)
+mc_rep_funcs(pel_pixels, 8, 16, 32, sse4)
+mc_rep_funcs(pel_pixels, 8,  8, 24, sse4)
+mc_rep_funcs(pel_pixels,10,  8, 64, sse4)
+mc_rep_funcs(pel_pixels,10,  8, 48, sse4)
+mc_rep_funcs(pel_pixels,10,  8, 32, sse4)
+mc_rep_funcs(pel_pixels,10,  8, 24, sse4)
+mc_rep_funcs(pel_pixels,10,  8, 16, sse4)
+mc_rep_funcs(pel_pixels,10,  4, 12, sse4)
+mc_rep_funcs(pel_pixels,12,  8, 64, sse4)
+mc_rep_funcs(pel_pixels,12,  8, 48, sse4)
+mc_rep_funcs(pel_pixels,12,  8, 32, sse4)
+mc_rep_funcs(pel_pixels,12,  8, 24, sse4)
+mc_rep_funcs(pel_pixels,12,  8, 16, sse4)
+mc_rep_funcs(pel_pixels,12,  4, 12, sse4)
+
+mc_rep_funcs(epel_h, 8, 16, 64, sse4)
+mc_rep_funcs(epel_h, 8, 16, 48, sse4)
+mc_rep_funcs(epel_h, 8, 16, 32, sse4)
+mc_rep_funcs(epel_h, 8,  8, 24, sse4)
+mc_rep_funcs(epel_h,10,  8, 64, sse4)
+mc_rep_funcs(epel_h,10,  8, 48, sse4)
+mc_rep_funcs(epel_h,10,  8, 32, sse4)
+mc_rep_funcs(epel_h,10,  8, 24, sse4)
+mc_rep_funcs(epel_h,10,  8, 16, sse4)
+mc_rep_funcs(epel_h,10,  4, 12, sse4)
+mc_rep_funcs(epel_h,12,  8, 64, sse4)
+mc_rep_funcs(epel_h,12,  8, 48, sse4)
+mc_rep_funcs(epel_h,12,  8, 32, sse4)
+mc_rep_funcs(epel_h,12,  8, 24, sse4)
+mc_rep_funcs(epel_h,12,  8, 16, sse4)
+mc_rep_funcs(epel_h,12,  4, 12, sse4)
+mc_rep_funcs(epel_v, 8, 16, 64, sse4)
+mc_rep_funcs(epel_v, 8, 16, 48, sse4)
+mc_rep_funcs(epel_v, 8, 16, 32, sse4)
+mc_rep_funcs(epel_v, 8,  8, 24, sse4)
+mc_rep_funcs(epel_v,10,  8, 64, sse4)
+mc_rep_funcs(epel_v,10,  8, 48, sse4)
+mc_rep_funcs(epel_v,10,  8, 32, sse4)
+mc_rep_funcs(epel_v,10,  8, 24, sse4)
+mc_rep_funcs(epel_v,10,  8, 16, sse4)
+mc_rep_funcs(epel_v,10,  4, 12, sse4)
+mc_rep_funcs(epel_v,12,  8, 64, sse4)
+mc_rep_funcs(epel_v,12,  8, 48, sse4)
+mc_rep_funcs(epel_v,12,  8, 32, sse4)
+mc_rep_funcs(epel_v,12,  8, 24, sse4)
+mc_rep_funcs(epel_v,12,  8, 16, sse4)
+mc_rep_funcs(epel_v,12,  4, 12, sse4)
+mc_rep_funcs(epel_hv, 8, 16, 64, sse4)
+mc_rep_funcs(epel_hv, 8, 16, 48, sse4)
+mc_rep_funcs(epel_hv, 8, 16, 32, sse4)
+mc_rep_funcs(epel_hv, 8,  8, 24, sse4)
+mc_rep_funcs2(epel_hv,8,  8,  4, 12, sse4)
+mc_rep_funcs(epel_hv,10,  8, 64, sse4)
+mc_rep_funcs(epel_hv,10,  8, 48, sse4)
+mc_rep_funcs(epel_hv,10,  8, 32, sse4)
+mc_rep_funcs(epel_hv,10,  8, 24, sse4)
+mc_rep_funcs(epel_hv,10,  8, 16, sse4)
+mc_rep_funcs(epel_hv,10,  4, 12, sse4)
+mc_rep_funcs(epel_hv,12,  8, 64, sse4)
+mc_rep_funcs(epel_hv,12,  8, 48, sse4)
+mc_rep_funcs(epel_hv,12,  8, 32, sse4)
+mc_rep_funcs(epel_hv,12,  8, 24, sse4)
+mc_rep_funcs(epel_hv,12,  8, 16, sse4)
+mc_rep_funcs(epel_hv,12,  4, 12, sse4)
+
+mc_rep_funcs(qpel_h, 8, 16, 64, sse4)
+mc_rep_funcs(qpel_h, 8, 16, 48, sse4)
+mc_rep_funcs(qpel_h, 8, 16, 32, sse4)
+mc_rep_funcs(qpel_h, 8,  8, 24, sse4)
+mc_rep_funcs(qpel_h,10,  8, 64, sse4)
+mc_rep_funcs(qpel_h,10,  8, 48, sse4)
+mc_rep_funcs(qpel_h,10,  8, 32, sse4)
+mc_rep_funcs(qpel_h,10,  8, 24, sse4)
+mc_rep_funcs(qpel_h,10,  8, 16, sse4)
+mc_rep_funcs(qpel_h,10,  4, 12, sse4)
+mc_rep_funcs(qpel_h,12,  8, 64, sse4)
+mc_rep_funcs(qpel_h,12,  8, 48, sse4)
+mc_rep_funcs(qpel_h,12,  8, 32, sse4)
+mc_rep_funcs(qpel_h,12,  8, 24, sse4)
+mc_rep_funcs(qpel_h,12,  8, 16, sse4)
+mc_rep_funcs(qpel_h,12,  4, 12, sse4)
+mc_rep_funcs(qpel_v, 8, 16, 64, sse4)
+mc_rep_funcs(qpel_v, 8, 16, 48, sse4)
+mc_rep_funcs(qpel_v, 8, 16, 32, sse4)
+mc_rep_funcs(qpel_v, 8,  8, 24, sse4)
+mc_rep_funcs(qpel_v,10,  8, 64, sse4)
+mc_rep_funcs(qpel_v,10,  8, 48, sse4)
+mc_rep_funcs(qpel_v,10,  8, 32, sse4)
+mc_rep_funcs(qpel_v,10,  8, 24, sse4)
+mc_rep_funcs(qpel_v,10,  8, 16, sse4)
+mc_rep_funcs(qpel_v,10,  4, 12, sse4)
+mc_rep_funcs(qpel_v,12,  8, 64, sse4)
+mc_rep_funcs(qpel_v,12,  8, 48, sse4)
+mc_rep_funcs(qpel_v,12,  8, 32, sse4)
+mc_rep_funcs(qpel_v,12,  8, 24, sse4)
+mc_rep_funcs(qpel_v,12,  8, 16, sse4)
+mc_rep_funcs(qpel_v,12,  4, 12, sse4)
+mc_rep_funcs(qpel_hv, 8,  8, 64, sse4)
+mc_rep_funcs(qpel_hv, 8,  8, 48, sse4)
+mc_rep_funcs(qpel_hv, 8,  8, 32, sse4)
+mc_rep_funcs(qpel_hv, 8,  8, 24, sse4)
+mc_rep_funcs(qpel_hv, 8,  8, 16, sse4)
+mc_rep_funcs2(qpel_hv,8,  8,  4, 12, sse4)
+mc_rep_funcs(qpel_hv,10,  8, 64, sse4)
+mc_rep_funcs(qpel_hv,10,  8, 48, sse4)
+mc_rep_funcs(qpel_hv,10,  8, 32, sse4)
+mc_rep_funcs(qpel_hv,10,  8, 24, sse4)
+mc_rep_funcs(qpel_hv,10,  8, 16, sse4)
+mc_rep_funcs(qpel_hv,10,  4, 12, sse4)
+mc_rep_funcs(qpel_hv,12,  8, 64, sse4)
+mc_rep_funcs(qpel_hv,12,  8, 48, sse4)
+mc_rep_funcs(qpel_hv,12,  8, 32, sse4)
+mc_rep_funcs(qpel_hv,12,  8, 24, sse4)
+mc_rep_funcs(qpel_hv,12,  8, 16, sse4)
+mc_rep_funcs(qpel_hv,12,  4, 12, sse4)

 #define mc_rep_uni_w(bitd, step, W, opt) \
 void ff_hevc_put_hevc_uni_w##W##_##bitd##_##opt(uint8_t *_dst, ptrdiff_t dststride, int16_t *_src, \
@@ -441,26 +436,26 @@ void ff_hevc_put_hevc_uni_w##W##_##bitd##_##opt(uint8_t *_dst, ptrdiff_t dststri
     }                                                  \
 }

-mc_rep_uni_w(8, 6, 12, sse4);
-mc_rep_uni_w(8, 8, 16, sse4);
-mc_rep_uni_w(8, 8, 24, sse4);
-mc_rep_uni_w(8, 8, 32, sse4);
-mc_rep_uni_w(8, 8, 48, sse4);
-mc_rep_uni_w(8, 8, 64, sse4);
-
-mc_rep_uni_w(10, 6, 12, sse4);
-mc_rep_uni_w(10, 8, 16, sse4);
-mc_rep_uni_w(10, 8, 24, sse4);
-mc_rep_uni_w(10, 8, 32, sse4);
-mc_rep_uni_w(10, 8, 48, sse4);
-mc_rep_uni_w(10, 8, 64, sse4);
-
-mc_rep_uni_w(12, 6, 12, sse4);
-mc_rep_uni_w(12, 8, 16, sse4);
-mc_rep_uni_w(12, 8, 24, sse4);
-mc_rep_uni_w(12, 8, 32, sse4);
-mc_rep_uni_w(12, 8, 48, sse4);
-mc_rep_uni_w(12, 8, 64, sse4);
+mc_rep_uni_w(8, 6, 12, sse4)
+mc_rep_uni_w(8, 8, 16, sse4)
+mc_rep_uni_w(8, 8, 24, sse4)
+mc_rep_uni_w(8, 8, 32, sse4)
+mc_rep_uni_w(8, 8, 48, sse4)
+mc_rep_uni_w(8, 8, 64, sse4)
+
+mc_rep_uni_w(10, 6, 12, sse4)
+mc_rep_uni_w(10, 8, 16, sse4)
+mc_rep_uni_w(10, 8, 24, sse4)
+mc_rep_uni_w(10, 8, 32, sse4)
+mc_rep_uni_w(10, 8, 48, sse4)
+mc_rep_uni_w(10, 8, 64, sse4)
+
+mc_rep_uni_w(12, 6, 12, sse4)
+mc_rep_uni_w(12, 8, 16, sse4)
+mc_rep_uni_w(12, 8, 24, sse4)
+mc_rep_uni_w(12, 8, 32, sse4)
+mc_rep_uni_w(12, 8, 48, sse4)
+mc_rep_uni_w(12, 8, 64, sse4)

 #define mc_rep_bi_w(bitd, step, W, opt) \
 void ff_hevc_put_hevc_bi_w##W##_##bitd##_##opt(uint8_t *_dst, ptrdiff_t dststride, int16_t *_src, \
@@ -480,26 +475,26 @@ void ff_hevc_put_hevc_bi_w##W##_##bitd##_##opt(uint8_t *_dst, ptrdiff_t dststrid
     }                                                  \
 }

-mc_rep_bi_w(8, 6, 12, sse4);
-mc_rep_bi_w(8, 8, 16, sse4);
-mc_rep_bi_w(8, 8, 24, sse4);
-mc_rep_bi_w(8, 8, 32, sse4);
-mc_rep_bi_w(8, 8, 48, sse4);
-mc_rep_bi_w(8, 8, 64, sse4);
-
-mc_rep_bi_w(10, 6, 12, sse4);
-mc_rep_bi_w(10, 8, 16, sse4);
-mc_rep_bi_w(10, 8, 24, sse4);
-mc_rep_bi_w(10, 8, 32, sse4);
-mc_rep_bi_w(10, 8, 48, sse4);
-mc_rep_bi_w(10, 8, 64, sse4);
-
-mc_rep_bi_w(12, 6, 12, sse4);
-mc_rep_bi_w(12, 8, 16, sse4);
-mc_rep_bi_w(12, 8, 24, sse4);
-mc_rep_bi_w(12, 8, 32, sse4);
-mc_rep_bi_w(12, 8, 48, sse4);
-mc_rep_bi_w(12, 8, 64, sse4);
+mc_rep_bi_w(8, 6, 12, sse4)
+mc_rep_bi_w(8, 8, 16, sse4)
+mc_rep_bi_w(8, 8, 24, sse4)
+mc_rep_bi_w(8, 8, 32, sse4)
+mc_rep_bi_w(8, 8, 48, sse4)
+mc_rep_bi_w(8, 8, 64, sse4)
+
+mc_rep_bi_w(10, 6, 12, sse4)
+mc_rep_bi_w(10, 8, 16, sse4)
+mc_rep_bi_w(10, 8, 24, sse4)
+mc_rep_bi_w(10, 8, 32, sse4)
+mc_rep_bi_w(10, 8, 48, sse4)
+mc_rep_bi_w(10, 8, 64, sse4)
+
+mc_rep_bi_w(12, 6, 12, sse4)
+mc_rep_bi_w(12, 8, 16, sse4)
+mc_rep_bi_w(12, 8, 24, sse4)
+mc_rep_bi_w(12, 8, 32, sse4)
+mc_rep_bi_w(12, 8, 48, sse4)
+mc_rep_bi_w(12, 8, 64, sse4)

 #define mc_uni_w_func(name, bitd, W, opt) \
 void ff_hevc_put_hevc_uni_w_##name##W##_##bitd##_##opt(uint8_t *_dst, ptrdiff_t _dststride,         \
@@ -513,51 +508,51 @@ void ff_hevc_put_hevc_uni_w_##name##W##_##bitd##_##opt(uint8_t *_dst, ptrdiff_t
     ff_hevc_put_hevc_uni_w##W##_##bitd##_##opt(_dst, _dststride, temp, height, denom, _wx, _ox);\
 }

-#define mc_uni_w_funcs(name, bitd, opt)       \
-        mc_uni_w_func(name, bitd, 4, opt);    \
-        mc_uni_w_func(name, bitd, 8, opt);    \
-        mc_uni_w_func(name, bitd, 12, opt);   \
-        mc_uni_w_func(name, bitd, 16, opt);   \
-        mc_uni_w_func(name, bitd, 24, opt);   \
-        mc_uni_w_func(name, bitd, 32, opt);   \
-        mc_uni_w_func(name, bitd, 48, opt);   \
+#define mc_uni_w_funcs(name, bitd, opt)      \
+        mc_uni_w_func(name, bitd, 4, opt)    \
+        mc_uni_w_func(name, bitd, 8, opt)    \
+        mc_uni_w_func(name, bitd, 12, opt)   \
+        mc_uni_w_func(name, bitd, 16, opt)   \
+        mc_uni_w_func(name, bitd, 24, opt)   \
+        mc_uni_w_func(name, bitd, 32, opt)   \
+        mc_uni_w_func(name, bitd, 48, opt)   \
         mc_uni_w_func(name, bitd, 64, opt)

-mc_uni_w_funcs(pel_pixels, 8, sse4);
-mc_uni_w_func(pel_pixels, 8, 6, sse4);
-mc_uni_w_funcs(epel_h, 8, sse4);
-mc_uni_w_func(epel_h, 8, 6, sse4);
-mc_uni_w_funcs(epel_v, 8, sse4);
-mc_uni_w_func(epel_v, 8, 6, sse4);
-mc_uni_w_funcs(epel_hv, 8, sse4);
-mc_uni_w_func(epel_hv, 8, 6, sse4);
-mc_uni_w_funcs(qpel_h, 8, sse4);
-mc_uni_w_funcs(qpel_v, 8, sse4);
-mc_uni_w_funcs(qpel_hv, 8, sse4);
-
-mc_uni_w_funcs(pel_pixels, 10, sse4);
-mc_uni_w_func(pel_pixels, 10, 6, sse4);
-mc_uni_w_funcs(epel_h, 10, sse4);
-mc_uni_w_func(epel_h, 10, 6, sse4);
-mc_uni_w_funcs(epel_v, 10, sse4);
-mc_uni_w_func(epel_v, 10, 6, sse4);
-mc_uni_w_funcs(epel_hv, 10, sse4);
-mc_uni_w_func(epel_hv, 10, 6, sse4);
-mc_uni_w_funcs(qpel_h, 10, sse4);
-mc_uni_w_funcs(qpel_v, 10, sse4);
-mc_uni_w_funcs(qpel_hv, 10, sse4);
-
-mc_uni_w_funcs(pel_pixels, 12, sse4);
-mc_uni_w_func(pel_pixels, 12, 6, sse4);
-mc_uni_w_funcs(epel_h, 12, sse4);
-mc_uni_w_func(epel_h, 12, 6, sse4);
-mc_uni_w_funcs(epel_v, 12, sse4);
-mc_uni_w_func(epel_v, 12, 6, sse4);
-mc_uni_w_funcs(epel_hv, 12, sse4);
-mc_uni_w_func(epel_hv, 12, 6, sse4);
-mc_uni_w_funcs(qpel_h, 12, sse4);
-mc_uni_w_funcs(qpel_v, 12, sse4);
-mc_uni_w_funcs(qpel_hv, 12, sse4);
+mc_uni_w_funcs(pel_pixels, 8, sse4)
+mc_uni_w_func(pel_pixels, 8, 6, sse4)
+mc_uni_w_funcs(epel_h, 8, sse4)
+mc_uni_w_func(epel_h, 8, 6, sse4)
+mc_uni_w_funcs(epel_v, 8, sse4)
+mc_uni_w_func(epel_v, 8, 6, sse4)
+mc_uni_w_funcs(epel_hv, 8, sse4)
+mc_uni_w_func(epel_hv, 8, 6, sse4)
+mc_uni_w_funcs(qpel_h, 8, sse4)
+mc_uni_w_funcs(qpel_v, 8, sse4)
+mc_uni_w_funcs(qpel_hv, 8, sse4)
+
+mc_uni_w_funcs(pel_pixels, 10, sse4)
+mc_uni_w_func(pel_pixels, 10, 6, sse4)
+mc_uni_w_funcs(epel_h, 10, sse4)
+mc_uni_w_func(epel_h, 10, 6, sse4)
+mc_uni_w_funcs(epel_v, 10, sse4)
+mc_uni_w_func(epel_v, 10, 6, sse4)
+mc_uni_w_funcs(epel_hv, 10, sse4)
+mc_uni_w_func(epel_hv, 10, 6, sse4)
+mc_uni_w_funcs(qpel_h, 10, sse4)
+mc_uni_w_funcs(qpel_v, 10, sse4)
+mc_uni_w_funcs(qpel_hv, 10, sse4)
+
+mc_uni_w_funcs(pel_pixels, 12, sse4)
+mc_uni_w_func(pel_pixels, 12, 6, sse4)
+mc_uni_w_funcs(epel_h, 12, sse4)
+mc_uni_w_func(epel_h, 12, 6, sse4)
+mc_uni_w_funcs(epel_v, 12, sse4)
+mc_uni_w_func(epel_v, 12, 6, sse4)
+mc_uni_w_funcs(epel_hv, 12, sse4)
+mc_uni_w_func(epel_hv, 12, 6, sse4)
+mc_uni_w_funcs(qpel_h, 12, sse4)
+mc_uni_w_funcs(qpel_v, 12, sse4)
+mc_uni_w_funcs(qpel_hv, 12, sse4)

 #define mc_bi_w_func(name, bitd, W, opt) \
 void ff_hevc_put_hevc_bi_w_##name##W##_##bitd##_##opt(uint8_t *_dst, ptrdiff_t _dststride,           \
@@ -573,51 +568,51 @@ void ff_hevc_put_hevc_bi_w_##name##W##_##bitd##_##opt(uint8_t *_dst, ptrdiff_t _
                                              height, denom, _wx0, _wx1, _ox0, _ox1);                \
 }

-#define mc_bi_w_funcs(name, bitd, opt)       \
-        mc_bi_w_func(name, bitd, 4, opt);    \
-        mc_bi_w_func(name, bitd, 8, opt);    \
-        mc_bi_w_func(name, bitd, 12, opt);   \
-        mc_bi_w_func(name, bitd, 16, opt);   \
-        mc_bi_w_func(name, bitd, 24, opt);   \
-        mc_bi_w_func(name, bitd, 32, opt);   \
-        mc_bi_w_func(name, bitd, 48, opt);   \
+#define mc_bi_w_funcs(name, bitd, opt)      \
+        mc_bi_w_func(name, bitd, 4, opt)    \
+        mc_bi_w_func(name, bitd, 8, opt)    \
+        mc_bi_w_func(name, bitd, 12, opt)   \
+        mc_bi_w_func(name, bitd, 16, opt)   \
+        mc_bi_w_func(name, bitd, 24, opt)   \
+        mc_bi_w_func(name, bitd, 32, opt)   \
+        mc_bi_w_func(name, bitd, 48, opt)   \
         mc_bi_w_func(name, bitd, 64, opt)

-mc_bi_w_funcs(pel_pixels, 8, sse4);
-mc_bi_w_func(pel_pixels, 8, 6, sse4);
-mc_bi_w_funcs(epel_h, 8, sse4);
-mc_bi_w_func(epel_h, 8, 6, sse4);
-mc_bi_w_funcs(epel_v, 8, sse4);
-mc_bi_w_func(epel_v, 8, 6, sse4);
-mc_bi_w_funcs(epel_hv, 8, sse4);
-mc_bi_w_func(epel_hv, 8, 6, sse4);
-mc_bi_w_funcs(qpel_h, 8, sse4);
-mc_bi_w_funcs(qpel_v, 8, sse4);
-mc_bi_w_funcs(qpel_hv, 8, sse4);
-
-mc_bi_w_funcs(pel_pixels, 10, sse4);
-mc_bi_w_func(pel_pixels, 10, 6, sse4);
-mc_bi_w_funcs(epel_h, 10, sse4);
-mc_bi_w_func(epel_h, 10, 6, sse4);
-mc_bi_w_funcs(epel_v, 10, sse4);
-mc_bi_w_func(epel_v, 10, 6, sse4);
-mc_bi_w_funcs(epel_hv, 10, sse4);
-mc_bi_w_func(epel_hv, 10, 6, sse4);
-mc_bi_w_funcs(qpel_h, 10, sse4);
-mc_bi_w_funcs(qpel_v, 10, sse4);
-mc_bi_w_funcs(qpel_hv, 10, sse4);
-
-mc_bi_w_funcs(pel_pixels, 12, sse4);
-mc_bi_w_func(pel_pixels, 12, 6, sse4);
-mc_bi_w_funcs(epel_h, 12, sse4);
-mc_bi_w_func(epel_h, 12, 6, sse4);
-mc_bi_w_funcs(epel_v, 12, sse4);
-mc_bi_w_func(epel_v, 12, 6, sse4);
-mc_bi_w_funcs(epel_hv, 12, sse4);
-mc_bi_w_func(epel_hv, 12, 6, sse4);
-mc_bi_w_funcs(qpel_h, 12, sse4);
-mc_bi_w_funcs(qpel_v, 12, sse4);
-mc_bi_w_funcs(qpel_hv, 12, sse4);
+mc_bi_w_funcs(pel_pixels, 8, sse4)
+mc_bi_w_func(pel_pixels, 8, 6, sse4)
+mc_bi_w_funcs(epel_h, 8, sse4)
+mc_bi_w_func(epel_h, 8, 6, sse4)
+mc_bi_w_funcs(epel_v, 8, sse4)
+mc_bi_w_func(epel_v, 8, 6, sse4)
+mc_bi_w_funcs(epel_hv, 8, sse4)
+mc_bi_w_func(epel_hv, 8, 6, sse4)
+mc_bi_w_funcs(qpel_h, 8, sse4)
+mc_bi_w_funcs(qpel_v, 8, sse4)
+mc_bi_w_funcs(qpel_hv, 8, sse4)
+
+mc_bi_w_funcs(pel_pixels, 10, sse4)
+mc_bi_w_func(pel_pixels, 10, 6, sse4)
+mc_bi_w_funcs(epel_h, 10, sse4)
+mc_bi_w_func(epel_h, 10, 6, sse4)
+mc_bi_w_funcs(epel_v, 10, sse4)
+mc_bi_w_func(epel_v, 10, 6, sse4)
+mc_bi_w_funcs(epel_hv, 10, sse4)
+mc_bi_w_func(epel_hv, 10, 6, sse4)
+mc_bi_w_funcs(qpel_h, 10, sse4)
+mc_bi_w_funcs(qpel_v, 10, sse4)
+mc_bi_w_funcs(qpel_hv, 10, sse4)
+
+mc_bi_w_funcs(pel_pixels, 12, sse4)
+mc_bi_w_func(pel_pixels, 12, 6, sse4)
+mc_bi_w_funcs(epel_h, 12, sse4)
+mc_bi_w_func(epel_h, 12, 6, sse4)
+mc_bi_w_funcs(epel_v, 12, sse4)
+mc_bi_w_func(epel_v, 12, 6, sse4)
+mc_bi_w_funcs(epel_hv, 12, sse4)
+mc_bi_w_func(epel_hv, 12, 6, sse4)
+mc_bi_w_funcs(qpel_h, 12, sse4)
+mc_bi_w_funcs(qpel_v, 12, sse4)
+mc_bi_w_funcs(qpel_hv, 12, sse4)
 #endif //ARCH_X86_64 && HAVE_SSE4_EXTERNAL

 #define SAO_BAND_FILTER_FUNCS(bitd, opt)                                                     \
@@ -630,17 +625,17 @@ void ff_hevc_sao_band_filter_32_##bitd##_##opt(uint8_t *_dst, uint8_t *_src, ptr
 void ff_hevc_sao_band_filter_48_##bitd##_##opt(uint8_t *_dst, uint8_t *_src, ptrdiff_t _stride_dst, ptrdiff_t _stride_src, \
                                             int16_t *sao_offset_val, int sao_left_class, int width, int height);           \
 void ff_hevc_sao_band_filter_64_##bitd##_##opt(uint8_t *_dst, uint8_t *_src, ptrdiff_t _stride_dst, ptrdiff_t _stride_src, \
-                                             int16_t *sao_offset_val, int sao_left_class, int width, int height)
-
-SAO_BAND_FILTER_FUNCS(8,  sse2);
-SAO_BAND_FILTER_FUNCS(10, sse2);
-SAO_BAND_FILTER_FUNCS(12, sse2);
-SAO_BAND_FILTER_FUNCS(8,   avx);
-SAO_BAND_FILTER_FUNCS(10,  avx);
-SAO_BAND_FILTER_FUNCS(12,  avx);
-SAO_BAND_FILTER_FUNCS(8,  avx2);
-SAO_BAND_FILTER_FUNCS(10, avx2);
-SAO_BAND_FILTER_FUNCS(12, avx2);
+                                             int16_t *sao_offset_val, int sao_left_class, int width, int height);
+
+SAO_BAND_FILTER_FUNCS(8,  sse2)
+SAO_BAND_FILTER_FUNCS(10, sse2)
+SAO_BAND_FILTER_FUNCS(12, sse2)
+SAO_BAND_FILTER_FUNCS(8,   avx)
+SAO_BAND_FILTER_FUNCS(10,  avx)
+SAO_BAND_FILTER_FUNCS(12,  avx)
+SAO_BAND_FILTER_FUNCS(8,  avx2)
+SAO_BAND_FILTER_FUNCS(10, avx2)
+SAO_BAND_FILTER_FUNCS(12, avx2)

 #define SAO_BAND_INIT(bitd, opt) do {                                       \
     c->sao_band_filter[0]      = ff_hevc_sao_band_filter_8_##bitd##_##opt;  \
@@ -662,12 +657,12 @@ void ff_hevc_sao_edge_filter_48_##bitd##_##opt(uint8_t *_dst, uint8_t *_src, ptr
 void ff_hevc_sao_edge_filter_64_##bitd##_##opt(uint8_t *_dst, uint8_t *_src, ptrdiff_t stride_dst, int16_t *sao_offset_val, \
                                                int eo, int width, int height);                                              \

-SAO_EDGE_FILTER_FUNCS(8, ssse3);
-SAO_EDGE_FILTER_FUNCS(8, avx2);
-SAO_EDGE_FILTER_FUNCS(10, sse2);
-SAO_EDGE_FILTER_FUNCS(10, avx2);
-SAO_EDGE_FILTER_FUNCS(12, sse2);
-SAO_EDGE_FILTER_FUNCS(12, avx2);
+SAO_EDGE_FILTER_FUNCS(8, ssse3)
+SAO_EDGE_FILTER_FUNCS(8, avx2)
+SAO_EDGE_FILTER_FUNCS(10, sse2)
+SAO_EDGE_FILTER_FUNCS(10, avx2)
+SAO_EDGE_FILTER_FUNCS(12, sse2)
+SAO_EDGE_FILTER_FUNCS(12, avx2)

 #define SAO_EDGE_INIT(bitd, opt) do {                                       \
     c->sao_edge_filter[0]      = ff_hevc_sao_edge_filter_8_##bitd##_##opt;  \
diff --git a/libavcodec/x86/rv40dsp_init.c b/libavcodec/x86/rv40dsp_init.c
index 12b85c9..218deb8 100644
--- a/libavcodec/x86/rv40dsp_init.c
+++ b/libavcodec/x86/rv40dsp_init.c
@@ -101,7 +101,7 @@ static void OP ## rv40_qpel ##SIZE ##_mc ##PH ##PV ##OPT(uint8_t *dst,  \
             ff_ ##OP ##rv40_qpel_h ## OPT(dst + i, stride, src + i,     \
                                           stride, SIZE, HCOFF(PH));     \
     }                                                                   \
-};
+}

 /** Declare functions for sizes 8 and 16 and given operations
  *  and qpel position. */
diff --git a/libavcodec/x86/vp9dsp_init.c b/libavcodec/x86/vp9dsp_init.c
index 2347a47..8efb18c 100644
--- a/libavcodec/x86/vp9dsp_init.c
+++ b/libavcodec/x86/vp9dsp_init.c
@@ -53,16 +53,16 @@ decl_mc_funcs(16, ssse3, int8_t, 32, 8);
 decl_mc_funcs(32, avx2, int8_t, 32, 8);
 #endif

-mc_rep_funcs(16,  8,  8,  sse2, int16_t,  8, 8);
+mc_rep_funcs(16,  8,  8,  sse2, int16_t,  8, 8)
 #if ARCH_X86_32
-mc_rep_funcs(16,  8,  8, ssse3, int8_t,  32, 8);
+mc_rep_funcs(16,  8,  8, ssse3, int8_t,  32, 8)
 #endif
-mc_rep_funcs(32, 16, 16, sse2,  int16_t,  8, 8);
-mc_rep_funcs(32, 16, 16, ssse3, int8_t,  32, 8);
-mc_rep_funcs(64, 32, 32, sse2,  int16_t,  8, 8);
-mc_rep_funcs(64, 32, 32, ssse3, int8_t,  32, 8);
+mc_rep_funcs(32, 16, 16, sse2,  int16_t,  8, 8)
+mc_rep_funcs(32, 16, 16, ssse3, int8_t,  32, 8)
+mc_rep_funcs(64, 32, 32, sse2,  int16_t,  8, 8)
+mc_rep_funcs(64, 32, 32, ssse3, int8_t,  32, 8)
 #if ARCH_X86_64 && HAVE_AVX2_EXTERNAL
-mc_rep_funcs(64, 32, 32, avx2,  int8_t,  32, 8);
+mc_rep_funcs(64, 32, 32, avx2,  int8_t,  32, 8)
 #endif

 extern const int8_t ff_filters_ssse3[3][15][4][32];
diff --git a/libavcodec/x86/vp9dsp_init.h b/libavcodec/x86/vp9dsp_init.h
index b3b0558..e410cab 100644
--- a/libavcodec/x86/vp9dsp_init.h
+++ b/libavcodec/x86/vp9dsp_init.h
@@ -81,9 +81,9 @@ ff_vp9_##avg##_8tap_1d_##dir##_##sz##_##bpp##_##opt(uint8_t *dst, ptrdiff_t dst_
 }

 #define mc_rep_funcs(sz, hsz, hszb, opt, type, fsz, bpp) \
-mc_rep_func(put, sz, hsz, hszb, h, opt, type, fsz, bpp); \
-mc_rep_func(avg, sz, hsz, hszb, h, opt, type, fsz, bpp); \
-mc_rep_func(put, sz, hsz, hszb, v, opt, type, fsz, bpp); \
+mc_rep_func(put, sz, hsz, hszb, h, opt, type, fsz, bpp) \
+mc_rep_func(avg, sz, hsz, hszb, h, opt, type, fsz, bpp) \
+mc_rep_func(put, sz, hsz, hszb, v, opt, type, fsz, bpp) \
 mc_rep_func(avg, sz, hsz, hszb, v, opt, type, fsz, bpp)

 #define filter_8tap_1d_fn(op, sz, f, f_opt, fname, dir, dvar, bpp, opt) \
diff --git a/libavcodec/x86/vp9dsp_init_16bpp_template.c b/libavcodec/x86/vp9dsp_init_16bpp_template.c
index 5ad9e81..90cdcc9 100644
--- a/libavcodec/x86/vp9dsp_init_16bpp_template.c
+++ b/libavcodec/x86/vp9dsp_init_16bpp_template.c
@@ -35,12 +35,12 @@ decl_mc_funcs(4, sse2, int16_t, 16, BPC);
 decl_mc_funcs(8, sse2, int16_t, 16, BPC);
 decl_mc_funcs(16, avx2, int16_t, 16, BPC);

-mc_rep_funcs(16,  8, 16, sse2, int16_t, 16, BPC);
-mc_rep_funcs(32, 16, 32, sse2, int16_t, 16, BPC);
-mc_rep_funcs(64, 32, 64, sse2, int16_t, 16, BPC);
+mc_rep_funcs(16,  8, 16, sse2, int16_t, 16, BPC)
+mc_rep_funcs(32, 16, 32, sse2, int16_t, 16, BPC)
+mc_rep_funcs(64, 32, 64, sse2, int16_t, 16, BPC)
 #if HAVE_AVX2_EXTERNAL
-mc_rep_funcs(32, 16, 32, avx2, int16_t, 16, BPC);
-mc_rep_funcs(64, 32, 64, avx2, int16_t, 16, BPC);
+mc_rep_funcs(32, 16, 32, avx2, int16_t, 16, BPC)
+mc_rep_funcs(64, 32, 64, avx2, int16_t, 16, BPC)
 #endif

 filters_8tap_2d_fn2(put, 16, BPC, 2, sse2, sse2, 16bpp)
@@ -91,12 +91,12 @@ static void loop_filter_##dir##_16_##bpp##_##opt(uint8_t *dst, ptrdiff_t stride,
 }

 #define lpf_16_wrappers(bpp, opt) \
-lpf_16_wrapper(h, 8 * stride, bpp, opt); \
+lpf_16_wrapper(h, 8 * stride, bpp, opt) \
 lpf_16_wrapper(v, 16,         bpp, opt)

-lpf_16_wrappers(BPC, sse2);
-lpf_16_wrappers(BPC, ssse3);
-lpf_16_wrappers(BPC, avx);
+lpf_16_wrappers(BPC, sse2)
+lpf_16_wrappers(BPC, ssse3)
+lpf_16_wrappers(BPC, avx)

 #define lpf_mix2_wrapper(dir, off, wd1, wd2, bpp, opt) \
 static void loop_filter_##dir##_##wd1##wd2##_##bpp##_##opt(uint8_t *dst, ptrdiff_t stride, \
@@ -109,18 +109,18 @@ static void loop_filter_##dir##_##wd1##wd2##_##bpp##_##opt(uint8_t *dst, ptrdiff
 }

 #define lpf_mix2_wrappers(wd1, wd2, bpp, opt) \
-lpf_mix2_wrapper(h, 8 * stride, wd1, wd2, bpp, opt); \
+lpf_mix2_wrapper(h, 8 * stride, wd1, wd2, bpp, opt) \
 lpf_mix2_wrapper(v, 16,         wd1, wd2, bpp, opt)

 #define lpf_mix2_wrappers_set(bpp, opt) \
-lpf_mix2_wrappers(4, 4, bpp, opt); \
-lpf_mix2_wrappers(4, 8, bpp, opt); \
-lpf_mix2_wrappers(8, 4, bpp, opt); \
-lpf_mix2_wrappers(8, 8, bpp, opt); \
-
-lpf_mix2_wrappers_set(BPC, sse2);
-lpf_mix2_wrappers_set(BPC, ssse3);
-lpf_mix2_wrappers_set(BPC, avx);
+lpf_mix2_wrappers(4, 4, bpp, opt) \
+lpf_mix2_wrappers(4, 8, bpp, opt) \
+lpf_mix2_wrappers(8, 4, bpp, opt) \
+lpf_mix2_wrappers(8, 8, bpp, opt) \
+
+lpf_mix2_wrappers_set(BPC, sse2)
+lpf_mix2_wrappers_set(BPC, ssse3)
+lpf_mix2_wrappers_set(BPC, avx)

 decl_ipred_fns(tm, BPC, mmxext, sse2);

diff --git a/libavdevice/alsa.c b/libavdevice/alsa.c
index 27a1655..75ac444 100644
--- a/libavdevice/alsa.c
+++ b/libavdevice/alsa.c
@@ -89,7 +89,7 @@ MAKE_REORDER_FUNCS(5, out_50, \
         out[2] = in[3]; \
         out[3] = in[4]; \
         out[4] = in[2]; \
-        );
+        )

 MAKE_REORDER_FUNCS(6, out_51, \
         out[0] = in[0]; \
@@ -98,7 +98,7 @@ MAKE_REORDER_FUNCS(6, out_51, \
         out[3] = in[5]; \
         out[4] = in[2]; \
         out[5] = in[3]; \
-        );
+        )

 MAKE_REORDER_FUNCS(8, out_71, \
         out[0] = in[0]; \
@@ -109,7 +109,7 @@ MAKE_REORDER_FUNCS(8, out_71, \
         out[5] = in[3]; \
         out[6] = in[6]; \
         out[7] = in[7]; \
-        );
+        )

 #define FORMAT_I8  0
 #define FORMAT_I16 1
-- 
2.6.2
-------------- next part --------------
A non-text attachment was scrubbed...
Name: 0001-all-fix-Wextra-semi-reported-on-clang.patch
Type: text/x-diff
Size: 37465 bytes
Desc: not available
URL: <http://ffmpeg.org/pipermail/ffmpeg-devel/attachments/20151024/bed06d5d/attachment.patch>

