[FFmpeg-cvslog] swscale/x86/input: add AVX2 optimized RGB32 to YUV functions
James Almer
git at videolan.org
Sun Jun 9 20:32:46 EEST 2024
ffmpeg | branch: master | James Almer <jamrial at gmail.com> | Wed Jun 5 00:31:48 2024 -0300 | [e9cfd532579cb33cd814ce7fb9e7480cd7054750] | committer: James Almer
swscale/x86/input: add AVX2 optimized RGB32 to YUV functions
abgr_to_uv_8_c: 43.3
abgr_to_uv_8_sse2: 14.3
abgr_to_uv_8_avx: 15.3
abgr_to_uv_8_avx2: 18.8
abgr_to_uv_128_c: 650.3
abgr_to_uv_128_sse2: 110.8
abgr_to_uv_128_avx: 112.3
abgr_to_uv_128_avx2: 64.8
abgr_to_uv_1080_c: 5456.3
abgr_to_uv_1080_sse2: 888.8
abgr_to_uv_1080_avx: 900.8
abgr_to_uv_1080_avx2: 518.3
abgr_to_uv_1920_c: 9692.3
abgr_to_uv_1920_sse2: 1593.8
abgr_to_uv_1920_avx: 1613.3
abgr_to_uv_1920_avx2: 864.8
abgr_to_y_8_c: 23.3
abgr_to_y_8_sse2: 12.8
abgr_to_y_8_avx: 13.3
abgr_to_y_8_avx2: 17.3
abgr_to_y_128_c: 308.3
abgr_to_y_128_sse2: 67.3
abgr_to_y_128_avx: 66.8
abgr_to_y_128_avx2: 44.8
abgr_to_y_1080_c: 2371.3
abgr_to_y_1080_sse2: 512.8
abgr_to_y_1080_avx: 505.8
abgr_to_y_1080_avx2: 314.3
abgr_to_y_1920_c: 4177.3
abgr_to_y_1920_sse2: 915.8
abgr_to_y_1920_avx: 926.8
abgr_to_y_1920_avx2: 519.3
bgra_to_uv_8_c: 37.3
bgra_to_uv_8_sse2: 13.3
bgra_to_uv_8_avx: 14.8
bgra_to_uv_8_avx2: 19.8
bgra_to_uv_128_c: 563.8
bgra_to_uv_128_sse2: 111.3
bgra_to_uv_128_avx: 112.3
bgra_to_uv_128_avx2: 64.8
bgra_to_uv_1080_c: 4691.8
bgra_to_uv_1080_sse2: 893.8
bgra_to_uv_1080_avx: 899.8
bgra_to_uv_1080_avx2: 517.8
bgra_to_uv_1920_c: 8332.8
bgra_to_uv_1920_sse2: 1590.8
bgra_to_uv_1920_avx: 1605.8
bgra_to_uv_1920_avx2: 867.3
bgra_to_y_8_c: 22.3
bgra_to_y_8_sse2: 12.8
bgra_to_y_8_avx: 12.8
bgra_to_y_8_avx2: 17.3
bgra_to_y_128_c: 291.3
bgra_to_y_128_sse2: 67.8
bgra_to_y_128_avx: 69.3
bgra_to_y_128_avx2: 45.3
bgra_to_y_1080_c: 2357.3
bgra_to_y_1080_sse2: 508.3
bgra_to_y_1080_avx: 518.3
bgra_to_y_1080_avx2: 399.8
bgra_to_y_1920_c: 4202.8
bgra_to_y_1920_sse2: 906.8
bgra_to_y_1920_avx: 907.3
bgra_to_y_1920_avx2: 526.3
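For scale (assuming these are checkasm --bench timings, lower is better), the wide cases work out to roughly:

    abgr_to_uv_1920: 9692.3 / 864.8 ≈ 11.2x over C, 1593.8 / 864.8 ≈ 1.84x over SSE2
    abgr_to_y_1920:  4177.3 / 519.3 ≈  8.0x over C,  915.8 / 519.3 ≈ 1.76x over SSE2

The 8-sample cases show no gain (presumably dominated by call overhead and the tail loop, which still runs on XMM registers), while every 128-sample and larger case is clearly faster than the SSE2/AVX versions.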
Signed-off-by: James Almer <jamrial at gmail.com>
> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=e9cfd532579cb33cd814ce7fb9e7480cd7054750
---
libswscale/x86/input.asm | 102 ++++++++++++++++++++++++++++++++++-------------
libswscale/x86/swscale.c | 8 ++++
2 files changed, 82 insertions(+), 28 deletions(-)
diff --git a/libswscale/x86/input.asm b/libswscale/x86/input.asm
index 5277d90b28..21cd8b37fd 100644
--- a/libswscale/x86/input.asm
+++ b/libswscale/x86/input.asm
@@ -382,8 +382,13 @@ RGB24_FUNCS 11, 13, u
; %2-5 = rgba, bgra, argb or abgr (in individual characters)
%macro RGB32_TO_Y_FN 5-6
cglobal %2%3%4%5 %+ ToY, 6, 6, %1, dst, src, u1, u2, w, table
+%if mmsize == 32
+ vbroadcasti128 m5, [rgba_Ycoeff_%2%4]
+ vbroadcasti128 m6, [rgba_Ycoeff_%3%5]
+%else
mova m5, [rgba_Ycoeff_%2%4]
mova m6, [rgba_Ycoeff_%3%5]
+%endif
%if %0 == 6
jmp mangle(private_prefix %+ _ %+ %6 %+ ToY %+ SUFFIX).body
%else ; %0 == 6
@@ -396,13 +401,21 @@ cglobal %2%3%4%5 %+ ToY, 6, 6, %1, dst, src, u1, u2, w, table
lea srcq, [srcq+wq*2]
add dstq, wq
neg wq
+%if mmsize == 32
+ vbroadcasti128 m4, [rgb_Yrnd]
+%else
mova m4, [rgb_Yrnd]
+%endif
pcmpeqb m7, m7
psrlw m7, 8 ; (word) { 0x00ff } x4
.loop:
; FIXME check alignment and use mova
- movu m0, [srcq+wq*2+0] ; (byte) { Bx, Gx, Rx, xx }[0-3]
- movu m2, [srcq+wq*2+mmsize] ; (byte) { Bx, Gx, Rx, xx }[4-7]
+ movu xm0, [srcq+wq*2+0] ; (byte) { Bx, Gx, Rx, xx }[0-3]
+ movu xm2, [srcq+wq*2+16] ; (byte) { Bx, Gx, Rx, xx }[4-7]
+%if mmsize == 32
+ vinserti128 m0, m0, [srcq+wq*2+32], 1
+ vinserti128 m2, m2, [srcq+wq*2+48], 1
+%endif
DEINTB 1, 0, 3, 2, 7 ; (word) { Gx, xx (m0/m2) or Bx, Rx (m1/m3) }[0-3]/[4-7]
pmaddwd m1, m5 ; (dword) { Bx*BY + Rx*RY }[0-3]
pmaddwd m0, m6 ; (dword) { Gx*GY }[0-3]
@@ -423,6 +436,7 @@ cglobal %2%3%4%5 %+ ToY, 6, 6, %1, dst, src, u1, u2, w, table
add srcq, 2*mmsize - 2
add dstq, mmsize - 1
.loop2:
+INIT_XMM cpuname
movd m0, [srcq+wq*2+0] ; (byte) { Bx, Gx, Rx, xx }[0-3]
DEINTB 1, 0, 3, 2, 7 ; (word) { Gx, xx (m0/m2) or Bx, Rx (m1/m3) }[0-3]/[4-7]
pmaddwd m1, m5 ; (dword) { Bx*BY + Rx*RY }[0-3]
@@ -435,32 +449,43 @@ cglobal %2%3%4%5 %+ ToY, 6, 6, %1, dst, src, u1, u2, w, table
add wq, 2
jl .loop2
.end:
+%if cpuflag(avx2)
+INIT_YMM cpuname
+%endif
RET
%endif ; %0 == 3
%endmacro
; %1 = nr. of XMM registers
-; %2-5 = rgba, bgra, argb or abgr (in individual characters)
-%macro RGB32_TO_UV_FN 5-6
-cglobal %2%3%4%5 %+ ToUV, 7, 7, %1, dstU, dstV, u1, src, u2, w, table
+; %2 = aligned/unaligned output argument
+; %3-6 = rgba, bgra, argb or abgr (in individual characters)
+%macro RGB32_TO_UV_FN 6-7
+cglobal %3%4%5%6 %+ ToUV, 7, 7, %1, dstU, dstV, u1, src, u2, w, table
%if ARCH_X86_64
- mova m8, [rgba_Ucoeff_%2%4]
- mova m9, [rgba_Ucoeff_%3%5]
- mova m10, [rgba_Vcoeff_%2%4]
- mova m11, [rgba_Vcoeff_%3%5]
+%if mmsize == 32
+ vbroadcasti128 m8, [rgba_Ucoeff_%3%5]
+ vbroadcasti128 m9, [rgba_Ucoeff_%4%6]
+ vbroadcasti128 m10, [rgba_Vcoeff_%3%5]
+ vbroadcasti128 m11, [rgba_Vcoeff_%4%6]
+%else
+ mova m8, [rgba_Ucoeff_%3%5]
+ mova m9, [rgba_Ucoeff_%4%6]
+ mova m10, [rgba_Vcoeff_%3%5]
+ mova m11, [rgba_Vcoeff_%4%6]
+%endif
%define coeffU1 m8
%define coeffU2 m9
%define coeffV1 m10
%define coeffV2 m11
%else ; x86-32
-%define coeffU1 [rgba_Ucoeff_%2%4]
-%define coeffU2 [rgba_Ucoeff_%3%5]
-%define coeffV1 [rgba_Vcoeff_%2%4]
-%define coeffV2 [rgba_Vcoeff_%3%5]
+%define coeffU1 [rgba_Ucoeff_%3%5]
+%define coeffU2 [rgba_Ucoeff_%4%6]
+%define coeffV1 [rgba_Vcoeff_%3%5]
+%define coeffV2 [rgba_Vcoeff_%4%6]
%endif ; x86-64/32
-%if ARCH_X86_64 && %0 == 6
- jmp mangle(private_prefix %+ _ %+ %6 %+ ToUV %+ SUFFIX).body
-%else ; ARCH_X86_64 && %0 == 6
+%if ARCH_X86_64 && %0 == 7
+ jmp mangle(private_prefix %+ _ %+ %7 %+ ToUV %+ SUFFIX).body
+%else ; ARCH_X86_64 && %0 == 7
.body:
%if ARCH_X86_64
movsxd wq, dword r5m
@@ -475,11 +500,19 @@ cglobal %2%3%4%5 %+ ToUV, 7, 7, %1, dstU, dstV, u1, src, u2, w, table
neg wq
pcmpeqb m7, m7
psrlw m7, 8 ; (word) { 0x00ff } x4
+%if mmsize == 32
+ vbroadcasti128 m6, [rgb_UVrnd]
+%else
mova m6, [rgb_UVrnd]
+%endif
.loop:
; FIXME check alignment and use mova
- movu m0, [srcq+wq*2+0] ; (byte) { Bx, Gx, Rx, xx }[0-3]
- movu m4, [srcq+wq*2+mmsize] ; (byte) { Bx, Gx, Rx, xx }[4-7]
+ movu xm0, [srcq+wq*2+0] ; (byte) { Bx, Gx, Rx, xx }[0-3]
+ movu xm4, [srcq+wq*2+16] ; (byte) { Bx, Gx, Rx, xx }[4-7]
+%if mmsize == 32
+ vinserti128 m0, m0, [srcq+wq*2+32], 1
+ vinserti128 m4, m4, [srcq+wq*2+48], 1
+%endif
DEINTB 1, 0, 5, 4, 7 ; (word) { Gx, xx (m0/m4) or Bx, Rx (m1/m5) }[0-3]/[4-7]
pmaddwd m3, m1, coeffV1 ; (dword) { Bx*BV + Rx*RV }[0-3]
pmaddwd m2, m0, coeffV2 ; (dword) { Gx*GV }[0-3]
@@ -503,8 +536,9 @@ cglobal %2%3%4%5 %+ ToUV, 7, 7, %1, dstU, dstV, u1, src, u2, w, table
psrad m1, 9
packssdw m0, m4 ; (word) { U[0-7] }
packssdw m2, m1 ; (word) { V[0-7] }
- mova [dstUq+wq], m0
- mova [dstVq+wq], m2
+ ; FIXME check alignment and use mova
+ mov%2 [dstUq+wq], m0
+ mov%2 [dstVq+wq], m2
add wq, mmsize
jl .loop
sub wq, mmsize - 1
@@ -513,6 +547,7 @@ cglobal %2%3%4%5 %+ ToUV, 7, 7, %1, dstU, dstV, u1, src, u2, w, table
add dstUq, mmsize - 1
add dstVq, mmsize - 1
.loop2:
+INIT_XMM cpuname
movd m0, [srcq+wq*2] ; (byte) { Bx, Gx, Rx, xx }[0-3]
DEINTB 1, 0, 5, 4, 7 ; (word) { Gx, xx (m0/m4) or Bx, Rx (m1/m5) }[0-3]/[4-7]
pmaddwd m3, m1, coeffV1 ; (dword) { Bx*BV + Rx*RV }[0-3]
@@ -532,30 +567,41 @@ cglobal %2%3%4%5 %+ ToUV, 7, 7, %1, dstU, dstV, u1, src, u2, w, table
add wq, 2
jl .loop2
.end:
+%if cpuflag(avx2)
+INIT_YMM cpuname
+%endif
RET
-%endif ; ARCH_X86_64 && %0 == 3
+%endif ; ARCH_X86_64 && %0 == 7
%endmacro
; %1 = nr. of XMM registers for rgb-to-Y func
; %2 = nr. of XMM registers for rgb-to-UV func
-%macro RGB32_FUNCS 2
+; %3 = aligned/unaligned output argument
+%macro RGB32_FUNCS 3
RGB32_TO_Y_FN %1, r, g, b, a
RGB32_TO_Y_FN %1, b, g, r, a, rgba
RGB32_TO_Y_FN %1, a, r, g, b, rgba
RGB32_TO_Y_FN %1, a, b, g, r, rgba
-RGB32_TO_UV_FN %2, r, g, b, a
-RGB32_TO_UV_FN %2, b, g, r, a, rgba
-RGB32_TO_UV_FN %2, a, r, g, b, rgba
-RGB32_TO_UV_FN %2, a, b, g, r, rgba
+RGB32_TO_UV_FN %2, %3, r, g, b, a
+RGB32_TO_UV_FN %2, %3, b, g, r, a, rgba
+RGB32_TO_UV_FN %2, %3, a, r, g, b, rgba
+RGB32_TO_UV_FN %2, %3, a, b, g, r, rgba
%endmacro
INIT_XMM sse2
-RGB32_FUNCS 8, 12
+RGB32_FUNCS 8, 12, a
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
-RGB32_FUNCS 8, 12
+RGB32_FUNCS 8, 12, a
+%endif
+
+%if ARCH_X86_64
+%if HAVE_AVX2_EXTERNAL
+INIT_YMM avx2
+RGB32_FUNCS 8, 12, u
+%endif
%endif
;-----------------------------------------------------------------------------
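As the input.asm hunks above show, the AVX2 variants reuse the existing 16-byte coefficient tables by broadcasting them into both 128-bit lanes (vbroadcasti128), build each ymm input from two unaligned 16-byte loads plus vinserti128, and switch back to INIT_XMM for the per-pixel tail loop. Below is a rough, self-contained C intrinsics sketch of that load/compute pattern for the BGRA-to-Y case; the coefficient layout, the rounding constant and the final shift are placeholders here, not the exact libswscale values.

#include <immintrin.h>
#include <stdint.h>

/* Sketch only: 16 BGRA pixels -> 16 Y samples per iteration.
 * coeff_br holds {BY,RY} pairs, coeff_ga holds {GY,0} pairs (8 int16 each);
 * rnd and the >> 9 mirror the rgb_Yrnd + psrad pattern but are placeholders. */
static void bgra_to_y_avx2_sketch(int16_t *dst, const uint8_t *src, int width,
                                  const int16_t coeff_br[8],
                                  const int16_t coeff_ga[8], int32_t rnd)
{
    /* vbroadcasti128: same 16-byte table in both 128-bit lanes */
    const __m256i cbr  = _mm256_broadcastsi128_si256(
                             _mm_loadu_si128((const __m128i *)coeff_br));
    const __m256i cga  = _mm256_broadcastsi128_si256(
                             _mm_loadu_si128((const __m128i *)coeff_ga));
    const __m256i vrnd = _mm256_set1_epi32(rnd);
    const __m256i mask = _mm256_set1_epi16(0x00ff);

    for (int i = 0; i + 16 <= width; i += 16, src += 64, dst += 16) {
        /* movu xm0/xm2 + vinserti128: two 16-byte loads per ymm register */
        __m256i p0 = _mm256_inserti128_si256(
            _mm256_castsi128_si256(_mm_loadu_si128((const __m128i *)(src +  0))),
            _mm_loadu_si128((const __m128i *)(src + 32)), 1);
        __m256i p1 = _mm256_inserti128_si256(
            _mm256_castsi128_si256(_mm_loadu_si128((const __m128i *)(src + 16))),
            _mm_loadu_si128((const __m128i *)(src + 48)), 1);

        /* DEINTB equivalent: even bytes (B,R) and odd bytes (G,A) as words */
        __m256i br0 = _mm256_and_si256(p0, mask), ga0 = _mm256_srli_epi16(p0, 8);
        __m256i br1 = _mm256_and_si256(p1, mask), ga1 = _mm256_srli_epi16(p1, 8);

        /* pmaddwd: B*BY + R*RY and G*GY + A*0, then round and shift */
        __m256i y0 = _mm256_add_epi32(_mm256_madd_epi16(br0, cbr),
                                      _mm256_madd_epi16(ga0, cga));
        __m256i y1 = _mm256_add_epi32(_mm256_madd_epi16(br1, cbr),
                                      _mm256_madd_epi16(ga1, cga));
        y0 = _mm256_srai_epi32(_mm256_add_epi32(y0, vrnd), 9);
        y1 = _mm256_srai_epi32(_mm256_add_epi32(y1, vrnd), 9);

        /* packssdw: 8 words per lane, already in memory order */
        _mm256_storeu_si256((__m256i *)dst, _mm256_packs_epi32(y0, y1));
    }
    /* widths that are not a multiple of 16 need a tail loop, as in the asm */
}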
diff --git a/libswscale/x86/swscale.c b/libswscale/x86/swscale.c
index 1438c077e6..5a9da23265 100644
--- a/libswscale/x86/swscale.c
+++ b/libswscale/x86/swscale.c
@@ -321,6 +321,10 @@ void ff_ ## fmt ## ToUV_ ## opt(uint8_t *dstU, uint8_t *dstV, \
INPUT_FUNCS(sse2);
INPUT_FUNCS(ssse3);
INPUT_FUNCS(avx);
+INPUT_FUNC(rgba, avx2);
+INPUT_FUNC(bgra, avx2);
+INPUT_FUNC(argb, avx2);
+INPUT_FUNC(abgr, avx2);
INPUT_FUNC(rgb24, avx2);
INPUT_FUNC(bgr24, avx2);
@@ -640,6 +644,10 @@ switch(c->dstBpc){ \
switch (c->srcFormat) {
case_rgb(rgb24, RGB24, avx2);
case_rgb(bgr24, BGR24, avx2);
+ case_rgb(bgra, BGRA, avx2);
+ case_rgb(rgba, RGBA, avx2);
+ case_rgb(abgr, ABGR, avx2);
+ case_rgb(argb, ARGB, avx2);
}
switch (c->dstFormat) {
case AV_PIX_FMT_NV12:
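On the C side, the INPUT_FUNC(..., avx2) lines declare the new asm entry points and the case_rgb(..., avx2) entries hook them up when the source format matches, inside ff_sws_init_swscale_x86(). A hypothetical, simplified illustration of that kind of CPU-flag-gated selection follows; the function pointer signature is a placeholder, and the real code assigns the corresponding SwsContext function pointers rather than returning one.

#include <stdint.h>
#include "libavutil/cpu.h"

/* Placeholder signature; the real input function pointers take more
 * arguments. */
typedef void (*rgb32_to_y_fn)(int16_t *dst, const uint8_t *src, int width);

/* Pick the fastest available implementation at init time. */
static rgb32_to_y_fn select_bgra_to_y(rgb32_to_y_fn c_fn,
                                      rgb32_to_y_fn sse2_fn,
                                      rgb32_to_y_fn avx2_fn)
{
    int cpu_flags = av_get_cpu_flags();

    if (avx2_fn && (cpu_flags & AV_CPU_FLAG_AVX2))
        return avx2_fn;   /* e.g. the new ff_bgraToY_avx2 */
    if (sse2_fn && (cpu_flags & AV_CPU_FLAG_SSE2))
        return sse2_fn;   /* e.g. ff_bgraToY_sse2 */
    return c_fn;          /* scalar fallback */
}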