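/* Chroma coefficients taken from the colorspace tables in SwsInternal, in the
 * order the NEON kernels expect them in their 'table' argument: V->R, U->G,
 * V->G, U->B.  The luma offset and coefficient are passed separately
 * (y_offset, y_coeff). */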
#define YUV_TO_RGB_TABLE \
        c->yuv2rgb_v2r_coeff, \
        c->yuv2rgb_u2g_coeff, \
        c->yuv2rgb_v2g_coeff, \
        c->yuv2rgb_u2b_coeff,
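/* Declares the assembly routine ff_<ifmt>_to_<ofmt>_neon() for a planar YUV
 * input and a packed RGB output, plus a wrapper that adapts it to the
 * convert_unscaled callback signature used by libswscale. */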
#define DECLARE_FF_YUVX_TO_RGBX_FUNCS(ifmt, ofmt) \
int ff_##ifmt##_to_##ofmt##_neon(int w, int h, \
                                 uint8_t *dst, int linesize, \
                                 const uint8_t *srcY, int linesizeY, \
                                 const uint8_t *srcU, int linesizeU, \
                                 const uint8_t *srcV, int linesizeV, \
                                 const int16_t *table, \
                                 int y_offset, \
                                 int y_coeff); \
 \
static int ifmt##_to_##ofmt##_neon_wrapper(SwsInternal *c, const uint8_t *const src[], \
                                           const int srcStride[], int srcSliceY, \
                                           int srcSliceH, uint8_t *const dst[], \
                                           const int dstStride[]) { \
    const int16_t yuv2rgb_table[] = { YUV_TO_RGB_TABLE }; \
 \
    return ff_##ifmt##_to_##ofmt##_neon(c->srcW, srcSliceH, \
                                        dst[0] + srcSliceY * dstStride[0], dstStride[0], \
                                        src[0], srcStride[0], \
                                        src[1], srcStride[1], \
                                        src[2], srcStride[2], \
                                        yuv2rgb_table, \
                                        c->yuv2rgb_y_offset >> 6, \
                                        c->yuv2rgb_y_coeff); \
}
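/* Same idea for planar RGB (gbrp) output: the assembly routine takes two
 * extra destination planes (dst1/dst2) and their linesizes. */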
#define DECLARE_FF_YUVX_TO_GBRP_FUNCS(ifmt, ofmt) \
int ff_##ifmt##_to_##ofmt##_neon(int w, int h, \
                                 uint8_t *dst, int linesize, \
                                 const uint8_t *srcY, int linesizeY, \
                                 const uint8_t *srcU, int linesizeU, \
                                 const uint8_t *srcV, int linesizeV, \
                                 const int16_t *table, \
                                 int y_offset, \
                                 int y_coeff, \
                                 uint8_t *dst1, int linesize1, \
                                 uint8_t *dst2, int linesize2); \
 \
static int ifmt##_to_##ofmt##_neon_wrapper(SwsInternal *c, const uint8_t *const src[], \
                                           const int srcStride[], int srcSliceY, \
                                           int srcSliceH, uint8_t *const dst[], \
                                           const int dstStride[]) { \
    const int16_t yuv2rgb_table[] = { YUV_TO_RGB_TABLE }; \
 \
    return ff_##ifmt##_to_##ofmt##_neon(c->srcW, srcSliceH, \
                                        dst[0] + srcSliceY * dstStride[0], dstStride[0], \
                                        src[0], srcStride[0], \
                                        src[1], srcStride[1], \
                                        src[2], srcStride[2], \
                                        yuv2rgb_table, \
                                        c->yuv2rgb_y_offset >> 6, \
                                        c->yuv2rgb_y_coeff, \
                                        dst[1] + srcSliceY * dstStride[1], dstStride[1], \
                                        dst[2] + srcSliceY * dstStride[2], dstStride[2]); \
}
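/* Instantiate the declarations and wrappers for every supported packed RGB
 * output, plus gbrp, for one planar YUV input format. */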
#define DECLARE_FF_YUVX_TO_ALL_RGBX_FUNCS(yuvx) \
DECLARE_FF_YUVX_TO_RGBX_FUNCS(yuvx, argb) \
DECLARE_FF_YUVX_TO_RGBX_FUNCS(yuvx, rgba) \
DECLARE_FF_YUVX_TO_RGBX_FUNCS(yuvx, abgr) \
DECLARE_FF_YUVX_TO_RGBX_FUNCS(yuvx, bgra) \
DECLARE_FF_YUVX_TO_GBRP_FUNCS(yuvx, gbrp)

DECLARE_FF_YUVX_TO_ALL_RGBX_FUNCS(yuv420p)
DECLARE_FF_YUVX_TO_ALL_RGBX_FUNCS(yuv422p)
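/* Semi-planar (NV12/NV21-style) inputs: the separate U and V planes are
 * replaced by a single interleaved chroma plane srcC. */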
#define DECLARE_FF_NVX_TO_RGBX_FUNCS(ifmt, ofmt) \
int ff_##ifmt##_to_##ofmt##_neon(int w, int h, \
                                 uint8_t *dst, int linesize, \
                                 const uint8_t *srcY, int linesizeY, \
                                 const uint8_t *srcC, int linesizeC, \
                                 const int16_t *table, \
                                 int y_offset, \
                                 int y_coeff); \
 \
static int ifmt##_to_##ofmt##_neon_wrapper(SwsInternal *c, const uint8_t *const src[], \
                                           const int srcStride[], int srcSliceY, \
                                           int srcSliceH, uint8_t *const dst[], \
                                           const int dstStride[]) { \
    const int16_t yuv2rgb_table[] = { YUV_TO_RGB_TABLE }; \
 \
    return ff_##ifmt##_to_##ofmt##_neon(c->srcW, srcSliceH, \
                                        dst[0] + srcSliceY * dstStride[0], dstStride[0], \
                                        src[0], srcStride[0], src[1], srcStride[1], \
                                        yuv2rgb_table, \
                                        c->yuv2rgb_y_offset >> 6, \
                                        c->yuv2rgb_y_coeff); \
}
#define DECLARE_FF_NVX_TO_GBRP_FUNCS(ifmt, ofmt) \
int ff_##ifmt##_to_##ofmt##_neon(int w, int h, \
                                 uint8_t *dst, int linesize, \
                                 const uint8_t *srcY, int linesizeY, \
                                 const uint8_t *srcC, int linesizeC, \
                                 const int16_t *table, \
                                 int y_offset, \
                                 int y_coeff, \
                                 uint8_t *dst1, int linesize1, \
                                 uint8_t *dst2, int linesize2); \
 \
static int ifmt##_to_##ofmt##_neon_wrapper(SwsInternal *c, const uint8_t *const src[], \
                                           const int srcStride[], int srcSliceY, \
                                           int srcSliceH, uint8_t *const dst[], \
                                           const int dstStride[]) { \
    const int16_t yuv2rgb_table[] = { YUV_TO_RGB_TABLE }; \
 \
    return ff_##ifmt##_to_##ofmt##_neon(c->srcW, srcSliceH, \
                                        dst[0] + srcSliceY * dstStride[0], dstStride[0], \
                                        src[0], srcStride[0], src[1], srcStride[1], \
                                        yuv2rgb_table, \
                                        c->yuv2rgb_y_offset >> 6, \
                                        c->yuv2rgb_y_coeff, \
                                        dst[1] + srcSliceY * dstStride[1], dstStride[1], \
                                        dst[2] + srcSliceY * dstStride[2], dstStride[2]); \
}
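/* NV24/NV42 -> YUV420P: the luma plane is copied unchanged, while the
 * interleaved full-resolution chroma plane is deinterleaved and downsampled
 * to 4:2:0 by the NEON routine below. */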
void ff_nv24_to_yuv420p_chroma_neon(uint8_t *dst1, int dstStride1,
                                    uint8_t *dst2, int dstStride2,
                                    const uint8_t *src, int srcStride,
                                    int w, int h);

static int nv24_to_yuv420p_neon_wrapper(SwsInternal *c, const uint8_t *const src[],
                                        const int srcStride[], int srcSliceY,
                                        int srcSliceH, uint8_t *const dst[],
                                        const int dstStride[])
{
    /* destination chroma planes are vertically subsampled by 2 */
    uint8_t *dst1 = dst[1] + dstStride[1] * srcSliceY / 2;
    uint8_t *dst2 = dst[2] + dstStride[2] * srcSliceY / 2;

    ff_copyPlane(src[0], srcStride[0], srcSliceY, srcSliceH, c->srcW,
                 dst[0], dstStride[0]);

    if (c->srcFormat == AV_PIX_FMT_NV24)
        ff_nv24_to_yuv420p_chroma_neon(dst1, dstStride[1], dst2, dstStride[2],
                                       src[1], srcStride[1],
                                       c->srcW / 2, srcSliceH);
    else /* AV_PIX_FMT_NV42: U and V bytes are swapped */
        ff_nv24_to_yuv420p_chroma_neon(dst2, dstStride[2], dst1, dstStride[1],
                                       src[1], srcStride[1],
                                       c->srcW / 2, srcSliceH);

    return srcSliceH;
}
#define DECLARE_FF_NVX_TO_ALL_RGBX_FUNCS(nvx) \
DECLARE_FF_NVX_TO_RGBX_FUNCS(nvx, argb) \
DECLARE_FF_NVX_TO_RGBX_FUNCS(nvx, rgba) \
DECLARE_FF_NVX_TO_RGBX_FUNCS(nvx, abgr) \
DECLARE_FF_NVX_TO_RGBX_FUNCS(nvx, bgra) \
DECLARE_FF_NVX_TO_GBRP_FUNCS(nvx, gbrp)

DECLARE_FF_NVX_TO_ALL_RGBX_FUNCS(nv12)
DECLARE_FF_NVX_TO_ALL_RGBX_FUNCS(nv21)
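/* The NEON fast paths are only selected for widths that are a multiple of 16
 * pixels, even heights, and when SWS_ACCURATE_RND is not requested. */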
#define SET_FF_NVX_TO_RGBX_FUNC(ifmt, IFMT, ofmt, OFMT, accurate_rnd) do { \
    if (c->srcFormat == AV_PIX_FMT_##IFMT \
        && c->dstFormat == AV_PIX_FMT_##OFMT \
        && !(c->srcH & 1) \
        && !(c->srcW & 15) \
        && !accurate_rnd) \
        c->convert_unscaled = ifmt##_to_##ofmt##_neon_wrapper; \
} while (0)
#define SET_FF_NVX_TO_ALL_RGBX_FUNC(nvx, NVX, accurate_rnd) do { \
    SET_FF_NVX_TO_RGBX_FUNC(nvx, NVX, argb, ARGB, accurate_rnd); \
    SET_FF_NVX_TO_RGBX_FUNC(nvx, NVX, rgba, RGBA, accurate_rnd); \
    SET_FF_NVX_TO_RGBX_FUNC(nvx, NVX, abgr, ABGR, accurate_rnd); \
    SET_FF_NVX_TO_RGBX_FUNC(nvx, NVX, bgra, BGRA, accurate_rnd); \
    SET_FF_NVX_TO_RGBX_FUNC(nvx, NVX, gbrp, GBRP, accurate_rnd); \
} while (0)
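/* Pick an unscaled NEON conversion for the current source/destination format
 * pair, if one is available and the restrictions above are met. */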
static void get_unscaled_swscale_neon(SwsInternal *c)
{
    int accurate_rnd = c->flags & SWS_ACCURATE_RND;

    if (c->dstFormat == AV_PIX_FMT_YUV420P &&
        (c->srcFormat == AV_PIX_FMT_NV24 || c->srcFormat == AV_PIX_FMT_NV42) &&
        !(c->srcH & 1) && !(c->srcW & 15) && !accurate_rnd)
        c->convert_unscaled = nv24_to_yuv420p_neon_wrapper;

    SET_FF_NVX_TO_ALL_RGBX_FUNC(nv12, NV12, accurate_rnd);
    SET_FF_NVX_TO_ALL_RGBX_FUNC(nv21, NV21, accurate_rnd);
    SET_FF_NVX_TO_ALL_RGBX_FUNC(yuv420p, YUV420P, accurate_rnd);
    SET_FF_NVX_TO_ALL_RGBX_FUNC(yuv422p, YUV422P, accurate_rnd);
}
void ff_get_unscaled_swscale_aarch64(SwsInternal *c)
{
    int cpu_flags = av_get_cpu_flags();

    if (have_neon(cpu_flags))
        get_unscaled_swscale_neon(c);
}
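For context, a minimal caller-side sketch of how this fast path is reached through the public libswscale API: an unscaled context (same source and destination dimensions) whose pixel formats match one of the pairs registered above, with a width that is a multiple of 16, an even height, and no SWS_ACCURATE_RND, lets sws_scale() dispatch to the corresponding *_neon_wrapper via c->convert_unscaled on NEON-capable aarch64 builds. The helper name and buffer handling below are illustrative and not part of this file.

#include <libswscale/swscale.h>

/* Hypothetical helper: convert one NV12 frame to RGBA at identical size. */
static int convert_nv12_to_rgba(const uint8_t *const src[2], const int srcStride[2],
                                uint8_t *const dst[1], const int dstStride[1],
                                int w, int h)
{
    struct SwsContext *sws = sws_getContext(w, h, AV_PIX_FMT_NV12,
                                            w, h, AV_PIX_FMT_RGBA,
                                            SWS_BILINEAR, NULL, NULL, NULL);
    if (!sws)
        return -1;

    /* With w % 16 == 0 and h even, this lands on the NV12->RGBA NEON wrapper. */
    sws_scale(sws, src, srcStride, 0, h, dst, dstStride);
    sws_freeContext(sws);
    return 0;
}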