22 #ifndef AVFILTER_UNSHARP_OPENCL_KERNEL_H
23 #define AVFILTER_UNSHARP_OPENCL_KERNEL_H
// Clamp an int to the unsigned 8-bit range [0, 255].
// NOTE(review): only the signature is visible in this chunk — the body is on
// lines not shown here; confirm against the full file before relying on it.
28 inline unsigned char clip_uint8(
int a)
// Kernel: unsharp-mask (sharpen/blur) the luma plane.
// Expected launch: 2-D NDRange with 16x16 work-groups; each group processes a
// 16x16 output tile and stages a 32x32 input tile (8-pixel apron on each side)
// in local memory.
// NOTE(review): this chunk is a garbled extraction — the full parameter list
// (width, height, src_stride, dst_stride, mask_x, mask_y, amount, scalebits,
// halfscale, and presumably an enhance flag) and several statements/braces are
// on lines not visible here; the comments below hedge accordingly.
36 kernel
void unsharp_luma(
37 global
unsigned char *
src,
38 global
unsigned char *dst,
// CUDA-style index aliases built from the OpenCL work-item query functions.
49 int2 threadIdx, blockIdx, globalIdx;
50 threadIdx.x = get_local_id(0);
51 threadIdx.y = get_local_id(1);
52 blockIdx.x = get_group_id(0);
53 blockIdx.y = get_group_id(1);
54 globalIdx.x = get_global_id(0);
55 globalIdx.y = get_global_id(1);
// Bounds-guarded plain copy of src to dst — presumably the "no enhancement"
// early path; the controlling condition/return is not visible in this chunk.
58 if (globalIdx.x < width && globalIdx.y < height)
59 dst[globalIdx.x + globalIdx.y*dst_stride] = src[globalIdx.x + globalIdx.y*src_stride];
// Local (work-group shared) storage: 32x32 pixel tile plus cached copies of
// the separable 1-D filter masks (LU_RADIUS_X / LU_RADIUS_Y taps).
63 local
unsigned int l[32][32];
64 local
unsigned int lcx[LU_RADIUS_X];
65 local
unsigned int lcy[LU_RADIUS_Y];
66 int indexIx, indexIy, i, j;
// Cooperative tile load: each thread fetches 4 pixels (2x2 grid of 16x16
// sub-tiles), with coordinates clamped to the image edges (replicate border).
// The -8 offset centers the 32x32 tile on this group's 16x16 output region.
69 for(i = 0; i <= 1; i++) {
70 indexIy = -8 + (blockIdx.y + i) * 16 + threadIdx.y;
71 indexIy = indexIy < 0 ? 0 : indexIy;
72 indexIy = indexIy >= height ? height - 1: indexIy;
73 for(j = 0; j <= 1; j++) {
74 indexIx = -8 + (blockIdx.x + j) * 16 + threadIdx.x;
75 indexIx = indexIx < 0 ? 0 : indexIx;
76 indexIx = indexIx >= width ? width - 1: indexIx;
77 l[i*16 + threadIdx.y][j*16 + threadIdx.x] = src[indexIy*src_stride + indexIx];
// The first LU_RADIUS_X / LU_RADIUS_Y threads (by flattened local id) cache
// the global filter masks into local memory.
81 int indexL = threadIdx.y*16 + threadIdx.x;
82 if (indexL < LU_RADIUS_X)
83 lcx[indexL] = mask_x[indexL];
84 if (indexL < LU_RADIUS_Y)
85 lcy[indexL] = mask_y[indexL];
// Make the tile and masks visible to all threads before the convolution.
86 barrier(CLK_LOCAL_MEM_FENCE);
// Center pixel of this thread's output position inside the 32x32 tile.
89 int orig_value = (
int)l[threadIdx.y + 8][threadIdx.x + 8];
91 int idx, idy, maskIndex;
// Half-lengths of the odd-sized 1-D masks.
93 int steps_x = (LU_RADIUS_X-1)/2;
94 int steps_y = (LU_RADIUS_Y-1)/2;
// Pass 1 — vertical convolution: each thread filters two columns of the tile
// (j = 0/1 selects the left/right 16-wide half), accumulating into temp[].
// NOTE(review): temp[]'s declaration/zeroing is on lines not shown here.
98 for (j = 0; j <=1; j++) {
100 idx = 16*j + threadIdx.x;
102 for (i = -steps_y; i <= steps_y; i++) {
103 idy = 8 + i + threadIdx.y;
104 maskIndex = (i + steps_y);
105 temp[j] += (
int)l[idy][idx] * lcy[maskIndex];
// All reads of the raw tile are done before it is overwritten below.
108 barrier(CLK_LOCAL_MEM_FENCE);
110 idy = 8 + threadIdx.y;
// Write the vertically filtered values back into the tile row this thread owns.
112 for (j = 0; j <=1; j++) {
113 idx = 16*j + threadIdx.x;
114 l[idy][idx] =
temp[j];
116 barrier(CLK_LOCAL_MEM_FENCE);
// Pass 2 — horizontal convolution over the vertically filtered tile.
// NOTE(review): sum's declaration/zeroing is on lines not shown here.
120 idy = 8 + threadIdx.y;
122 for (j = -steps_x; j <= steps_x; j++) {
123 idx = 8 + j + threadIdx.x;
124 maskIndex = j + steps_x;
125 sum += (
int)l[idy][idx] * lcx[maskIndex];
// Fixed-point unsharp formula: res = orig + ((orig - blurred) * amount) >> 16,
// where blurred = (sum + halfscale) >> scalebits (rounded scale-down).
128 int res = orig_value + (((orig_value - (
int)((sum + halfscale) >> scalebits)) * amount) >> 16);
// Guarded store with clamping to [0, 255].
130 if (globalIdx.x < width && globalIdx.y < height)
131 dst[globalIdx.x + globalIdx.y*dst_stride] = clip_uint8(res);
// Kernel: unsharp-mask both chroma planes (U and V) in a single launch.
// The global Y range is split in half: the lower half of work-items processes
// the U plane, the upper half the V plane. Plane base pointers are derived
// from the luma-plane pointers using the plane heights and strides.
// NOTE(review): garbled extraction — the remaining parameters (cw, ch, height,
// the *_lu/*_ch strides, mask_x/mask_y, amount, scalebits, halfscale, and
// presumably an enhance flag) plus several braces are on lines not shown here.
134 kernel
void unsharp_chroma(
135 global
unsigned char *src_y,
136 global
unsigned char *dst_y,
// Derive U/V plane pointers: U follows the luma plane, V follows U.
151 global
unsigned char *dst_u = dst_y + height * dst_stride_lu;
152 global
unsigned char *dst_v = dst_u + ch * dst_stride_ch;
153 global
unsigned char *src_u = src_y + height * src_stride_lu;
154 global
unsigned char *src_v = src_u + ch * src_stride_ch;
// CUDA-style index aliases built from the OpenCL work-item query functions.
155 int2 threadIdx, blockIdx, globalIdx;
156 threadIdx.x = get_local_id(0);
157 threadIdx.y = get_local_id(1);
158 blockIdx.x = get_group_id(0);
159 blockIdx.y = get_group_id(1);
160 globalIdx.x = get_global_id(0);
161 globalIdx.y = get_global_id(1);
// padch = half the global Y extent: the U/V split point.
162 int padch = get_global_size(1)/2;
163 global
unsigned char *src = globalIdx.y>=padch ? src_v : src_u;
164 global
unsigned char *dst = globalIdx.y>=padch ? dst_v : dst_u;
// Rebase block/global Y so the V half indexes its plane from 0.
166 blockIdx.y = globalIdx.y>=padch ? blockIdx.y - get_num_groups(1)/2 : blockIdx.y;
167 globalIdx.y = globalIdx.y>=padch ? globalIdx.y - padch : globalIdx.y;
// Bounds-guarded plain copy — presumably the "no enhancement" early path;
// the controlling condition/return is not visible in this chunk.
170 if (globalIdx.x < cw && globalIdx.y < ch)
171 dst[globalIdx.x + globalIdx.y*dst_stride_ch] = src[globalIdx.x + globalIdx.y*src_stride_ch];
// Local storage: 32x32 tile (16x16 output region + 8-pixel apron) and cached
// separable filter masks (CH_RADIUS_X / CH_RADIUS_Y taps).
175 local
unsigned int l[32][32];
176 local
unsigned int lcx[CH_RADIUS_X];
177 local
unsigned int lcy[CH_RADIUS_Y];
178 int indexIx, indexIy, i, j;
// Cooperative tile load (4 pixels per thread), clamped to plane edges.
179 for(i = 0; i <= 1; i++) {
180 indexIy = -8 + (blockIdx.y + i) * 16 + threadIdx.y;
181 indexIy = indexIy < 0 ? 0 : indexIy;
182 indexIy = indexIy >= ch ? ch - 1: indexIy;
183 for(j = 0; j <= 1; j++) {
184 indexIx = -8 + (blockIdx.x + j) * 16 + threadIdx.x;
185 indexIx = indexIx < 0 ? 0 : indexIx;
186 indexIx = indexIx >= cw ? cw - 1: indexIx;
187 l[i*16 + threadIdx.y][j*16 + threadIdx.x] = src[indexIy * src_stride_ch + indexIx];
// First CH_RADIUS_X / CH_RADIUS_Y threads cache the masks in local memory.
191 int indexL = threadIdx.y*16 + threadIdx.x;
192 if (indexL < CH_RADIUS_X)
193 lcx[indexL] = mask_x[indexL];
194 if (indexL < CH_RADIUS_Y)
195 lcy[indexL] = mask_y[indexL];
196 barrier(CLK_LOCAL_MEM_FENCE);
// Center pixel of this thread's output position inside the tile.
198 int orig_value = (
int)l[threadIdx.y + 8][threadIdx.x + 8];
200 int idx, idy, maskIndex;
// Half-lengths of the 1-D masks (integer division, masks assumed odd-sized).
201 int steps_x = CH_RADIUS_X/2;
202 int steps_y = CH_RADIUS_Y/2;
// Pass 1 — vertical convolution; each thread filters two tile columns.
// NOTE(review): temp[]'s declaration/zeroing is on lines not shown here.
206 for (j = 0; j <= 1; j++) {
207 idx = 16*j + threadIdx.x;
209 for (i = -steps_y; i <= steps_y; i++) {
210 idy = 8 + i + threadIdx.y;
211 maskIndex = i + steps_y;
212 temp[j] += (
int)l[idy][idx] * lcy[maskIndex];
// Finish reading the raw tile before overwriting it with filtered values.
216 barrier(CLK_LOCAL_MEM_FENCE);
217 idy = 8 + threadIdx.y;
219 for (j = 0; j <= 1; j++) {
220 idx = 16*j + threadIdx.x;
221 l[idy][idx] =
temp[j];
223 barrier(CLK_LOCAL_MEM_FENCE);
// Pass 2 — horizontal convolution over the vertically filtered tile.
// NOTE(review): sum's declaration/zeroing is on lines not shown here.
227 idy = 8 + threadIdx.y;
229 for (j = -steps_x; j <= steps_x; j++) {
230 idx = 8 + j + threadIdx.x;
231 maskIndex = j + steps_x;
232 sum += (
int)l[idy][idx] * lcx[maskIndex];
// Fixed-point unsharp: res = orig + ((orig - blurred) * amount) >> 16, with
// blurred = (sum + halfscale) >> scalebits (rounded scale-down).
235 int res = orig_value + (((orig_value - (
int)((sum + halfscale) >> scalebits)) * amount) >> 16);
// Guarded, clamped store back to the selected chroma plane.
237 if (globalIdx.x < cw && globalIdx.y < ch)
238 dst[globalIdx.x + globalIdx.y*dst_stride_ch] = clip_uint8(res);
// Kernel: generic (non-tiled) unsharp fallback. Launched 1-D with one
// work-item per pixel over all three planes concatenated: [0, width*height)
// is luma, the next ch*cw items are the U plane, the next ch*cw the V plane.
// Each thread picks plane-specific parameters, then does a full 2-D
// convolution directly from global memory (no local-memory staging).
// NOTE(review): garbled extraction — the remaining parameters (width, height,
// cw, ch, strides, per-plane step/amount/scalebits/halfscale, the enhance
// flag), the `else {` opening the V-plane branch, the assignments of
// temp_src/temp_dst/temp_mask/temp_width/temp_height, the zeroing of `sum`,
// and several closing braces are on lines not visible in this chunk.
241 kernel
void unsharp_default(global
unsigned char *src,
242 global
unsigned char *dst,
243 const global
unsigned int *mask_lu,
244 const global
unsigned int *mask_ch,
// Derive per-plane base pointers: Y, then U after the luma plane, then V.
264 global
unsigned char *dst_y = dst;
265 global
unsigned char *dst_u = dst_y + height * dst_stride_lu;
266 global
unsigned char *dst_v = dst_u + ch * dst_stride_ch;
268 global
unsigned char *src_y =
src;
269 global
unsigned char *src_u = src_y + height * src_stride_lu;
270 global
unsigned char *src_v = src_u + ch * src_stride_ch;
// Plane-specific working pointers/parameters selected by the branches below.
272 global
unsigned char *temp_dst;
273 global
unsigned char *temp_src;
274 const global
unsigned int *temp_mask;
275 int global_id = get_global_id(0);
276 int i, j, x, y, temp_src_stride, temp_dst_stride, temp_height, temp_width, temp_steps_x, temp_steps_y,
277 temp_amount, temp_scalebits, temp_halfscale, sum, idx_x, idx_y,
temp, res;
// Luma plane: first width*height work-items.
278 if (global_id < width * height) {
279 y = global_id /
width;
280 x = global_id %
width;
283 temp_src_stride = src_stride_lu;
284 temp_dst_stride = dst_stride_lu;
287 temp_steps_x = step_x_lu;
288 temp_steps_y = step_y_lu;
290 temp_amount = amount_lu;
291 temp_scalebits = scalebits_lu;
292 temp_halfscale = halfscale_lu;
293 }
// U plane: the next ch*cw work-items; coordinates rebased to the plane.
else if ((global_id >= width * height) && (global_id < width * height + ch * cw)) {
294 y = (global_id - width *
height) / cw;
295 x = (global_id - width *
height) % cw;
298 temp_src_stride = src_stride_ch;
299 temp_dst_stride = dst_stride_ch;
302 temp_steps_x = step_x_ch;
303 temp_steps_y = step_y_ch;
305 temp_amount = amount_ch;
306 temp_scalebits = scalebits_ch;
307 temp_halfscale = halfscale_ch;
// V plane (remaining work-items) — the `else {` opening this branch is on a
// line not shown in this chunk.
309 y = (global_id - width * height - ch * cw) / cw;
310 x = (global_id - width * height - ch * cw) % cw;
313 temp_src_stride = src_stride_ch;
314 temp_dst_stride = dst_stride_ch;
317 temp_steps_x = step_x_ch;
318 temp_steps_y = step_y_ch;
320 temp_amount = amount_ch;
321 temp_scalebits = scalebits_ch;
322 temp_halfscale = halfscale_ch;
// Full 2-D convolution of the (2*steps_x+1) x (2*steps_y+1) neighborhood,
// clamping sample coordinates to the plane (replicate border).
// NOTE(review): `sum` has no visible initialization here — presumably zeroed
// on a line not shown; confirm against the full file.
326 for (j = 0; j <= 2 * temp_steps_y; j++) {
327 idx_y = (y - temp_steps_y + j) <= 0 ? 0 : (y - temp_steps_y + j) >= temp_height ? temp_height-1 : y - temp_steps_y + j;
328 for (i = 0; i <= 2 * temp_steps_x; i++) {
329 idx_x = (x - temp_steps_x + i) <= 0 ? 0 : (x - temp_steps_x + i) >= temp_width ? temp_width-1 : x - temp_steps_x + i;
330 sum += temp_mask[i + j * (2 * temp_steps_x + 1)] * temp_src[idx_x + idx_y * temp_src_stride];
// Fixed-point unsharp on the center pixel, clamped to [0, 255].
333 temp = (
int)temp_src[x + y * temp_src_stride];
334 res = temp + (((temp - (
int)((sum + temp_halfscale) >> temp_scalebits)) * temp_amount) >> 16);
335 temp_dst[x + y * temp_dst_stride] = clip_uint8(res);
// Plain copy path — presumably the branch taken when enhancement is disabled;
// the controlling condition is on a line not shown here.
337 temp_dst[x + y * temp_dst_stride] = temp_src[x + y * temp_src_stride];
#define AV_OPENCL_KERNEL(...)
const char * ff_kernel_unsharp_opencl
uint8_t pi<< 24) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_U8,(uint64_t)((*(constuint8_t *) pi-0x80U))<< 56) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8,(*(constuint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8,(*(constuint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16,(*(constint16_t *) pi >>8)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S16,(uint64_t)(*(constint16_t *) pi)<< 48) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16,*(constint16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16,*(constint16_t *) pi *(1.0/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32,(*(constint32_t *) pi >>24)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S32,(uint64_t)(*(constint32_t *) pi)<< 32) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32,*(constint32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32,*(constint32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S64,(*(constint64_t *) pi >>56)+0x80) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S64,*(constint64_t *) pi *(1.0f/(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S64,*(constint64_t *) pi *(1.0/(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, av_clip_uint8(lrintf(*(constfloat *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, av_clip_int16(lrintf(*(constfloat *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, av_clipl_int32(llrintf(*(constfloat *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_FLT, llrintf(*(constfloat *) pi *(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, av_clip_uint8(lrint(*(constdouble *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, av_clip_int16(lrint(*(constdouble *) pi 
*(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, av_clipl_int32(llrint(*(constdouble *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_DBL, llrint(*(constdouble *) pi *(INT64_C(1)<< 63)))#defineFMT_PAIR_FUNC(out, in) staticconv_func_type *constfmt_pair_to_conv_functions[AV_SAMPLE_FMT_NB *AV_SAMPLE_FMT_NB]={FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S64), 
FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S64),};staticvoidcpy1(uint8_t **dst, constuint8_t **src, intlen){memcpy(*dst,*src, len);}staticvoidcpy2(uint8_t **dst, constuint8_t **src, intlen){memcpy(*dst,*src, 2 *len);}staticvoidcpy4(uint8_t **dst, constuint8_t **src, intlen){memcpy(*dst,*src, 4 *len);}staticvoidcpy8(uint8_t **dst, constuint8_t **src, intlen){memcpy(*dst,*src, 8 *len);}AudioConvert *swri_audio_convert_alloc(enumAVSampleFormatout_fmt, enumAVSampleFormatin_fmt, intchannels, constint *ch_map, intflags){AudioConvert *ctx;conv_func_type *f=fmt_pair_to_conv_functions[av_get_packed_sample_fmt(out_fmt)+AV_SAMPLE_FMT_NB *av_get_packed_sample_fmt(in_fmt)];if(!f) returnNULL;ctx=av_mallocz(sizeof(*ctx));if(!ctx) returnNULL;if(channels==1){in_fmt=av_get_planar_sample_fmt(in_fmt);out_fmt=av_get_planar_sample_fmt(out_fmt);}ctx->channels=channels;ctx->conv_f=f;ctx->ch_map=ch_map;if(in_fmt==AV_SAMPLE_FMT_U8||in_fmt==AV_SAMPLE_FMT_U8P) memset(ctx->silence, 0x80, sizeof(ctx->silence));if(out_fmt==in_fmt &&!ch_map){switch(av_get_bytes_per_sample(in_fmt)){case1:ctx->simd_f=cpy1;break;case2:ctx->simd_f=cpy2;break;case4:ctx->simd_f=cpy4;break;case8:ctx->simd_f=cpy8;break;}}if(HAVE_YASM &&1) swri_audio_convert_init_x86(ctx, out_fmt, in_fmt, channels);if(ARCH_ARM) swri_audio_convert_init_arm(ctx, out_fmt, in_fmt, channels);if(ARCH_AARCH64) swri_audio_convert_init_aarch64(ctx, out_fmt, in_fmt, channels);returnctx;}voidswri_audio_convert_free(AudioConvert **ctx){av_freep(ctx);}intswri_audio_convert(AudioConvert *ctx, AudioData *out, AudioData *in, 
intlen){intch;intoff=0;constintos=(out->planar?1:out->ch_count)*out->bps;unsignedmisaligned=0;av_assert0(ctx->channels==out->ch_count);if(ctx->in_simd_align_mask){intplanes=in->planar?in->ch_count:1;unsignedm=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) in->ch[ch];misaligned|=m &ctx->in_simd_align_mask;}if(ctx->out_simd_align_mask){intplanes=out->planar?out->ch_count:1;unsignedm=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) out->ch[ch];misaligned|=m &ctx->out_simd_align_mask;}if(ctx->simd_f &&!ctx->ch_map &&!misaligned){off=len &~15;av_assert1(off >=0);av_assert1(off<=len);av_assert2(ctx->channels==SWR_CH_MAX||!in->ch[ctx->channels]);if(off >0){if(out->planar==in->planar){intplanes=out->planar?out->ch_count:1;for(ch=0;ch< planes;ch++){ctx->simd_f(out-> ch ch