#define OFFSET(x) offsetof(ConvolutionContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM

/* "same" kernels: identity matrices, all zeros except a 1 at the centre tap */
static const int same5x5[25] = {0, 0, 0, 0, 0, /* ... remaining rows elided, centre element is 1 ... */};
static const int same7x7[49] = {0, 0, 0, 0, 0, 0, 0, /* ... remaining rows elided, centre element is 1 ... */};
static void filter16_prewitt(uint8_t *dstp, int width,
                             float scale, float delta, const int *const matrix,
                             const uint8_t *c[], int peak, int radius,
                             int dstride, int stride, int size)
{
    uint16_t *dst = (uint16_t *)dstp;

    for (x = 0; x < width; x++) {
        /* ... suma/sumb: the two Prewitt derivative responses over the 3x3 neighbourhood ... */
        dst[x] = av_clip(sqrtf(suma*suma + sumb*sumb) * scale + delta, 0, peak);
    }
}
static void filter16_roberts(uint8_t *dstp, int width,
                             float scale, float delta, const int *const matrix,
                             const uint8_t *c[], int peak, int radius,
                             int dstride, int stride, int size)
{
    uint16_t *dst = (uint16_t *)dstp;

    for (x = 0; x < width; x++) {
        /* ... suma/sumb: the two Roberts-cross differences ... */
        dst[x] = av_clip(sqrtf(suma*suma + sumb*sumb) * scale + delta, 0, peak);
    }
}
static void filter16_sobel(uint8_t *dstp, int width,
                           float scale, float delta, const int *const matrix,
                           const uint8_t *c[], int peak, int radius,
                           int dstride, int stride, int size)
{
    uint16_t *dst = (uint16_t *)dstp;

    for (x = 0; x < width; x++) {
        /* ... suma/sumb: the two Sobel derivative responses over the 3x3 neighbourhood ... */
        dst[x] = av_clip(sqrtf(suma*suma + sumb*sumb) * scale + delta, 0, peak);
    }
}
static void filter16_kirsch(uint8_t *dstp, int width,
                            float scale, float delta, const int *const matrix,
                            const uint8_t *c[], int peak, int radius,
                            int dstride, int stride, int size)
{
    uint16_t *dst = (uint16_t *)dstp;
    const uint16_t *c0 = (const uint16_t *)c[0], *c1 = (const uint16_t *)c[1], *c2 = (const uint16_t *)c[2];
    const uint16_t *c3 = (const uint16_t *)c[3], *c5 = (const uint16_t *)c[5];
    const uint16_t *c6 = (const uint16_t *)c[6], *c7 = (const uint16_t *)c[7], *c8 = (const uint16_t *)c[8];
    int x;

    for (x = 0; x < width; x++) {
        int sum0 = c0[x] *  5 + c1[x] *  5 + c2[x] *  5 +
                   c3[x] * -3 + c5[x] * -3 +
                   c6[x] * -3 + c7[x] * -3 + c8[x] * -3;
        int sum1 = c0[x] * -3 + c1[x] *  5 + c2[x] *  5 +
                   c3[x] *  5 + c5[x] * -3 +
                   c6[x] * -3 + c7[x] * -3 + c8[x] * -3;
        int sum2 = c0[x] * -3 + c1[x] * -3 + c2[x] *  5 +
                   c3[x] *  5 + c5[x] *  5 +
                   c6[x] * -3 + c7[x] * -3 + c8[x] * -3;
        int sum3 = c0[x] * -3 + c1[x] * -3 + c2[x] * -3 +
                   c3[x] *  5 + c5[x] *  5 +
                   c6[x] *  5 + c7[x] * -3 + c8[x] * -3;
        int sum4 = c0[x] * -3 + c1[x] * -3 + c2[x] * -3 +
                   c3[x] * -3 + c5[x] *  5 +
                   c6[x] *  5 + c7[x] *  5 + c8[x] * -3;
        int sum5 = c0[x] * -3 + c1[x] * -3 + c2[x] * -3 +
                   c3[x] * -3 + c5[x] * -3 +
                   c6[x] *  5 + c7[x] *  5 + c8[x] *  5;
        int sum6 = c0[x] *  5 + c1[x] * -3 + c2[x] * -3 +
                   c3[x] * -3 + c5[x] * -3 +
                   c6[x] * -3 + c7[x] *  5 + c8[x] *  5;
        int sum7 = c0[x] *  5 + c1[x] *  5 + c2[x] * -3 +
                   c3[x] * -3 + c5[x] * -3 +
                   c6[x] * -3 + c7[x] * -3 + c8[x] *  5;

        sum0 = FFMAX(sum0, sum1);
        sum2 = FFMAX(sum2, sum3);
        sum4 = FFMAX(sum4, sum5);
        sum6 = FFMAX(sum6, sum7);
        sum0 = FFMAX(sum0, sum2);
        sum4 = FFMAX(sum4, sum6);
        sum0 = FFMAX(sum0, sum4);

        /* ... final scale/offset of sum0 and store to dst[x] elided ... */
    }
}
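/*
 * Editorial note (not part of the original source): each sumN above is a
 * 3x3 correlation with a +5/-3 Kirsch-style weight pattern; sum0, for
 * example, corresponds to
 *
 *      5   5   5
 *     -3   .  -3        (the centre tap c4 is never read)
 *     -3  -3  -3
 *
 * and the FFMAX() cascade keeps the strongest of the eight directional
 * responses as the edge strength for the pixel.
 */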
static void filter_prewitt(uint8_t *dst, int width,
                           float scale, float delta, const int *const matrix,
                           const uint8_t *c[], int peak, int radius,
                           int dstride, int stride, int size)
{
    /* c0..c8 are the 3x3 neighbourhood line pointers taken from c[] (declarations elided) */
    for (x = 0; x < width; x++) {
        float suma = c0[x] * -1 + c1[x] * -1 + c2[x] * -1 +
                     c6[x] *  1 + c7[x] *  1 + c8[x] *  1;
        float sumb = c0[x] * -1 + c2[x] *  1 + c3[x] * -1 +
                     c5[x] *  1 + c6[x] * -1 + c8[x] *  1;

        dst[x] = av_clip_uint8(sqrtf(suma*suma + sumb*sumb) * scale + delta);
    }
}
static void filter_roberts(uint8_t *dst, int width,
                           float scale, float delta, const int *const matrix,
                           const uint8_t *c[], int peak, int radius,
                           int dstride, int stride, int size)
{
    for (x = 0; x < width; x++) {
        float suma = c[0][x] *  1 + c[1][x] * -1;
        float sumb = c[4][x] *  1 + c[3][x] * -1;

        dst[x] = av_clip_uint8(sqrtf(suma*suma + sumb*sumb) * scale + delta);
    }
}
static void filter_sobel(uint8_t *dst, int width,
                         float scale, float delta, const int *const matrix,
                         const uint8_t *c[], int peak, int radius,
                         int dstride, int stride, int size)
{
    /* c0..c8 are the 3x3 neighbourhood line pointers taken from c[] (declarations elided) */
    for (x = 0; x < width; x++) {
        float suma = c0[x] * -1 + c1[x] * -2 + c2[x] * -1 +
                     c6[x] *  1 + c7[x] *  2 + c8[x] *  1;
        float sumb = c0[x] * -1 + c2[x] *  1 + c3[x] * -2 +
                     c5[x] *  2 + c6[x] * -1 + c8[x] *  1;

        dst[x] = av_clip_uint8(sqrtf(suma*suma + sumb*sumb) * scale + delta);
    }
}
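/*
 * Editorial sketch (not part of the original source): in filter_prewitt()
 * and filter_sobel() above, suma and sumb are the two directional
 * derivative responses and the stored value is the gradient magnitude
 * sqrt(suma^2 + sumb^2), rescaled by the filter's scale/delta options.
 * The same combination in isolation, for an 8-bit output sample; the
 * helper name is illustrative only:
 */
static inline uint8_t edge_magnitude(float suma, float sumb, float scale, float delta)
{
    float v = sqrtf(suma * suma + sumb * sumb) * scale + delta;

    return v <= 0.f ? 0 : v >= 255.f ? 255 : (uint8_t)v;   /* av_clip_uint8()-like clamp */
}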
static void filter_kirsch(uint8_t *dst, int width,
                          float scale, float delta, const int *const matrix,
                          const uint8_t *c[], int peak, int radius,
                          int dstride, int stride, int size)
{
    /* c0..c8 are the 3x3 neighbourhood line pointers taken from c[] (declarations elided) */
    for (x = 0; x < width; x++) {
        int sum0 = c0[x] *  5 + c1[x] *  5 + c2[x] *  5 +
                   c3[x] * -3 + c5[x] * -3 +
                   c6[x] * -3 + c7[x] * -3 + c8[x] * -3;
        int sum1 = c0[x] * -3 + c1[x] *  5 + c2[x] *  5 +
                   c3[x] *  5 + c5[x] * -3 +
                   c6[x] * -3 + c7[x] * -3 + c8[x] * -3;
        int sum2 = c0[x] * -3 + c1[x] * -3 + c2[x] *  5 +
                   c3[x] *  5 + c5[x] *  5 +
                   c6[x] * -3 + c7[x] * -3 + c8[x] * -3;
        int sum3 = c0[x] * -3 + c1[x] * -3 + c2[x] * -3 +
                   c3[x] *  5 + c5[x] *  5 +
                   c6[x] *  5 + c7[x] * -3 + c8[x] * -3;
        int sum4 = c0[x] * -3 + c1[x] * -3 + c2[x] * -3 +
                   c3[x] * -3 + c5[x] *  5 +
                   c6[x] *  5 + c7[x] *  5 + c8[x] * -3;
        int sum5 = c0[x] * -3 + c1[x] * -3 + c2[x] * -3 +
                   c3[x] * -3 + c5[x] * -3 +
                   c6[x] *  5 + c7[x] *  5 + c8[x] *  5;
        int sum6 = c0[x] *  5 + c1[x] * -3 + c2[x] * -3 +
                   c3[x] * -3 + c5[x] * -3 +
                   c6[x] * -3 + c7[x] *  5 + c8[x] *  5;
        int sum7 = c0[x] *  5 + c1[x] *  5 + c2[x] * -3 +
                   c3[x] * -3 + c5[x] * -3 +
                   c6[x] * -3 + c7[x] * -3 + c8[x] *  5;

        sum0 = FFMAX(sum0, sum1);
        sum2 = FFMAX(sum2, sum3);
        sum4 = FFMAX(sum4, sum5);
        sum6 = FFMAX(sum6, sum7);
        sum0 = FFMAX(sum0, sum2);
        sum4 = FFMAX(sum4, sum6);
        sum0 = FFMAX(sum0, sum4);

        /* ... final scale/offset of sum0 and store to dst[x] elided ... */
    }
}
static void filter16_3x3(uint8_t *dstp, int width,
                         float rdiv, float bias, const int *const matrix,
                         const uint8_t *c[], int peak, int radius,
                         int dstride, int stride, int size)
{
    uint16_t *dst = (uint16_t *)dstp;

    for (x = 0; x < width; x++) {
        int sum = AV_RN16A(&c[0][2 * x]) * matrix[0] +
                  AV_RN16A(&c[1][2 * x]) * matrix[1] +
                  AV_RN16A(&c[2][2 * x]) * matrix[2] +
                  AV_RN16A(&c[3][2 * x]) * matrix[3] +
                  AV_RN16A(&c[4][2 * x]) * matrix[4] +
                  AV_RN16A(&c[5][2 * x]) * matrix[5] +
                  AV_RN16A(&c[6][2 * x]) * matrix[6] +
                  AV_RN16A(&c[7][2 * x]) * matrix[7] +
                  AV_RN16A(&c[8][2 * x]) * matrix[8];

        sum = (int)(sum * rdiv + bias + 0.5f);
        dst[x] = av_clip(sum, 0, peak);
    }
}
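/*
 * Editorial sketch (not part of the original source): the per-pixel
 * arithmetic of filter16_3x3() above, reduced to a standalone helper on
 * 8-bit samples.  "taps" holds the nine neighbourhood samples in c0..c8
 * order and "matrix" is the user kernel; rdiv and bias behave like the
 * filter options of the same name (e.g. an all-ones kernel with
 * rdiv = 1/9 acts as a box blur).  The helper name is illustrative only.
 */
static inline int convolve9(const uint8_t taps[9], const int matrix[9],
                            float rdiv, float bias)
{
    int sum = 0;

    for (int i = 0; i < 9; i++)
        sum += taps[i] * matrix[i];

    sum = (int)(sum * rdiv + bias + 0.5f);
    return sum < 0 ? 0 : sum > 255 ? 255 : sum;   /* av_clip_uint8() equivalent */
}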
static void filter16_5x5(uint8_t *dstp, int width,
                         float rdiv, float bias, const int *const matrix,
                         const uint8_t *c[], int peak, int radius,
                         int dstride, int stride, int size)
{
    uint16_t *dst = (uint16_t *)dstp;

    for (x = 0; x < width; x++) {
        int i, sum = 0;

        for (i = 0; i < 25; i++)
            sum += AV_RN16A(&c[i][2 * x]) * matrix[i];

        sum = (int)(sum * rdiv + bias + 0.5f);
        dst[x] = av_clip(sum, 0, peak);
    }
}
static void filter16_7x7(uint8_t *dstp, int width,
                         float rdiv, float bias, const int *const matrix,
                         const uint8_t *c[], int peak, int radius,
                         int dstride, int stride, int size)
{
    uint16_t *dst = (uint16_t *)dstp;

    for (x = 0; x < width; x++) {
        int i, sum = 0;

        for (i = 0; i < 49; i++)
            sum += AV_RN16A(&c[i][2 * x]) * matrix[i];

        sum = (int)(sum * rdiv + bias + 0.5f);
        dst[x] = av_clip(sum, 0, peak);
    }
}
static void filter16_row(uint8_t *dstp, int width,
                         float rdiv, float bias, const int *const matrix,
                         const uint8_t *c[], int peak, int radius,
                         int dstride, int stride, int size)
{
    uint16_t *dst = (uint16_t *)dstp;

    for (x = 0; x < width; x++) {
        int i, sum = 0;

        for (i = 0; i < 2 * radius + 1; i++)
            sum += AV_RN16A(&c[i][2 * x]) * matrix[i];

        sum = (int)(sum * rdiv + bias + 0.5f);
        dst[x] = av_clip(sum, 0, peak);
    }
}
static void filter16_column(uint8_t *dstp, int height,
                            float rdiv, float bias, const int *const matrix,
                            const uint8_t *c[], int peak, int radius,
                            int dstride, int stride, int size)
{
    /* sum[] is a DECLARE_ALIGNED int[16] accumulator; width is the number of
       columns handled per call (at most 16); declarations elided */
    uint16_t *dst = (uint16_t *)dstp;

    for (int y = 0; y < height; y++) {
        memset(sum, 0, sizeof(sum));

        for (int i = 0; i < 2 * radius + 1; i++) {
            for (int off16 = 0; off16 < width; off16++)
                sum[off16] += AV_RN16A(&c[i][0 + y * stride + off16 * 2]) * matrix[i];
        }

        for (int off16 = 0; off16 < width; off16++) {
            sum[off16] = (int)(sum[off16] * rdiv + bias + 0.5f);
            dst[off16] = av_clip(sum[off16], 0, peak);
        }

        /* ... advance dst to the next output line ... */
    }
}
static void filter_7x7(uint8_t *dst, int width,
                       float rdiv, float bias, const int *const matrix,
                       const uint8_t *c[], int peak, int radius,
                       int dstride, int stride, int size)
{
    for (x = 0; x < width; x++) {
        int i, sum = 0;

        for (i = 0; i < 49; i++)
            sum += c[i][x] * matrix[i];

        sum = (int)(sum * rdiv + bias + 0.5f);
        dst[x] = av_clip_uint8(sum);
    }
}
static void filter_5x5(uint8_t *dst, int width,
                       float rdiv, float bias, const int *const matrix,
                       const uint8_t *c[], int peak, int radius,
                       int dstride, int stride, int size)
{
    for (x = 0; x < width; x++) {
        int i, sum = 0;

        for (i = 0; i < 25; i++)
            sum += c[i][x] * matrix[i];

        sum = (int)(sum * rdiv + bias + 0.5f);
        dst[x] = av_clip_uint8(sum);
    }
}
static void filter_3x3(uint8_t *dst, int width,
                       float rdiv, float bias, const int *const matrix,
                       const uint8_t *c[], int peak, int radius,
                       int dstride, int stride, int size)
{
    /* c0..c8 are the 3x3 neighbourhood line pointers taken from c[] (declarations elided) */
    for (x = 0; x < width; x++) {
        int sum = c0[x] * matrix[0] + c1[x] * matrix[1] + c2[x] * matrix[2] +
                  c3[x] * matrix[3] + c4[x] * matrix[4] + c5[x] * matrix[5] +
                  c6[x] * matrix[6] + c7[x] * matrix[7] + c8[x] * matrix[8];

        sum = (int)(sum * rdiv + bias + 0.5f);
        dst[x] = av_clip_uint8(sum);
    }
}
static void filter_row(uint8_t *dst, int width,
                       float rdiv, float bias, const int *const matrix,
                       const uint8_t *c[], int peak, int radius,
                       int dstride, int stride, int size)
{
    for (x = 0; x < width; x++) {
        int i, sum = 0;

        for (i = 0; i < 2 * radius + 1; i++)
            sum += c[i][x] * matrix[i];

        sum = (int)(sum * rdiv + bias + 0.5f);
        dst[x] = av_clip_uint8(sum);
    }
}
static void filter_column(uint8_t *dst, int height,
                          float rdiv, float bias, const int *const matrix,
                          const uint8_t *c[], int peak, int radius,
                          int dstride, int stride, int size)
{
    /* sum[] is a DECLARE_ALIGNED int[16] accumulator; declaration elided */
    for (int y = 0; y < height; y++) {
        memset(sum, 0, sizeof(sum));

        for (int i = 0; i < 2 * radius + 1; i++) {
            for (int off16 = 0; off16 < 16; off16++)
                sum[off16] += c[i][0 + y * stride + off16] * matrix[i];
        }

        for (int off16 = 0; off16 < 16; off16++) {
            sum[off16] = (int)(sum[off16] * rdiv + bias + 0.5f);
            dst[off16] = av_clip_uint8(sum[off16]);
        }

        /* ... advance dst to the next output line ... */
    }
}
static void setup_3x3(int radius, const uint8_t *c[], const uint8_t *src, int stride,
                      int x, int w, int y, int h, int bpc)
{
    int i;

    for (i = 0; i < 9; i++) {
        int xoff = FFABS(x + (i % 3) - 1);
        int yoff = FFABS(y + (i / 3) - 1);

        xoff = xoff >= w ? 2 * w - 1 - xoff : xoff;
        yoff = yoff >= h ? 2 * h - 1 - yoff : yoff;

        c[i] = src + xoff * bpc + yoff * stride;
    }
}
static void setup_5x5(int radius, const uint8_t *c[], const uint8_t *src, int stride,
                      int x, int w, int y, int h, int bpc)
{
    int i;

    for (i = 0; i < 25; i++) {
        int xoff = FFABS(x + (i % 5) - 2);
        int yoff = FFABS(y + (i / 5) - 2);

        xoff = xoff >= w ? 2 * w - 1 - xoff : xoff;
        yoff = yoff >= h ? 2 * h - 1 - yoff : yoff;

        c[i] = src + xoff * bpc + yoff * stride;
    }
}
static void setup_7x7(int radius, const uint8_t *c[], const uint8_t *src, int stride,
                      int x, int w, int y, int h, int bpc)
{
    int i;

    for (i = 0; i < 49; i++) {
        int xoff = FFABS(x + (i % 7) - 3);
        int yoff = FFABS(y + (i / 7) - 3);

        xoff = xoff >= w ? 2 * w - 1 - xoff : xoff;
        yoff = yoff >= h ? 2 * h - 1 - yoff : yoff;

        c[i] = src + xoff * bpc + yoff * stride;
    }
}
static void setup_row(int radius, const uint8_t *c[], const uint8_t *src, int stride,
                      int x, int w, int y, int h, int bpc)
{
    int i;

    for (i = 0; i < radius * 2 + 1; i++) {
        int xoff = FFABS(x + i - radius);

        xoff = xoff >= w ? 2 * w - 1 - xoff : xoff;

        c[i] = src + xoff * bpc + y * stride;
    }
}
static void setup_column(int radius, const uint8_t *c[], const uint8_t *src, int stride,
                         int x, int w, int y, int h, int bpc)
{
    int i;

    for (i = 0; i < radius * 2 + 1; i++) {
        int xoff = FFABS(x + i - radius);

        xoff = xoff >= h ? 2 * h - 1 - xoff : xoff;

        c[i] = src + y * bpc + xoff * stride;
    }
}
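/*
 * Editorial sketch (not part of the original source): the setup_* helpers
 * above handle taps that fall outside the frame by mirroring the
 * coordinate.  The rule in isolation, with an illustrative helper name:
 */
static inline int mirror_coord(int pos, int len)
{
    pos = FFABS(pos);                        /* reflect below zero: -1 -> 1          */
    return pos >= len ? 2 * len - 1 - pos    /* reflect past the end, e.g. len = 10: */
                      : pos;                 /* 10 -> 9, 11 -> 8, ...                */
}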
599 for (plane = 0; plane < s->
nb_planes; plane++) {
601 const int bpc = s->
bpc;
609 const int slice_start = (sizeh * jobnr) / nb_jobs;
610 const int slice_end = (sizeh * (jobnr+1)) / nb_jobs;
614 const int dst_pos = slice_start * (mode ==
MATRIX_COLUMN ? bpc : dstride);
621 if (s->
copy[plane]) {
624 (slice_end - slice_start) * bpc, height);
627 width * bpc, slice_end - slice_start);
634 for (x = 0; x < radius; x++) {
635 const int xoff = mode ==
MATRIX_COLUMN ? (y - slice_start) * bpc : x * bpc;
640 bias, matrix,
c, s->
max, radius,
645 rdiv, bias, matrix,
c, s->
max, radius,
647 for (x = sizew - radius; x < sizew; x++) {
648 const int xoff = mode ==
MATRIX_COLUMN ? (y - slice_start) * bpc : x * bpc;
653 bias, matrix,
c, s->
max, radius,
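/*
 * Editorial sketch (not part of the original source): how the slice bounds
 * above split the plane across worker jobs.  With sizeh = 1080 and
 * nb_jobs = 4 the slices are [0,270), [270,540), [540,810), [810,1080).
 * The helper name is illustrative only:
 */
static void slice_bounds(int sizeh, int jobnr, int nb_jobs, int *start, int *end)
{
    *start = (sizeh *  jobnr     ) / nb_jobs;
    *end   = (sizeh * (jobnr + 1)) / nb_jobs;
}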
683 if (!strcmp(ctx->
filter->
name,
"convolution")) {
690 else if (s->
size[p] == 3)
692 else if (s->
size[p] == 5)
694 else if (s->
size[p] == 7)
698 #if CONFIG_CONVOLUTION_FILTER && ARCH_X86_64 701 }
else if (!strcmp(ctx->
filter->
name,
"prewitt")) {
705 }
else if (!strcmp(ctx->
filter->
name,
"roberts")) {
709 }
else if (!strcmp(ctx->
filter->
name,
"sobel")) {
713 }
else if (!strcmp(ctx->
filter->
name,
"kirsch")) {
750 if (!strcmp(ctx->
filter->
name,
"convolution")) {
751 for (i = 0; i < 4; i++) {
752 int *matrix = (
int *)s->
matrix[i];
819 s->
rdiv[
i] = 1. / sum;
821 if (s->
copy[i] && (s->
rdiv[i] != 1. || s->
bias[i] != 0.))
824 }
else if (!strcmp(ctx->
filter->
name,
"prewitt")) {
825 for (i = 0; i < 4; i++) {
835 }
else if (!strcmp(ctx->
filter->
name,
"roberts")) {
836 for (i = 0; i < 4; i++) {
846 }
else if (!strcmp(ctx->
filter->
name,
"sobel")) {
847 for (i = 0; i < 4; i++) {
857 }
else if (!strcmp(ctx->
filter->
name,
"kirsch")) {
858 for (i = 0; i < 4; i++) {
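/*
 * Editorial sketch (not part of the original source): the convolution filter
 * documents rdiv = 0 as "use 1/sum of the matrix elements", which is what the
 * assignment above implements, so a kernel of nine 1s gets rdiv = 1/9 and
 * acts as a box blur.  That default in isolation, with an illustrative
 * helper name; the zero-sum guard is this sketch's assumption:
 */
static float default_rdiv(const int *matrix, int n)
{
    int sum = 0;

    for (int i = 0; i < n; i++)
        sum += matrix[i];

    return sum ? 1.f / sum : 1.f;
}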
static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
                           char *res, int res_len, int flags)
{
    /* ... runtime option updates: apply the generic command handling, then
       rebuild the per-plane state (body elided) ... */
}
#if CONFIG_CONVOLUTION_FILTER
AVFilter ff_vf_convolution = {
    .name        = "convolution",
    /* ... description, priv_size, init, query_formats ... */
    .priv_class  = &convolution_class,
    /* ... */
    .inputs      = convolution_inputs,
    .outputs     = convolution_outputs,
};
#endif

#if CONFIG_PREWITT_FILTER || CONFIG_ROBERTS_FILTER || CONFIG_SOBEL_FILTER
static const AVOption prewitt_roberts_sobel_options[] = {
    /* ... scale/delta/planes options shared by the edge-detect filters ... */
};
#endif

#if CONFIG_PREWITT_FILTER
#define prewitt_options prewitt_roberts_sobel_options
AVFilter ff_vf_prewitt = {
    /* ... */
    .priv_class  = &prewitt_class,
    .inputs      = convolution_inputs,
    .outputs     = convolution_outputs,
};
#endif

#if CONFIG_SOBEL_FILTER
#define sobel_options prewitt_roberts_sobel_options
AVFilter ff_vf_sobel = {
    /* ... */
    .priv_class  = &sobel_class,
    .inputs      = convolution_inputs,
    .outputs     = convolution_outputs,
};
#endif

#if CONFIG_ROBERTS_FILTER
#define roberts_options prewitt_roberts_sobel_options
AVFilter ff_vf_roberts = {
    /* ... */
    .priv_class  = &roberts_class,
    .inputs      = convolution_inputs,
    .outputs     = convolution_outputs,
};
#endif

#if CONFIG_KIRSCH_FILTER
#define kirsch_options prewitt_roberts_sobel_options
AVFilter ff_vf_kirsch = {
    /* ... */
    .priv_class  = &kirsch_class,
    .inputs      = convolution_inputs,
    .outputs     = convolution_outputs,
};
#endif
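/*
 * Editorial note (not part of the original source): these registrations make
 * the filters addressable by name in a filtergraph, e.g.
 *
 *   ffmpeg -i in.mp4 -vf "convolution='0 -1 0 -1 5 -1 0 -1 0'" out.mp4
 *
 * for a 3x3 sharpening kernel on the first plane, or -vf sobel / prewitt /
 * roberts / kirsch for the fixed edge detectors.
 */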