FFmpeg
vf_blurdetect.c
/*
 * Copyright (c) 2021 Thilo Borgmann <thilo.borgmann _at_ mail.de>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * No-reference blurdetect filter
 *
 * Implementing:
 * Marziliano, Pina, et al. "A no-reference perceptual blur metric."
 * Proceedings of the International Conference on Image Processing. Vol. 3. IEEE, 2002.
 * https://infoscience.epfl.ch/record/111802/files/14%20A%20no-reference%20perceptual%20blur%20metric.pdf
 *
 * @author Thilo Borgmann <thilo.borgmann _at_ mail.de>
 */
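
/*
 * Illustrative usage: the filter attaches a per-frame "lavfi.blur" metadata
 * entry and logs the mean blurriness when the filter is uninitialized.
 * A typical command line (the option values are only an example) might be:
 *
 *   ffmpeg -i input.mp4 -vf blurdetect=block_width=32:block_height=32 -f null -
 */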

#include "libavutil/mem.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/qsort.h"
#include "internal.h"
#include "edge_common.h"
#include "video.h"

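// ascending comparator for AV_QSORT(); used to sort the per-block average edge widths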
static int comp(const float *a, const float *b)
{
    return FFDIFFSIGN(*a, *b);
}

typedef struct BLRContext {
    const AVClass *class;

    int hsub, vsub;
    int nb_planes;

    float low, high;
    uint8_t low_u8, high_u8;
    int radius;       // radius during local maxima detection
    int block_pct;    // percentage of "sharpest" blocks in the image to use for blurriness calculation
    int block_width;  // width for block abbreviation
    int block_height; // height for block abbreviation
    int planes;       // number of planes to filter

    double blur_total;
    uint64_t nb_frames;

    float *blks;
    uint8_t *filterbuf;
    uint8_t *tmpbuf;
    uint16_t *gradients;
    int8_t *directions;
} BLRContext;

#define OFFSET(x) offsetof(BLRContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
static const AVOption blurdetect_options[] = {
    { "high",         "set high threshold",                                    OFFSET(high),         AV_OPT_TYPE_FLOAT, {.dbl=30/255.}, 0,  1,       FLAGS },
    { "low",          "set low threshold",                                     OFFSET(low),          AV_OPT_TYPE_FLOAT, {.dbl=15/255.}, 0,  1,       FLAGS },
    { "radius",       "search radius for maxima detection",                    OFFSET(radius),       AV_OPT_TYPE_INT,   {.i64=50},      1,  100,     FLAGS },
    { "block_pct",    "block pooling threshold when calculating blurriness",   OFFSET(block_pct),    AV_OPT_TYPE_INT,   {.i64=80},      1,  100,     FLAGS },
    { "block_width",  "block size for block-based abbreviation of blurriness", OFFSET(block_width),  AV_OPT_TYPE_INT,   {.i64=-1},      -1, INT_MAX, FLAGS },
    { "block_height", "block size for block-based abbreviation of blurriness", OFFSET(block_height), AV_OPT_TYPE_INT,   {.i64=-1},      -1, INT_MAX, FLAGS },
    { "planes",       "set planes to filter",                                  OFFSET(planes),       AV_OPT_TYPE_INT,   {.i64=1},       0,  15,      FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(blurdetect);

static av_cold int blurdetect_init(AVFilterContext *ctx)
{
    BLRContext *s = ctx->priv;

    s->low_u8  = s->low  * 255. + .5;
    s->high_u8 = s->high * 255. + .5;

    return 0;
}

static int blurdetect_config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    BLRContext *s = ctx->priv;
    const int bufsize = inlink->w * inlink->h;
    const AVPixFmtDescriptor *pix_desc;

    pix_desc = av_pix_fmt_desc_get(inlink->format);
    s->hsub = pix_desc->log2_chroma_w;
    s->vsub = pix_desc->log2_chroma_h;
    s->nb_planes = av_pix_fmt_count_planes(inlink->format);

    if (s->block_width < 1 || s->block_height < 1) {
        s->block_width  = inlink->w;
        s->block_height = inlink->h;
    }

    s->tmpbuf     = av_malloc(bufsize);
    s->filterbuf  = av_malloc(bufsize);
    s->gradients  = av_calloc(bufsize, sizeof(*s->gradients));
    s->directions = av_malloc(bufsize);
    s->blks       = av_calloc((inlink->w / s->block_width) * (inlink->h / s->block_height),
                              sizeof(*s->blks));

    if (!s->tmpbuf || !s->filterbuf || !s->gradients || !s->directions || !s->blks)
        return AVERROR(ENOMEM);

    return 0;
}

// edge width is defined as the distance between surrounding maxima of the edge pixel
static float edge_width(BLRContext *blr, int i, int j, int8_t dir, int w, int h,
                        int edge, const uint8_t *src, int src_linesize)
{
    float width = 0;
    int dX, dY;
    int sign;
    int tmp;
    int p1;
    int p2;
    int k, x, y;
    int radius = blr->radius;

    switch (dir) {
    case DIRECTION_HORIZONTAL: dX = 1; dY =  0; break;
    case DIRECTION_VERTICAL:   dX = 0; dY =  1; break;
    case DIRECTION_45UP:       dX = 1; dY = -1; break;
    case DIRECTION_45DOWN:     dX = 1; dY =  1; break;
    default:                   dX = 1; dY =  1; break;
    }

    // determines if search in direction dX/dY is looking for a maximum or minimum
    sign = src[j * src_linesize + i] > src[(j - dY) * src_linesize + i - dX] ? 1 : -1;

    // search in -(dX/dY) direction
    for (k = 0; k < radius; k++) {
        x = i - k * dX;
        y = j - k * dY;
        p1 = y * src_linesize + x;
        x -= dX;
        y -= dY;
        p2 = y * src_linesize + x;
        if (x < 0 || x >= w || y < 0 || y >= h)
            return 0;

        tmp = (src[p1] - src[p2]) * sign;

        if (tmp <= 0) // local maximum found
            break;
    }
    width += k;

    // search in +(dX/dY) direction
    for (k = 0; k < radius; k++) {
        x = i + k * dX;
        y = j + k * dY;
        p1 = y * src_linesize + x;
        x += dX;
        y += dY;
        p2 = y * src_linesize + x;
        if (x < 0 || x >= w || y < 0 || y >= h)
            return 0;

        tmp = (src[p1] - src[p2]) * sign;

        if (tmp >= 0) // local maximum found
            break;
    }
    width += k;

    // for 45 degree directions approximate edge width in pixel units: 0.7 ~= sqrt(2)/2
    if (dir == DIRECTION_45UP || dir == DIRECTION_45DOWN)
        width *= 0.7;

    return width;
}

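// Collect the average edge width of each block, sort the block averages in
// ascending order and keep the lowest (sharpest) block_pct percent of them to
// form the per-plane blurriness value.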
static float calculate_blur(BLRContext *s, int w, int h, int hsub, int vsub,
                            int8_t *dir, int dir_linesize,
                            uint8_t *dst, int dst_linesize,
                            uint8_t *src, int src_linesize)
{
    float total_width = 0.0;
    int block_count;
    double block_total_width;

    int i, j;
    int blkcnt = 0;

    float *blks = s->blks;
    float block_pool_threshold = s->block_pct / 100.0;

    int block_width  = AV_CEIL_RSHIFT(s->block_width,  hsub);
    int block_height = AV_CEIL_RSHIFT(s->block_height, vsub);
    int brows = h / block_height;
    int bcols = w / block_width;

    for (int blkj = 0; blkj < brows; blkj++) {
        for (int blki = 0; blki < bcols; blki++) {
            block_total_width = 0.0;
            block_count = 0;
            for (int inj = 0; inj < block_height; inj++) {
                for (int ini = 0; ini < block_width; ini++) {
                    i = blki * block_width + ini;
                    j = blkj * block_height + inj;

                    if (dst[j * dst_linesize + i] > 0) {
                        float width = edge_width(s, i, j, dir[j * dir_linesize + i],
                                                 w, h, dst[j * dst_linesize + i],
                                                 src, src_linesize);
                        if (width > 0.001) { // throw away zeros
                            block_count++;
                            block_total_width += width;
                        }
                    }
                }
            }
            // if not enough edge pixels in a block, consider it smooth
            if (block_total_width >= 2 && block_count) {
                blks[blkcnt] = block_total_width / block_count;
                blkcnt++;
            }
        }
    }

    // simple block pooling by sorting and keeping the sharper blocks
    AV_QSORT(blks, blkcnt, float, comp);
    blkcnt = ceil(blkcnt * block_pool_threshold);
    for (int i = 0; i < blkcnt; i++) {
        total_width += blks[i];
    }

    return total_width / blkcnt;
}

static void set_meta(AVDictionary **metadata, const char *key, float d)
{
    char value[128];
    snprintf(value, sizeof(value), "%f", d);
    av_dict_set(metadata, key, value, 0);
}

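// Per-frame processing: for each selected plane, run a Canny-style edge
// detector (Gaussian blur, Sobel gradients, non-maximum suppression, double
// thresholding), then estimate blurriness from the widths of the surviving edges.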
static int blurdetect_filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    BLRContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];

    const int inw = inlink->w;
    const int inh = inlink->h;

    uint8_t *tmpbuf     = s->tmpbuf;
    uint8_t *filterbuf  = s->filterbuf;
    uint16_t *gradients = s->gradients;
    int8_t *directions  = s->directions;

    float blur = 0.0f;
    int nplanes = 0;
    AVDictionary **metadata;
    metadata = &in->metadata;

    for (int plane = 0; plane < s->nb_planes; plane++) {
        int hsub = plane == 1 || plane == 2 ? s->hsub : 0;
        int vsub = plane == 1 || plane == 2 ? s->vsub : 0;
        int w = AV_CEIL_RSHIFT(inw, hsub);
        int h = AV_CEIL_RSHIFT(inh, vsub);

        if (!((1 << plane) & s->planes))
            continue;

        nplanes++;

        // gaussian filter to reduce noise
        ff_gaussian_blur_8(w, h,
                           filterbuf,       w,
                           in->data[plane], in->linesize[plane], 1);

        // compute the 16-bit gradients and directions for the next step
        ff_sobel_8(w, h, gradients, w, directions, w, filterbuf, w, 1);

        // non_maximum_suppression() will actually keep & clip what's necessary and
        // ignore the rest, so we need a clean output buffer
        memset(tmpbuf, 0, inw * inh);
        ff_non_maximum_suppression(w, h, tmpbuf, w, directions, w, gradients, w);

        // keep high values, or low values surrounded by high values
        ff_double_threshold(s->low_u8, s->high_u8, w, h,
                            tmpbuf, w, tmpbuf, w);

        blur += calculate_blur(s, w, h, hsub, vsub, directions, w,
                               tmpbuf, w, filterbuf, w);
    }

    if (nplanes)
        blur /= nplanes;

    s->blur_total += blur;

    // write stats
    av_log(ctx, AV_LOG_VERBOSE, "blur: %.7f\n", blur);

    set_meta(metadata, "lavfi.blur", blur);

    s->nb_frames = inlink->frame_count_in;

    return ff_filter_frame(outlink, in);
}

static av_cold void blurdetect_uninit(AVFilterContext *ctx)
{
    BLRContext *s = ctx->priv;

    if (s->nb_frames > 0) {
        av_log(ctx, AV_LOG_INFO, "blur mean: %.7f\n",
               s->blur_total / s->nb_frames);
    }

    av_freep(&s->tmpbuf);
    av_freep(&s->filterbuf);
    av_freep(&s->gradients);
    av_freep(&s->directions);
    av_freep(&s->blks);
}

static const enum AVPixelFormat pix_fmts[] = {
    AV_PIX_FMT_GRAY8,
    AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP,
    AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P,
    AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P,
    AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV444P,
    AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
    AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
    AV_PIX_FMT_YUVJ411P,
    AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P,
    AV_PIX_FMT_NONE
};

static const AVFilterPad blurdetect_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = blurdetect_config_input,
        .filter_frame = blurdetect_filter_frame,
    },
};

const AVFilter ff_vf_blurdetect = {
    .name          = "blurdetect",
    .description   = NULL_IF_CONFIG_SMALL("Blurdetect filter."),
    .priv_size     = sizeof(BLRContext),
    .init          = blurdetect_init,
    .uninit        = blurdetect_uninit,
    FILTER_PIXFMTS_ARRAY(pix_fmts),
    FILTER_INPUTS(blurdetect_inputs),
    FILTER_OUTPUTS(ff_video_default_filterpad),
    .priv_class    = &blurdetect_class,
    .flags         = AVFILTER_FLAG_METADATA_ONLY,
};