FFmpeg
vf_cropdetect.c
/*
 * Copyright (c) 2002 A'rpi
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

/**
 * @file
 * border detection filter
 * Ported from MPlayer libmpcodecs/vf_cropdetect.c.
 */

#include "libavutil/imgutils.h"
#include "libavutil/internal.h"
#include "libavutil/opt.h"
#include "libavutil/motion_vector.h"
#include "libavutil/qsort.h"

#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
#include "edge_common.h"

typedef struct CropDetectContext {
    const AVClass *class;
    int x1, y1, x2, y2;
    float limit;
    int round;
    int skip;
    int reset_count;
    int frame_nb;
    int max_pixsteps[4];
    int max_outliers;
    int mode;
    int window_size;
    int mv_threshold;
    float limit_upscaled;
    float low, high;
    uint8_t low_u8, high_u8;
    uint8_t  *filterbuf;
    uint8_t  *tmpbuf;
    uint16_t *gradients;
    char     *directions;
    int      *bboxes[4];
} CropDetectContext;

static const enum AVPixelFormat pix_fmts[] = {
    AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUVJ420P,
    AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVJ422P,
    AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUVJ444P,
    AV_PIX_FMT_YUV411P, AV_PIX_FMT_GRAY8,
    AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV410P,
    AV_PIX_FMT_NV12,    AV_PIX_FMT_NV21,
    AV_PIX_FMT_RGB24,   AV_PIX_FMT_BGR24,
    AV_PIX_FMT_RGBA,    AV_PIX_FMT_BGRA,
    AV_PIX_FMT_YUV420P9,  AV_PIX_FMT_YUV422P9,  AV_PIX_FMT_YUV444P9,
    AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
    AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12,
    AV_PIX_FMT_YUV420P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV444P14,
    AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
    AV_PIX_FMT_NONE
};

enum CropMode {
    MODE_BLACK,
    MODE_MV_EDGES,
    MODE_NB
};

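/* Comparator for AV_QSORT(): sorts ints in ascending order. */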
static int comp(const int *a, const int *b)
{
    return FFDIFFSIGN(*a, *b);
}

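/*
 * Return the average sample value along one line of pixels, starting at src
 * and advancing by stride between samples.  bpp selects the layout:
 * 1 = 8-bit samples, 2 = 16-bit samples, 3/4 = packed 24/32-bit RGB(A),
 * where the average is taken over the first three components.
 */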
static int checkline(void *ctx, const unsigned char *src, int stride, int len, int bpp)
{
    int total = 0;
    int div = len;
    const uint16_t *src16 = (const uint16_t *)src;

    switch (bpp) {
    case 1:
        while (len >= 8) {
            total += src[        0] + src[  stride] + src[2*stride] + src[3*stride]
                   + src[4*stride] + src[5*stride] + src[6*stride] + src[7*stride];
            src += 8*stride;
            len -= 8;
        }
        while (--len >= 0) {
            total += src[0];
            src += stride;
        }
        break;
    case 2:
        stride >>= 1;
        while (len >= 8) {
            total += src16[        0] + src16[  stride] + src16[2*stride] + src16[3*stride]
                   + src16[4*stride] + src16[5*stride] + src16[6*stride] + src16[7*stride];
            src16 += 8*stride;
            len -= 8;
        }
        while (--len >= 0) {
            total += src16[0];
            src16 += stride;
        }
        break;
    case 3:
    case 4:
        while (len >= 4) {
            total += src[        0] + src[1         ] + src[2         ]
                   + src[  stride] + src[1+  stride] + src[2+  stride]
                   + src[2*stride] + src[1+2*stride] + src[2+2*stride]
                   + src[3*stride] + src[1+3*stride] + src[2+3*stride];
            src += 4*stride;
            len -= 4;
        }
        while (--len >= 0) {
            total += src[0] + src[1] + src[2];
            src += stride;
        }
        div *= 3;
        break;
    }
    total /= div;

    av_log(ctx, AV_LOG_DEBUG, "total:%d\n", total);
    return total;
}

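/*
 * Return 1 if every sample along the line is zero (no edge pixels), 0 as soon
 * as a non-zero sample is found.  Same stride/bpp convention as checkline().
 */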
static int checkline_edge(void *ctx, const unsigned char *src, int stride, int len, int bpp)
{
    const uint16_t *src16 = (const uint16_t *)src;

    switch (bpp) {
    case 1:
        while (--len >= 0) {
            if (src[0]) return 0;
            src += stride;
        }
        break;
    case 2:
        stride >>= 1;
        while (--len >= 0) {
            if (src16[0]) return 0;
            src16 += stride;
        }
        break;
    case 3:
    case 4:
        while (--len >= 0) {
            if (src[0] || src[1] || src[2]) return 0;
            src += stride;
        }
        break;
    }

    return 1;
}

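/*
 * frame_nb starts at -skip so that the first 'skip' frames are ignored, and
 * the float edge-detection thresholds are converted to 8-bit values.
 */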
static av_cold int init(AVFilterContext *ctx)
{
    CropDetectContext *s = ctx->priv;

    s->frame_nb = -1 * s->skip;
    s->low_u8   = s->low  * 255. + .5;
    s->high_u8  = s->high * 255. + .5;

    av_log(ctx, AV_LOG_VERBOSE, "limit:%f round:%d skip:%d reset_count:%d\n",
           s->limit, s->round, s->skip, s->reset_count);

    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    CropDetectContext *s = ctx->priv;

    av_freep(&s->tmpbuf);
    av_freep(&s->filterbuf);
    av_freep(&s->gradients);
    av_freep(&s->directions);
    av_freep(&s->bboxes[0]);
    av_freep(&s->bboxes[1]);
    av_freep(&s->bboxes[2]);
    av_freep(&s->bboxes[3]);
}

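/*
 * Per-link setup: a fractional limit (< 1.0) is scaled to the full range of
 * the input bit depth, the crop rectangle is reset, and the work buffers for
 * the edge detector plus the median window of bounding boxes are allocated.
 */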
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    CropDetectContext *s = ctx->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    const int bufsize = inlink->w * inlink->h;

    av_image_fill_max_pixsteps(s->max_pixsteps, NULL, desc);

    if (s->limit < 1.0)
        s->limit *= (1 << desc->comp[0].depth) - 1;

    s->x1 = inlink->w - 1;
    s->y1 = inlink->h - 1;
    s->x2 = 0;
    s->y2 = 0;

    s->window_size = FFMAX(s->reset_count, 15);
    s->tmpbuf      = av_malloc(bufsize);
    s->filterbuf   = av_malloc(bufsize * s->max_pixsteps[0]);
    s->gradients   = av_calloc(bufsize, sizeof(*s->gradients));
    s->directions  = av_malloc(bufsize);
    s->bboxes[0]   = av_malloc(s->window_size * sizeof(*s->bboxes[0]));
    s->bboxes[1]   = av_malloc(s->window_size * sizeof(*s->bboxes[1]));
    s->bboxes[2]   = av_malloc(s->window_size * sizeof(*s->bboxes[2]));
    s->bboxes[3]   = av_malloc(s->window_size * sizeof(*s->bboxes[3]));

    if (!s->tmpbuf || !s->filterbuf || !s->gradients || !s->directions ||
        !s->bboxes[0] || !s->bboxes[1] || !s->bboxes[2] || !s->bboxes[3])
        return AVERROR(ENOMEM);

    return 0;
}

#define SET_META(key, value) \
    av_dict_set_int(metadata, key, value, 0)

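/*
 * Per-frame analysis.  In MODE_BLACK the frame borders are scanned with
 * checkline() for rows/columns darker than 'limit'; in MODE_MV_EDGES the
 * bounding box of significant motion vectors is refined against a
 * Canny-style edge image and smoothed with a median over the last
 * window_size frames.  The resulting crop rectangle is exported as frame
 * metadata.
 */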
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    CropDetectContext *s = ctx->priv;
    int bpp = s->max_pixsteps[0];
    int w, h, x, y, shrink_by, i;
    AVDictionary **metadata;
    int outliers, last_y;
    int limit = lrint(s->limit);

    const int inw = inlink->w;
    const int inh = inlink->h;
    uint8_t *tmpbuf     = s->tmpbuf;
    uint8_t *filterbuf  = s->filterbuf;
    uint16_t *gradients = s->gradients;
    int8_t *directions  = s->directions;
    const AVFrameSideData *sd = NULL;
    int scan_w, scan_h, bboff;

    void (*sobel)(int w, int h, uint16_t *dst, int dst_linesize,
                  int8_t *dir, int dir_linesize,
                  const uint8_t *src, int src_linesize, int src_stride) = (bpp == 2) ? &ff_sobel_16 : &ff_sobel_8;
    void (*gaussian_blur)(int w, int h,
                          uint8_t *dst, int dst_linesize,
                          const uint8_t *src, int src_linesize, int src_stride) = (bpp == 2) ? &ff_gaussian_blur_16 : &ff_gaussian_blur_8;

    // ignore first s->skip frames
    if (++s->frame_nb > 0) {
        metadata = &frame->metadata;

        // Reset the crop area every reset_count frames, if reset_count is > 0
        if (s->reset_count > 0 && s->frame_nb > s->reset_count) {
            s->x1 = frame->width  - 1;
            s->y1 = frame->height - 1;
            s->x2 = 0;
            s->y2 = 0;
            s->frame_nb = 1;
        }

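        /*
         * Scan lines starting at FROM and moving in direction INC.  A line
         * whose average value (checkline()) exceeds 'limit' counts as an
         * outlier; once more than max_outliers outliers have been seen, DST
         * is set to the position just past the last line still considered
         * black.
         */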
#define FIND(DST, FROM, NOEND, INC, STEP0, STEP1, LEN) \
        outliers = 0;\
        for (last_y = y = FROM; NOEND; y = y INC) {\
            if (checkline(ctx, frame->data[0] + STEP0 * y, STEP1, LEN, bpp) > limit) {\
                if (++outliers > s->max_outliers) { \
                    DST = last_y;\
                    break;\
                }\
            } else\
                last_y = y INC;\
        }

        if (s->mode == MODE_BLACK) {
            FIND(s->y1,                  0, y < s->y1,               +1, frame->linesize[0], bpp, frame->width);
            FIND(s->y2, frame->height - 1, y > FFMAX(s->y2, s->y1), -1, frame->linesize[0], bpp, frame->width);
            FIND(s->x1,                  0, y < s->x1,               +1, bpp, frame->linesize[0], frame->height);
            FIND(s->x2,  frame->width - 1, y > FFMAX(s->x2, s->x1), -1, bpp, frame->linesize[0], frame->height);
        } else { // MODE_MV_EDGES
            sd = av_frame_get_side_data(frame, AV_FRAME_DATA_MOTION_VECTORS);
            s->x1 = 0;
            s->y1 = 0;
            s->x2 = inw - 1;
            s->y2 = inh - 1;

            if (!sd) {
                av_log(ctx, AV_LOG_WARNING, "Cannot detect: no motion vectors available\n");
            } else {
                // gaussian filter to reduce noise
                gaussian_blur(inw, inh,
                              filterbuf, inw*bpp,
                              frame->data[0], frame->linesize[0], bpp);

                // compute the 16-bit gradients and directions for the next step
                sobel(inw, inh, gradients, inw, directions, inw, filterbuf, inw*bpp, bpp);

                // non_maximum_suppression() will actually keep & clip what's necessary and
                // ignore the rest, so we need a clean output buffer
                memset(tmpbuf, 0, inw * inh);
                ff_non_maximum_suppression(inw, inh, tmpbuf, inw, directions, inw, gradients, inw);

                // keep high values, or low values surrounded by high values
                ff_double_threshold(s->low_u8, s->high_u8, inw, inh,
                                    tmpbuf, inw, tmpbuf, inw);

                // scan all MVs and store bounding box
                s->x1 = inw - 1;
                s->y1 = inh - 1;
                s->x2 = 0;
                s->y2 = 0;
                for (i = 0; i < sd->size / sizeof(AVMotionVector); i++) {
                    const AVMotionVector *mv = (const AVMotionVector*)sd->data + i;
                    const int mx = mv->dst_x - mv->src_x;
                    const int my = mv->dst_y - mv->src_y;

                    if (mv->dst_x >= 0 && mv->dst_x < inw &&
                        mv->dst_y >= 0 && mv->dst_y < inh &&
                        mv->src_x >= 0 && mv->src_x < inw &&
                        mv->src_y >= 0 && mv->src_y < inh &&
                        mx * mx + my * my >= s->mv_threshold * s->mv_threshold) {
                        s->x1 = mv->dst_x < s->x1 ? mv->dst_x : s->x1;
                        s->y1 = mv->dst_y < s->y1 ? mv->dst_y : s->y1;
                        s->x2 = mv->dst_x > s->x2 ? mv->dst_x : s->x2;
                        s->y2 = mv->dst_y > s->y2 ? mv->dst_y : s->y2;
                    }
                }

                // assert x1<x2, y1<y2
                if (s->x1 > s->x2) FFSWAP(int, s->x1, s->x2);
                if (s->y1 > s->y2) FFSWAP(int, s->y1, s->y2);

                // scan outward looking for 0-edge-lines in edge image
                scan_w = s->x2 - s->x1;
                scan_h = s->y2 - s->y1;

#define FIND_EDGE(DST, FROM, NOEND, INC, STEP0, STEP1, LEN) \
                for (last_y = y = FROM; NOEND; y = y INC) { \
                    if (checkline_edge(ctx, tmpbuf + STEP0 * y, STEP1, LEN, bpp)) { \
                        if (last_y INC == y) { \
                            DST = y; \
                            break; \
                        } else \
                            last_y = y; \
                    } \
                } \
                if (!(NOEND)) { \
                    DST = y -(INC); \
                }

                FIND_EDGE(s->y1, s->y1, y >= 0,  -1, inw, bpp, scan_w);
                FIND_EDGE(s->y2, s->y2, y < inh, +1, inw, bpp, scan_w);
                FIND_EDGE(s->x1, s->x1, y >= 0,  -1, bpp, inw, scan_h);
                FIND_EDGE(s->x2, s->x2, y < inw, +1, bpp, inw, scan_h);

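                /*
                 * The per-frame box is noisy, so the last window_size results
                 * are kept per coordinate and the median is used as the
                 * reported crop rectangle.
                 */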
                // queue bboxes
                bboff = (s->frame_nb - 1) % s->window_size;
                s->bboxes[0][bboff] = s->x1;
                s->bboxes[1][bboff] = s->x2;
                s->bboxes[2][bboff] = s->y1;
                s->bboxes[3][bboff] = s->y2;

                // sort queue
                bboff = FFMIN(s->frame_nb, s->window_size);
                AV_QSORT(s->bboxes[0], bboff, int, comp);
                AV_QSORT(s->bboxes[1], bboff, int, comp);
                AV_QSORT(s->bboxes[2], bboff, int, comp);
                AV_QSORT(s->bboxes[3], bboff, int, comp);

                // return median of window_size elems
                s->x1 = s->bboxes[0][bboff/2];
                s->x2 = s->bboxes[1][bboff/2];
                s->y1 = s->bboxes[2][bboff/2];
                s->y2 = s->bboxes[3][bboff/2];
            }
        }

        // round x and y (up), important for yuv colorspaces
        // make sure they stay rounded!
        x = (s->x1+1) & ~1;
        y = (s->y1+1) & ~1;

        w = s->x2 - x + 1;
        h = s->y2 - y + 1;

        // w and h must be divisible by 2 as well because of yuv
        // colorspace problems.
        if (s->round <= 1)
            s->round = 16;
        if (s->round % 2)
            s->round *= 2;

        shrink_by = w % s->round;
        w -= shrink_by;
        x += (shrink_by/2 + 1) & ~1;

        shrink_by = h % s->round;
        h -= shrink_by;
        y += (shrink_by/2 + 1) & ~1;
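        /*
         * Illustrative example: with round=16 and a detected area of
         * 1919x817 pixels, shrink_by is 15 and then 1, giving w=1904 and
         * h=816, with x shifted right by 8 and y left unchanged.
         */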

        SET_META("lavfi.cropdetect.x1", s->x1);
        SET_META("lavfi.cropdetect.x2", s->x2);
        SET_META("lavfi.cropdetect.y1", s->y1);
        SET_META("lavfi.cropdetect.y2", s->y2);
        SET_META("lavfi.cropdetect.w",  w);
        SET_META("lavfi.cropdetect.h",  h);
        SET_META("lavfi.cropdetect.x",  x);
        SET_META("lavfi.cropdetect.y",  y);

        av_log(ctx, AV_LOG_INFO,
               "x1:%d x2:%d y1:%d y2:%d w:%d h:%d x:%d y:%d pts:%"PRId64" t:%f crop=%d:%d:%d:%d\n",
               s->x1, s->x2, s->y1, s->y2, w, h, x, y, frame->pts,
               frame->pts == AV_NOPTS_VALUE ? -1 : frame->pts * av_q2d(inlink->time_base),
               w, h, x, y);
    }

    return ff_filter_frame(inlink->dst->outputs[0], frame);
}

#define OFFSET(x) offsetof(CropDetectContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

static const AVOption cropdetect_options[] = {
    { "limit", "Threshold below which the pixel is considered black", OFFSET(limit),        AV_OPT_TYPE_FLOAT, { .dbl = 24.0/255 }, 0, 65535, FLAGS },
    { "round", "Value by which the width/height should be divisible", OFFSET(round),        AV_OPT_TYPE_INT,   { .i64 = 16 }, 0, INT_MAX, FLAGS },
    { "reset", "Recalculate the crop area after this many frames",    OFFSET(reset_count),  AV_OPT_TYPE_INT,   { .i64 = 0 },  0, INT_MAX, FLAGS },
    { "skip",  "Number of initial frames to skip",                    OFFSET(skip),         AV_OPT_TYPE_INT,   { .i64 = 2 },  0, INT_MAX, FLAGS },
    { "reset_count", "Recalculate the crop area after this many frames", OFFSET(reset_count), AV_OPT_TYPE_INT, { .i64 = 0 },  0, INT_MAX, FLAGS },
    { "max_outliers", "Threshold count of outliers",                  OFFSET(max_outliers), AV_OPT_TYPE_INT,   { .i64 = 0 },  0, INT_MAX, FLAGS },
    { "mode", "set mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=MODE_BLACK}, 0, MODE_NB-1, FLAGS, "mode" },
        { "black",   "detect black pixels surrounding the video",  0, AV_OPT_TYPE_CONST, {.i64=MODE_BLACK},    INT_MIN, INT_MAX, FLAGS, "mode" },
        { "mvedges", "detect motion and edges surrounding the video", 0, AV_OPT_TYPE_CONST, {.i64=MODE_MV_EDGES}, INT_MIN, INT_MAX, FLAGS, "mode" },
    { "high", "Set high threshold for edge detection",                OFFSET(high), AV_OPT_TYPE_FLOAT, {.dbl=25/255.}, 0, 1, FLAGS },
    { "low",  "Set low threshold for edge detection",                 OFFSET(low),  AV_OPT_TYPE_FLOAT, {.dbl=15/255.}, 0, 1, FLAGS },
    { "mv_threshold", "motion vector threshold when estimating video window size", OFFSET(mv_threshold), AV_OPT_TYPE_INT, {.i64=8}, 0, 100, FLAGS },
    { NULL }
};
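
/*
 * Example: "cropdetect=limit=24:round=2:reset=0" treats a line as non-black
 * once its average exceeds 24 (values >= 1 are used as absolute thresholds),
 * shrinks the suggested width/height to multiples of 2, and never resets the
 * accumulated crop area.
 */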

AVFILTER_DEFINE_CLASS(cropdetect);

static const AVFilterPad avfilter_vf_cropdetect_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input,
        .filter_frame = filter_frame,
    },
};

static const AVFilterPad avfilter_vf_cropdetect_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
};

const AVFilter ff_vf_cropdetect = {
    .name          = "cropdetect",
    .description   = NULL_IF_CONFIG_SMALL("Auto-detect crop size."),
    .priv_size     = sizeof(CropDetectContext),
    .priv_class    = &cropdetect_class,
    .init          = init,
    .uninit        = uninit,
    FILTER_INPUTS(avfilter_vf_cropdetect_inputs),
    FILTER_OUTPUTS(avfilter_vf_cropdetect_outputs),
    FILTER_PIXFMTS_ARRAY(pix_fmts),
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_METADATA_ONLY,
};