FFmpeg
vf_cropdetect.c
/*
 * Copyright (c) 2002 A'rpi
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

/**
 * @file
 * border detection filter
 * Ported from MPlayer libmpcodecs/vf_cropdetect.c.
 */
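
/*
 * Typical use (illustrative example): run the filter over some input and read
 * the suggested crop string from the log, or from the per-frame
 * lavfi.cropdetect.* metadata exported below, e.g.
 *
 *     ffmpeg -i input.mkv -vf cropdetect -f null -
 *
 * then feed the reported crop=w:h:x:y value to the crop filter.
 */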

#include "libavutil/imgutils.h"
#include "libavutil/internal.h"
#include "libavutil/opt.h"

#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"

typedef struct CropDetectContext {
    const AVClass *class;
    int x1, y1, x2, y2;
    float limit;
    int round;
    int reset_count;
    int frame_nb;
    int max_pixsteps[4];
    int max_outliers;
} CropDetectContext;

static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_YUV420P,  AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUV422P,  AV_PIX_FMT_YUVJ422P,
        AV_PIX_FMT_YUV444P,  AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUV411P,  AV_PIX_FMT_YUV440P,
        AV_PIX_FMT_YUV410P,  AV_PIX_FMT_GRAY8,    AV_PIX_FMT_NV12,     AV_PIX_FMT_NV21,
        AV_PIX_FMT_RGB24,    AV_PIX_FMT_BGR24,    AV_PIX_FMT_RGBA,     AV_PIX_FMT_BGRA,
        AV_PIX_FMT_YUV420P9,  AV_PIX_FMT_YUV422P9,  AV_PIX_FMT_YUV444P9,
        AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
        AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12,
        AV_PIX_FMT_YUV420P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV444P14,
        AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
        AV_PIX_FMT_NONE
    };

    AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
    if (!fmts_list)
        return AVERROR(ENOMEM);
    return ff_set_common_formats(ctx, fmts_list);
}

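/*
 * Return the average sample value of one horizontal or vertical line.
 * 'stride' is the distance between consecutive samples along the line,
 * 'len' the number of samples and 'bpp' the pixel step: 1 for 8-bit planar,
 * 2 for 9-16 bit planar, 3/4 for packed RGB/RGBA.  The caller compares the
 * result against the black threshold.
 */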
static int checkline(void *ctx, const unsigned char *src, int stride, int len, int bpp)
{
    int total = 0;
    int div = len;
    const uint16_t *src16 = (const uint16_t *)src;

    switch (bpp) {
    case 1:
        while (len >= 8) {
            total += src[       0] + src[  stride] + src[2*stride] + src[3*stride]
                   + src[4*stride] + src[5*stride] + src[6*stride] + src[7*stride];
            src += 8*stride;
            len -= 8;
        }
        while (--len >= 0) {
            total += src[0];
            src += stride;
        }
        break;
    case 2:
        stride >>= 1;
        while (len >= 8) {
            total += src16[       0] + src16[  stride] + src16[2*stride] + src16[3*stride]
                   + src16[4*stride] + src16[5*stride] + src16[6*stride] + src16[7*stride];
            src16 += 8*stride;
            len -= 8;
        }
        while (--len >= 0) {
            total += src16[0];
            src16 += stride;
        }
        break;
    case 3:
    case 4:
        while (len >= 4) {
            total += src[       0] + src[1         ] + src[2         ]
                   + src[  stride] + src[1+  stride] + src[2+  stride]
                   + src[2*stride] + src[1+2*stride] + src[2+2*stride]
                   + src[3*stride] + src[1+3*stride] + src[2+3*stride];
            src += 4*stride;
            len -= 4;
        }
        while (--len >= 0) {
            total += src[0] + src[1] + src[2];
            src += stride;
        }
        div *= 3;
        break;
    }
    total /= div;

    av_log(ctx, AV_LOG_DEBUG, "total:%d\n", total);
    return total;
}

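/* Start the frame counter at -2 so the first two frames, which are often
 * still black, are skipped by filter_frame() below. */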
static av_cold int init(AVFilterContext *ctx)
{
    CropDetectContext *s = ctx->priv;

    s->frame_nb = -2;

    av_log(ctx, AV_LOG_VERBOSE, "limit:%f round:%d reset_count:%d\n",
           s->limit, s->round, s->reset_count);

    return 0;
}

static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    CropDetectContext *s = ctx->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);

    av_image_fill_max_pixsteps(s->max_pixsteps, NULL, desc);

    if (s->limit < 1.0)
        s->limit *= (1 << desc->comp[0].depth) - 1;

    s->x1 = inlink->w - 1;
    s->y1 = inlink->h - 1;
    s->x2 = 0;
    s->y2 = 0;

    return 0;
}

#define SET_META(key, value) \
    av_dict_set_int(metadata, key, value, 0)

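/*
 * Per-frame work: scan inwards from all four edges, widen the running
 * bounding box (x1,y1)-(x2,y2) of non-black content, round the result to the
 * requested modulus and export it both as a log line and as
 * lavfi.cropdetect.* frame metadata.
 */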
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    CropDetectContext *s = ctx->priv;
    int bpp = s->max_pixsteps[0];
    int w, h, x, y, shrink_by;
    AVDictionary **metadata;
    int outliers, last_y;
    int limit = lrint(s->limit);

    // ignore first 2 frames - they may be empty
    if (++s->frame_nb > 0) {
        metadata = &frame->metadata;

        // Reset the crop area every reset_count frames, if reset_count is > 0
        if (s->reset_count > 0 && s->frame_nb > s->reset_count) {
            s->x1 = frame->width  - 1;
            s->y1 = frame->height - 1;
            s->x2 = 0;
            s->y2 = 0;
            s->frame_nb = 1;
        }

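/*
 * Scan whole lines starting at FROM and stepping by INC while NOEND holds:
 * each line is averaged by checkline() (offset STEP0 * y, sample stride
 * STEP1, LEN samples).  Lines at or below 'limit' advance the candidate
 * border to the next line; brighter lines count as outliers, and once more
 * than max_outliers of them are seen, DST is set to the last candidate and
 * the scan stops.
 */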
#define FIND(DST, FROM, NOEND, INC, STEP0, STEP1, LEN) \
        outliers = 0;\
        for (last_y = y = FROM; NOEND; y = y INC) {\
            if (checkline(ctx, frame->data[0] + STEP0 * y, STEP1, LEN, bpp) > limit) {\
                if (++outliers > s->max_outliers) { \
                    DST = last_y;\
                    break;\
                }\
            } else\
                last_y = y INC;\
        }

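        // Top border into y1 and bottom into y2 (rows addressed by linesize),
        // then left into x1 and right into x2 (columns addressed by bpp); the
        // bottom/right scans stop before crossing the opposite border.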
        FIND(s->y1,                  0,              y < s->y1, +1, frame->linesize[0], bpp, frame->width);
        FIND(s->y2, frame->height - 1, y > FFMAX(s->y2, s->y1), -1, frame->linesize[0], bpp, frame->width);
        FIND(s->x1,                  0,              y < s->x1, +1, bpp, frame->linesize[0], frame->height);
        FIND(s->x2,  frame->width - 1, y > FFMAX(s->x2, s->x1), -1, bpp, frame->linesize[0], frame->height);

        // round x and y (up), important for yuv colorspaces
        // make sure they stay rounded!
        x = (s->x1+1) & ~1;
        y = (s->y1+1) & ~1;

        w = s->x2 - x + 1;
        h = s->y2 - y + 1;

        // w and h must be divisible by 2 as well because of yuv
        // colorspace problems.
        if (s->round <= 1)
            s->round = 16;
        if (s->round % 2)
            s->round *= 2;

        shrink_by = w % s->round;
        w -= shrink_by;
        x += (shrink_by/2 + 1) & ~1;

        shrink_by = h % s->round;
        h -= shrink_by;
        y += (shrink_by/2 + 1) & ~1;
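        // For example, a 1920x1080 frame with 140-pixel black bars at the top
        // and bottom gives x1=0, y1=140, x2=1919, y2=939; with the default
        // round=16 this yields w=1920, h=800, x=0, y=140, i.e. crop=1920:800:0:140.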

        SET_META("lavfi.cropdetect.x1", s->x1);
        SET_META("lavfi.cropdetect.x2", s->x2);
        SET_META("lavfi.cropdetect.y1", s->y1);
        SET_META("lavfi.cropdetect.y2", s->y2);
        SET_META("lavfi.cropdetect.w",  w);
        SET_META("lavfi.cropdetect.h",  h);
        SET_META("lavfi.cropdetect.x",  x);
        SET_META("lavfi.cropdetect.y",  y);

        av_log(ctx, AV_LOG_INFO,
               "x1:%d x2:%d y1:%d y2:%d w:%d h:%d x:%d y:%d pts:%"PRId64" t:%f crop=%d:%d:%d:%d\n",
               s->x1, s->x2, s->y1, s->y2, w, h, x, y, frame->pts,
               frame->pts == AV_NOPTS_VALUE ? -1 : frame->pts * av_q2d(inlink->time_base),
               w, h, x, y);
    }

    return ff_filter_frame(inlink->dst->outputs[0], frame);
}

#define OFFSET(x) offsetof(CropDetectContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

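// "reset" and "reset_count" are aliases: both write to reset_count, and the
// default of 0 disables periodic resetting of the detected crop area.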
static const AVOption cropdetect_options[] = {
    { "limit",        "Threshold below which the pixel is considered black",  OFFSET(limit),        AV_OPT_TYPE_FLOAT, { .dbl = 24.0/255 }, 0, 65535,   FLAGS },
    { "round",        "Value by which the width/height should be divisible",  OFFSET(round),        AV_OPT_TYPE_INT,   { .i64 = 16 },       0, INT_MAX, FLAGS },
    { "reset",        "Recalculate the crop area after this many frames",     OFFSET(reset_count),  AV_OPT_TYPE_INT,   { .i64 = 0 },        0, INT_MAX, FLAGS },
    { "reset_count",  "Recalculate the crop area after this many frames",     OFFSET(reset_count),  AV_OPT_TYPE_INT,   { .i64 = 0 },        0, INT_MAX, FLAGS },
    { "max_outliers", "Threshold count of outliers",                          OFFSET(max_outliers), AV_OPT_TYPE_INT,   { .i64 = 0 },        0, INT_MAX, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(cropdetect);

static const AVFilterPad avfilter_vf_cropdetect_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad avfilter_vf_cropdetect_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO
    },
    { NULL }
};

AVFilter ff_vf_cropdetect = {
    .name          = "cropdetect",
    .description   = NULL_IF_CONFIG_SMALL("Auto-detect crop size."),
    .priv_size     = sizeof(CropDetectContext),
    .priv_class    = &cropdetect_class,
    .init          = init,
    .query_formats = query_formats,
    .inputs        = avfilter_vf_cropdetect_inputs,
    .outputs       = avfilter_vf_cropdetect_outputs,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};