/* FFmpeg — libavfilter/vf_drawbox.c */
1 /*
2  * Copyright (c) 2008 Affine Systems, Inc (Michael Sullivan, Bobby Impollonia)
3  * Copyright (c) 2013 Andrey Utkin <andrey.krieger.utkin gmail com>
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * Box and grid drawing filters. Also a nice template for a filter
25  * that needs to write in the input frame.
26  */
27 
#include "libavutil/colorspace.h"
#include "libavutil/common.h"
#include "libavutil/opt.h"
#include "libavutil/eval.h"
#include "libavutil/pixdesc.h"
#include "libavutil/parseutils.h"
#include "libavutil/detection_bbox.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
39 
40 static const char *const var_names[] = {
41  "dar",
42  "hsub", "vsub",
43  "in_h", "ih", ///< height of the input video
44  "in_w", "iw", ///< width of the input video
45  "sar",
46  "x",
47  "y",
48  "h", ///< height of the rendered box
49  "w", ///< width of the rendered box
50  "t",
51  "fill",
52  NULL
53 };
54 
/* Indices of the luma/chroma/alpha components inside yuv_color[]. */
enum { Y, U, V, A };
56 
/* Expression variables; order matches var_names[] above.
 * NOTE(review): the enum body (original lines 58-69) was lost in extraction
 * and is reconstructed here from the Doxygen cross-references. */
enum var_name {
    VAR_DAR,
    VAR_HSUB, VAR_VSUB,
    VAR_IN_H, VAR_IH,
    VAR_IN_W, VAR_IW,
    VAR_SAR,
    VAR_X,
    VAR_Y,
    VAR_H,
    VAR_W,
    VAR_T,
    VAR_MAX,    ///< upper bound of the expression being evaluated ("fill")
    VARS_NB
};
71 
72 typedef struct DrawBoxContext {
73  const AVClass *class;
74  int x, y, w, h;
75  int thickness;
76  char *color_str;
77  unsigned char yuv_color[4];
78  int invert_color; ///< invert luma color
79  int vsub, hsub; ///< chroma subsampling
80  char *x_expr, *y_expr; ///< expression for x and y
81  char *w_expr, *h_expr; ///< expression for width and height
82  char *t_expr; ///< expression for thickness
83  char *box_source_string; ///< string for box data source
85  int replace;
88 
/* The x/y/w/h/t expressions may reference each other, so they are evaluated
 * repeatedly to converge; errors are only fatal on the final pass. */
static const int NUM_EXPR_EVALS = 5;
90 
91 typedef int (*PixelBelongsToRegion)(DrawBoxContext *s, int x, int y);
92 
/* Point row[0..2] at the Y/U/V lines of the current row y;
 * chroma planes are vertically subsampled by ctx->vsub. */
#define ASSIGN_THREE_CHANNELS                                        \
    row[0] = frame->data[0] +  y               * frame->linesize[0]; \
    row[1] = frame->data[1] + (y >> ctx->vsub) * frame->linesize[1]; \
    row[2] = frame->data[2] + (y >> ctx->vsub) * frame->linesize[2];
97 
/* As ASSIGN_THREE_CHANNELS, plus the (non-subsampled) alpha plane. */
#define ASSIGN_FOUR_CHANNELS                              \
    ASSIGN_THREE_CHANNELS                                 \
    row[3] = frame->data[3] + y * frame->linesize[3];
101 
102 static void draw_region(AVFrame *frame, DrawBoxContext *ctx, int left, int top, int right, int down,
103  PixelBelongsToRegion pixel_belongs_to_region)
104 {
105  unsigned char *row[4];
106  int x, y;
107  if (ctx->have_alpha && ctx->replace) {
108  for (y = top; y < down; y++) {
110  if (ctx->invert_color) {
111  for (x = left; x < right; x++)
112  if (pixel_belongs_to_region(ctx, x, y))
113  row[0][x] = 0xff - row[0][x];
114  } else {
115  for (x = left; x < right; x++) {
116  if (pixel_belongs_to_region(ctx, x, y)) {
117  row[0][x ] = ctx->yuv_color[Y];
118  row[1][x >> ctx->hsub] = ctx->yuv_color[U];
119  row[2][x >> ctx->hsub] = ctx->yuv_color[V];
120  row[3][x ] = ctx->yuv_color[A];
121  }
122  }
123  }
124  }
125  } else {
126  for (y = top; y < down; y++) {
128  if (ctx->invert_color) {
129  for (x = left; x < right; x++)
130  if (pixel_belongs_to_region(ctx, x, y))
131  row[0][x] = 0xff - row[0][x];
132  } else {
133  for (x = left; x < right; x++) {
134  double alpha = (double)ctx->yuv_color[A] / 255;
135 
136  if (pixel_belongs_to_region(ctx, x, y)) {
137  row[0][x ] = (1 - alpha) * row[0][x ] + alpha * ctx->yuv_color[Y];
138  row[1][x >> ctx->hsub] = (1 - alpha) * row[1][x >> ctx->hsub] + alpha * ctx->yuv_color[U];
139  row[2][x >> ctx->hsub] = (1 - alpha) * row[2][x >> ctx->hsub] + alpha * ctx->yuv_color[V];
140  }
141  }
142  }
143  }
144  }
145 }
146 
147 static enum AVFrameSideDataType box_source_string_parse(const char *box_source_string)
148 {
149  av_assert0(box_source_string);
150  if (!strcmp(box_source_string, "side_data_detection_bboxes")) {
152  } else {
153  // will support side_data_regions_of_interest next
154  return AVERROR(EINVAL);
155  }
156 }
157 
159 {
160  DrawBoxContext *s = ctx->priv;
161  uint8_t rgba_color[4];
162 
163  if (s->box_source_string) {
164  s->box_source = box_source_string_parse(s->box_source_string);
165  if ((int)s->box_source < 0) {
166  av_log(ctx, AV_LOG_ERROR, "Error box source: %s\n",s->box_source_string);
167  return AVERROR(EINVAL);
168  }
169  }
170 
171  if (!strcmp(s->color_str, "invert"))
172  s->invert_color = 1;
173  else if (av_parse_color(rgba_color, s->color_str, -1, ctx) < 0)
174  return AVERROR(EINVAL);
175 
176  if (!s->invert_color) {
177  s->yuv_color[Y] = RGB_TO_Y_CCIR(rgba_color[0], rgba_color[1], rgba_color[2]);
178  s->yuv_color[U] = RGB_TO_U_CCIR(rgba_color[0], rgba_color[1], rgba_color[2], 0);
179  s->yuv_color[V] = RGB_TO_V_CCIR(rgba_color[0], rgba_color[1], rgba_color[2], 0);
180  s->yuv_color[A] = rgba_color[3];
181  }
182 
183  return 0;
184 }
185 
186 static const enum AVPixelFormat pix_fmts[] = {
193 };
194 
196 {
197  AVFilterContext *ctx = inlink->dst;
198  DrawBoxContext *s = ctx->priv;
200  double var_values[VARS_NB], res;
201  char *expr;
202  int ret;
203  int i;
204 
205  s->hsub = desc->log2_chroma_w;
206  s->vsub = desc->log2_chroma_h;
207  s->have_alpha = desc->flags & AV_PIX_FMT_FLAG_ALPHA;
208 
209  var_values[VAR_IN_H] = var_values[VAR_IH] = inlink->h;
210  var_values[VAR_IN_W] = var_values[VAR_IW] = inlink->w;
211  var_values[VAR_SAR] = inlink->sample_aspect_ratio.num ? av_q2d(inlink->sample_aspect_ratio) : 1;
212  var_values[VAR_DAR] = (double)inlink->w / inlink->h * var_values[VAR_SAR];
213  var_values[VAR_HSUB] = s->hsub;
214  var_values[VAR_VSUB] = s->vsub;
215  var_values[VAR_X] = NAN;
216  var_values[VAR_Y] = NAN;
217  var_values[VAR_H] = NAN;
218  var_values[VAR_W] = NAN;
219  var_values[VAR_T] = NAN;
220 
221  for (i = 0; i <= NUM_EXPR_EVALS; i++) {
222  /* evaluate expressions, fail on last iteration */
223  var_values[VAR_MAX] = inlink->w;
224  if ((ret = av_expr_parse_and_eval(&res, (expr = s->x_expr),
225  var_names, var_values,
226  NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0 && i == NUM_EXPR_EVALS)
227  goto fail;
228  s->x = var_values[VAR_X] = res;
229 
230  var_values[VAR_MAX] = inlink->h;
231  if ((ret = av_expr_parse_and_eval(&res, (expr = s->y_expr),
232  var_names, var_values,
233  NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0 && i == NUM_EXPR_EVALS)
234  goto fail;
235  s->y = var_values[VAR_Y] = res;
236 
237  var_values[VAR_MAX] = inlink->w - s->x;
238  if ((ret = av_expr_parse_and_eval(&res, (expr = s->w_expr),
239  var_names, var_values,
240  NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0 && i == NUM_EXPR_EVALS)
241  goto fail;
242  s->w = var_values[VAR_W] = res;
243 
244  var_values[VAR_MAX] = inlink->h - s->y;
245  if ((ret = av_expr_parse_and_eval(&res, (expr = s->h_expr),
246  var_names, var_values,
247  NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0 && i == NUM_EXPR_EVALS)
248  goto fail;
249  s->h = var_values[VAR_H] = res;
250 
251  var_values[VAR_MAX] = INT_MAX;
252  if ((ret = av_expr_parse_and_eval(&res, (expr = s->t_expr),
253  var_names, var_values,
254  NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0 && i == NUM_EXPR_EVALS)
255  goto fail;
256  s->thickness = var_values[VAR_T] = res;
257  }
258 
259  /* if w or h are zero, use the input w/h */
260  s->w = (s->w > 0) ? s->w : inlink->w;
261  s->h = (s->h > 0) ? s->h : inlink->h;
262 
263  /* sanity check width and height */
264  if (s->w < 0 || s->h < 0) {
265  av_log(ctx, AV_LOG_ERROR, "Size values less than 0 are not acceptable.\n");
266  return AVERROR(EINVAL);
267  }
268 
269  av_log(ctx, AV_LOG_VERBOSE, "x:%d y:%d w:%d h:%d color:0x%02X%02X%02X%02X\n",
270  s->x, s->y, s->w, s->h,
271  s->yuv_color[Y], s->yuv_color[U], s->yuv_color[V], s->yuv_color[A]);
272 
273  return 0;
274 
275 fail:
277  "Error when evaluating the expression '%s'.\n",
278  expr);
279  return ret;
280 }
281 
283 {
284  return (y - s->y < s->thickness) || (s->y + s->h - 1 - y < s->thickness) ||
285  (x - s->x < s->thickness) || (s->x + s->w - 1 - x < s->thickness);
286 }
287 
289 {
290  DrawBoxContext *s = inlink->dst->priv;
292  const AVDetectionBBox *bbox;
293  AVFrameSideData *sd;
294  int loop = 1;
295 
296  if (s->box_source == AV_FRAME_DATA_DETECTION_BBOXES) {
298  if (sd) {
300  loop = header->nb_bboxes;
301  } else {
302  av_log(s, AV_LOG_WARNING, "No detection bboxes.\n");
303  return ff_filter_frame(inlink->dst->outputs[0], frame);
304  }
305  }
306 
307  for (int i = 0; i < loop; i++) {
308  if (header) {
309  bbox = av_get_detection_bbox(header, i);
310  s->y = bbox->y;
311  s->x = bbox->x;
312  s->h = bbox->h;
313  s->w = bbox->w;
314  }
315 
316  draw_region(frame, s, FFMAX(s->x, 0), FFMAX(s->y, 0), FFMIN(s->x + s->w, frame->width),
317  FFMIN(s->y + s->h, frame->height), pixel_belongs_to_box);
318  }
319 
320  return ff_filter_frame(inlink->dst->outputs[0], frame);
321 }
322 
323 static int process_command(AVFilterContext *ctx, const char *cmd, const char *args, char *res, int res_len, int flags)
324 {
325  AVFilterLink *inlink = ctx->inputs[0];
326  DrawBoxContext *s = ctx->priv;
327  int old_x = s->x;
328  int old_y = s->y;
329  int old_w = s->w;
330  int old_h = s->h;
331  int old_t = s->thickness;
332  int old_r = s->replace;
333  int ret;
334 
335  ret = ff_filter_process_command(ctx, cmd, args, res, res_len, flags);
336  if (ret < 0)
337  return ret;
338 
339  ret = init(ctx);
340  if (ret < 0)
341  goto end;
343 end:
344  if (ret < 0) {
345  s->x = old_x;
346  s->y = old_y;
347  s->w = old_w;
348  s->h = old_h;
349  s->thickness = old_t;
350  s->replace = old_r;
351  }
352 
353  return ret;
354 }
355 
/* Option-table helpers: member offset into the context, and the common
 * option flags (runtime-settable video filtering parameters). */
#define OFFSET(x) offsetof(DrawBoxContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
358 
359 #if CONFIG_DRAWBOX_FILTER
360 
361 static const AVOption drawbox_options[] = {
362  { "x", "set horizontal position of the left box edge", OFFSET(x_expr), AV_OPT_TYPE_STRING, { .str="0" }, 0, 0, FLAGS },
363  { "y", "set vertical position of the top box edge", OFFSET(y_expr), AV_OPT_TYPE_STRING, { .str="0" }, 0, 0, FLAGS },
364  { "width", "set width of the box", OFFSET(w_expr), AV_OPT_TYPE_STRING, { .str="0" }, 0, 0, FLAGS },
365  { "w", "set width of the box", OFFSET(w_expr), AV_OPT_TYPE_STRING, { .str="0" }, 0, 0, FLAGS },
366  { "height", "set height of the box", OFFSET(h_expr), AV_OPT_TYPE_STRING, { .str="0" }, 0, 0, FLAGS },
367  { "h", "set height of the box", OFFSET(h_expr), AV_OPT_TYPE_STRING, { .str="0" }, 0, 0, FLAGS },
368  { "color", "set color of the box", OFFSET(color_str), AV_OPT_TYPE_STRING, { .str = "black" }, 0, 0, FLAGS },
369  { "c", "set color of the box", OFFSET(color_str), AV_OPT_TYPE_STRING, { .str = "black" }, 0, 0, FLAGS },
370  { "thickness", "set the box thickness", OFFSET(t_expr), AV_OPT_TYPE_STRING, { .str="3" }, 0, 0, FLAGS },
371  { "t", "set the box thickness", OFFSET(t_expr), AV_OPT_TYPE_STRING, { .str="3" }, 0, 0, FLAGS },
372  { "replace", "replace color & alpha", OFFSET(replace), AV_OPT_TYPE_BOOL, { .i64=0 }, 0, 1, FLAGS },
373  { "box_source", "use datas from bounding box in side data", OFFSET(box_source_string), AV_OPT_TYPE_STRING, { .str=NULL }, 0, 1, FLAGS },
374  { NULL }
375 };
376 
377 AVFILTER_DEFINE_CLASS(drawbox);
378 
379 static const AVFilterPad drawbox_inputs[] = {
380  {
381  .name = "default",
382  .type = AVMEDIA_TYPE_VIDEO,
384  .config_props = config_input,
385  .filter_frame = filter_frame,
386  },
387 };
388 
389 static const AVFilterPad drawbox_outputs[] = {
390  {
391  .name = "default",
392  .type = AVMEDIA_TYPE_VIDEO,
393  },
394 };
395 
396 const AVFilter ff_vf_drawbox = {
397  .name = "drawbox",
398  .description = NULL_IF_CONFIG_SMALL("Draw a colored box on the input video."),
399  .priv_size = sizeof(DrawBoxContext),
400  .priv_class = &drawbox_class,
401  .init = init,
402  FILTER_INPUTS(drawbox_inputs),
403  FILTER_OUTPUTS(drawbox_outputs),
405  .process_command = process_command,
407 };
408 #endif /* CONFIG_DRAWBOX_FILTER */
409 
410 #if CONFIG_DRAWGRID_FILTER
411 static av_pure av_always_inline int pixel_belongs_to_grid(DrawBoxContext *drawgrid, int x, int y)
412 {
413  // x is horizontal (width) coord,
414  // y is vertical (height) coord
415  int x_modulo;
416  int y_modulo;
417 
418  // Abstract from the offset
419  x -= drawgrid->x;
420  y -= drawgrid->y;
421 
422  x_modulo = x % drawgrid->w;
423  y_modulo = y % drawgrid->h;
424 
425  // If x or y got negative, fix values to preserve logics
426  if (x_modulo < 0)
427  x_modulo += drawgrid->w;
428  if (y_modulo < 0)
429  y_modulo += drawgrid->h;
430 
431  return x_modulo < drawgrid->thickness // Belongs to vertical line
432  || y_modulo < drawgrid->thickness; // Belongs to horizontal line
433 }
434 
435 static int drawgrid_filter_frame(AVFilterLink *inlink, AVFrame *frame)
436 {
437  DrawBoxContext *drawgrid = inlink->dst->priv;
438 
439  draw_region(frame, drawgrid, 0, 0, frame->width, frame->height, pixel_belongs_to_grid);
440 
441  return ff_filter_frame(inlink->dst->outputs[0], frame);
442 }
443 
444 static const AVOption drawgrid_options[] = {
445  { "x", "set horizontal offset", OFFSET(x_expr), AV_OPT_TYPE_STRING, { .str="0" }, 0, 0, FLAGS },
446  { "y", "set vertical offset", OFFSET(y_expr), AV_OPT_TYPE_STRING, { .str="0" }, 0, 0, FLAGS },
447  { "width", "set width of grid cell", OFFSET(w_expr), AV_OPT_TYPE_STRING, { .str="0" }, 0, 0, FLAGS },
448  { "w", "set width of grid cell", OFFSET(w_expr), AV_OPT_TYPE_STRING, { .str="0" }, 0, 0, FLAGS },
449  { "height", "set height of grid cell", OFFSET(h_expr), AV_OPT_TYPE_STRING, { .str="0" }, 0, 0, FLAGS },
450  { "h", "set height of grid cell", OFFSET(h_expr), AV_OPT_TYPE_STRING, { .str="0" }, 0, 0, FLAGS },
451  { "color", "set color of the grid", OFFSET(color_str), AV_OPT_TYPE_STRING, { .str = "black" }, 0, 0, FLAGS },
452  { "c", "set color of the grid", OFFSET(color_str), AV_OPT_TYPE_STRING, { .str = "black" }, 0, 0, FLAGS },
453  { "thickness", "set grid line thickness", OFFSET(t_expr), AV_OPT_TYPE_STRING, {.str="1"}, 0, 0, FLAGS },
454  { "t", "set grid line thickness", OFFSET(t_expr), AV_OPT_TYPE_STRING, {.str="1"}, 0, 0, FLAGS },
455  { "replace", "replace color & alpha", OFFSET(replace), AV_OPT_TYPE_BOOL, { .i64=0 }, 0, 1, FLAGS },
456  { NULL }
457 };
458 
459 AVFILTER_DEFINE_CLASS(drawgrid);
460 
461 static const AVFilterPad drawgrid_inputs[] = {
462  {
463  .name = "default",
464  .type = AVMEDIA_TYPE_VIDEO,
466  .config_props = config_input,
467  .filter_frame = drawgrid_filter_frame,
468  },
469 };
470 
471 static const AVFilterPad drawgrid_outputs[] = {
472  {
473  .name = "default",
474  .type = AVMEDIA_TYPE_VIDEO,
475  },
476 };
477 
478 const AVFilter ff_vf_drawgrid = {
479  .name = "drawgrid",
480  .description = NULL_IF_CONFIG_SMALL("Draw a colored grid on the input video."),
481  .priv_size = sizeof(DrawBoxContext),
482  .priv_class = &drawgrid_class,
483  .init = init,
484  FILTER_INPUTS(drawgrid_inputs),
485  FILTER_OUTPUTS(drawgrid_outputs),
488  .process_command = process_command,
489 };
490 
491 #endif /* CONFIG_DRAWGRID_FILTER */
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
A
@ A
Definition: vf_drawbox.c:55
av_frame_get_side_data
AVFrameSideData * av_frame_get_side_data(const AVFrame *frame, enum AVFrameSideDataType type)
Definition: frame.c:617
ASSIGN_FOUR_CHANNELS
#define ASSIGN_FOUR_CHANNELS
Definition: vf_drawbox.c:98
DrawBoxContext::vsub
int vsub
Definition: vf_drawbox.c:79
ff_filter_frame
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1018
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2660
av_parse_color
int av_parse_color(uint8_t *rgba_color, const char *color_string, int slen, void *log_ctx)
Put the RGBA values that correspond to color_string in rgba_color.
Definition: parseutils.c:356
process_command
static int process_command(AVFilterContext *ctx, const char *cmd, const char *args, char *res, int res_len, int flags)
Definition: vf_drawbox.c:323
FILTER_PIXFMTS_ARRAY
#define FILTER_PIXFMTS_ARRAY(array)
Definition: internal.h:171
VARS_NB
@ VARS_NB
Definition: vf_drawbox.c:69
RGB_TO_U_CCIR
#define RGB_TO_U_CCIR(r1, g1, b1, shift)
Definition: colorspace.h:102
VAR_MAX
@ VAR_MAX
Definition: vf_drawbox.c:68
inlink
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
Definition: filter_design.txt:212
DrawBoxContext::thickness
int thickness
Definition: vf_drawbox.c:75
config_input
static int config_input(AVFilterLink *inlink)
Definition: vf_drawbox.c:195
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:317
pixdesc.h
Y
@ Y
Definition: vf_drawbox.c:55
AVOption
AVOption.
Definition: opt.h:247
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:196
var_names
static const char *const var_names[]
Definition: vf_drawbox.c:40
AV_PIX_FMT_YUV440P
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:99
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
AVFilter::name
const char * name
Filter name.
Definition: avfilter.h:169
box_source_string_parse
static enum AVFrameSideDataType box_source_string_parse(const char *box_source_string)
Definition: vf_drawbox.c:147
av_pure
#define av_pure
Definition: attributes.h:78
AVDetectionBBox::y
int y
Definition: detection_bbox.h:32
video.h
V
@ V
Definition: vf_drawbox.c:55
VAR_Y
@ VAR_Y
Definition: vf_drawbox.c:64
formats.h
DrawBoxContext::box_source_string
char * box_source_string
string for box data source
Definition: vf_drawbox.c:83
fail
#define fail()
Definition: checkasm.h:127
DrawBoxContext::y
int y
Definition: vf_drawbox.c:74
VAR_HSUB
@ VAR_HSUB
Definition: vf_drawbox.c:59
loop
static int loop
Definition: ffplay.c:339
AVFrameSideDataType
AVFrameSideDataType
Definition: frame.h:48
AVFilterPad
A filter pad used for either input or output.
Definition: internal.h:50
av_get_detection_bbox
static av_always_inline AVDetectionBBox * av_get_detection_bbox(const AVDetectionBBoxHeader *header, unsigned int idx)
Definition: detection_bbox.h:84
FLAGS
#define FLAGS
Definition: vf_drawbox.c:357
colorspace.h
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
av_cold
#define av_cold
Definition: attributes.h:90
DrawBoxContext::h_expr
char * h_expr
expression for width and height
Definition: vf_drawbox.c:81
init
static av_cold int init(AVFilterContext *ctx)
Definition: vf_drawbox.c:158
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:79
s
#define s(width, name)
Definition: cbs_vp9.c:257
AV_PIX_FMT_YUVA420P
@ AV_PIX_FMT_YUVA420P
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:101
RGB_TO_Y_CCIR
#define RGB_TO_Y_CCIR(r, g, b)
Definition: colorspace.h:98
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
var_name
var_name
Definition: noise_bsf.c:47
AV_PIX_FMT_FLAG_ALPHA
#define AV_PIX_FMT_FLAG_ALPHA
The pixel format has an alpha channel.
Definition: pixdesc.h:147
ctx
AVFormatContext * ctx
Definition: movenc.c:48
DrawBoxContext::yuv_color
unsigned char yuv_color[4]
Definition: vf_drawbox.c:77
U
@ U
Definition: vf_drawbox.c:55
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
NAN
#define NAN
Definition: mathematics.h:64
FILTER_INPUTS
#define FILTER_INPUTS(array)
Definition: internal.h:191
DrawBoxContext::color_str
char * color_str
Definition: vf_drawbox.c:76
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:80
if
if(ret)
Definition: filter_design.txt:179
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
NULL
#define NULL
Definition: coverity.c:32
VAR_W
@ VAR_W
Definition: vf_drawbox.c:66
AVDetectionBBoxHeader
Definition: detection_bbox.h:56
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:78
VAR_IN_H
@ VAR_IN_H
Definition: vf_drawbox.c:60
RGB_TO_V_CCIR
#define RGB_TO_V_CCIR(r1, g1, b1, shift)
Definition: colorspace.h:106
parseutils.h
VAR_IW
@ VAR_IW
Definition: vf_drawbox.c:61
for
for(j=16;j >0;--j)
Definition: h264pred_template.c:469
draw_region
static void draw_region(AVFrame *frame, DrawBoxContext *ctx, int left, int top, int right, int down, PixelBelongsToRegion pixel_belongs_to_region)
Definition: vf_drawbox.c:102
VAR_X
@ VAR_X
Definition: vf_drawbox.c:63
eval.h
pixel_belongs_to_box
static av_pure av_always_inline int pixel_belongs_to_box(DrawBoxContext *s, int x, int y)
Definition: vf_drawbox.c:282
DrawBoxContext::x
int x
Definition: vf_drawbox.c:74
DrawBoxContext::box_source
enum AVFrameSideDataType box_source
Definition: vf_drawbox.c:86
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:117
av_expr_parse_and_eval
int av_expr_parse_and_eval(double *d, const char *s, const char *const *const_names, const double *const_values, const char *const *func1_names, double(*const *funcs1)(void *, double), const char *const *func2_names, double(*const *funcs2)(void *, double, double), void *opaque, int log_offset, void *log_ctx)
Parse and evaluate an expression.
Definition: eval.c:776
DrawBoxContext::x_expr
char * x_expr
Definition: vf_drawbox.c:80
AVDetectionBBox::w
int w
Definition: detection_bbox.h:33
ff_vf_drawgrid
const AVFilter ff_vf_drawgrid
AVFrameSideData::data
uint8_t * data
Definition: frame.h:225
ff_filter_process_command
int ff_filter_process_command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
Generic processing of user supplied commands that are set in the same way as the filter options.
Definition: avfilter.c:882
header
static const uint8_t header[24]
Definition: sdr2.c:67
AV_PIX_FMT_YUVA444P
@ AV_PIX_FMT_YUVA444P
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
Definition: pixfmt.h:167
VAR_SAR
@ VAR_SAR
Definition: vf_drawbox.c:62
internal.h
AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC
#define AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC
Some filters support a generic "enable" expression option that can be used to enable or disable a fil...
Definition: avfilter.h:146
AVFILTER_DEFINE_CLASS
#define AVFILTER_DEFINE_CLASS(fname)
Definition: internal.h:326
DrawBoxContext::t_expr
char * t_expr
expression for thickness
Definition: vf_drawbox.c:82
VAR_VSUB
@ VAR_VSUB
Definition: vf_drawbox.c:59
ASSIGN_THREE_CHANNELS
#define ASSIGN_THREE_CHANNELS
Definition: vf_drawbox.c:93
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: vf_drawbox.c:186
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:271
common.h
PixelBelongsToRegion
int(* PixelBelongsToRegion)(DrawBoxContext *s, int x, int y)
Definition: vf_drawbox.c:91
DrawBoxContext
Definition: vf_drawbox.c:72
DrawBoxContext::w_expr
char * w_expr
Definition: vf_drawbox.c:81
av_always_inline
#define av_always_inline
Definition: attributes.h:49
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
AV_PIX_FMT_YUVJ440P
@ AV_PIX_FMT_YUVJ440P
planar YUV 4:4:0 full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV440P and setting color_range
Definition: pixfmt.h:100
AVFilterPad::name
const char * name
Pad name.
Definition: internal.h:56
VAR_DAR
@ VAR_DAR
Definition: vf_drawbox.c:58
AVFilter
Filter definition.
Definition: avfilter.h:165
VAR_H
@ VAR_H
Definition: vf_drawbox.c:65
ret
ret
Definition: filter_design.txt:187
ff_vf_drawbox
const AVFilter ff_vf_drawbox
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
AVDetectionBBox::h
int h
Definition: detection_bbox.h:34
left
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
Definition: snow.txt:386
VAR_IN_W
@ VAR_IN_W
Definition: vf_drawbox.c:61
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
DrawBoxContext::invert_color
int invert_color
invert luma color
Definition: vf_drawbox.c:78
avfilter.h
AVDetectionBBox::x
int x
Distance in pixels from the left/top edge of the frame, together with width and height,...
Definition: detection_bbox.h:31
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
AVFilterContext
An instance of a filter.
Definition: avfilter.h:402
DrawBoxContext::have_alpha
int have_alpha
Definition: vf_drawbox.c:84
DrawBoxContext::y_expr
char * y_expr
expression for x and y
Definition: vf_drawbox.c:80
desc
const char * desc
Definition: libsvtav1.c:79
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:223
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
NUM_EXPR_EVALS
static const int NUM_EXPR_EVALS
Definition: vf_drawbox.c:89
alpha
static const int16_t alpha[]
Definition: ilbcdata.h:55
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:241
FILTER_OUTPUTS
#define FILTER_OUTPUTS(array)
Definition: internal.h:192
VAR_IH
@ VAR_IH
Definition: vf_drawbox.c:60
filter_frame
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
Definition: vf_drawbox.c:288
AV_PIX_FMT_YUV411P
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
Definition: pixfmt.h:73
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:561
AV_PIX_FMT_YUV410P
@ AV_PIX_FMT_YUV410P
planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
Definition: pixfmt.h:72
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
DrawBoxContext::h
int h
Definition: vf_drawbox.c:74
AVDetectionBBox
Definition: detection_bbox.h:26
DrawBoxContext::replace
int replace
Definition: vf_drawbox.c:85
AV_OPT_TYPE_STRING
@ AV_OPT_TYPE_STRING
Definition: opt.h:228
DrawBoxContext::hsub
int hsub
chroma subsampling
Definition: vf_drawbox.c:79
int
int
Definition: ffmpeg_filter.c:153
DrawBoxContext::w
int w
Definition: vf_drawbox.c:74
OFFSET
#define OFFSET(x)
Definition: vf_drawbox.c:356
detection_bbox.h
AV_PIX_FMT_YUVA422P
@ AV_PIX_FMT_YUVA422P
planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
Definition: pixfmt.h:166
AV_FRAME_DATA_DETECTION_BBOXES
@ AV_FRAME_DATA_DETECTION_BBOXES
Bounding boxes for object detection and classification, as described by AVDetectionBBoxHeader.
Definition: frame.h:189
VAR_T
@ VAR_T
Definition: vf_drawbox.c:67
AVFILTERPAD_FLAG_NEEDS_WRITABLE
#define AVFILTERPAD_FLAG_NEEDS_WRITABLE
The filter expects writable frames from its input link, duplicating data buffers if needed.
Definition: internal.h:69