vf_drawbox.c
/*
 * Copyright (c) 2008 Affine Systems, Inc (Michael Sullivan, Bobby Impollonia)
 * Copyright (c) 2013 Andrey Utkin <andrey.krieger.utkin gmail com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Box and grid drawing filters. Also a nice template for a filter
 * that needs to write in the input frame.
 */
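/*
 * Illustrative usage (a sketch; see the FFmpeg filter documentation for the
 * authoritative syntax): the geometry options below accept expressions
 * evaluated against the input frame, e.g.
 *
 *   ffmpeg -i in.mp4 -vf "drawbox=x=(iw-w)/2:y=(ih-h)/2:w=100:h=100:color=red@0.5:t=5" out.mp4
 *   ffmpeg -i in.mp4 -vf "drawgrid=w=iw/3:h=ih/3:t=2:color=white@0.5" out.mp4
 */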

#include "config_components.h"

#include "libavutil/colorspace.h"
#include "libavutil/common.h"
#include "libavutil/opt.h"
#include "libavutil/eval.h"
#include "libavutil/pixdesc.h"
#include "libavutil/parseutils.h"
#include "libavutil/detection_bbox.h"
#include "avfilter.h"
#include "drawutils.h"
#include "formats.h"
#include "internal.h"
#include "video.h"

static const char *const var_names[] = {
    "dar",
    "hsub", "vsub",
    "in_h", "ih",      ///< height of the input video
    "in_w", "iw",      ///< width of the input video
    "sar",
    "x",
    "y",
    "h",               ///< height of the rendered box
    "w",               ///< width of the rendered box
    "t",
    "fill",
    NULL
};

enum { Y, U, V, A };
enum { R, G, B };

enum var_name {
    VAR_DAR,
    VAR_HSUB, VAR_VSUB,
    VAR_IN_H, VAR_IH,
    VAR_IN_W, VAR_IW,
    VAR_SAR,
    VAR_X,
    VAR_Y,
    VAR_H,
    VAR_W,
    VAR_T,
    VAR_MAX,
    VARS_NB
};

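/*
 * The entries of var_names and the members of enum var_name must stay in the
 * same order: av_expr_parse_and_eval() matches names to values by index.
 * Note that "fill" shares the VAR_MAX slot, so an expression like t=fill
 * evaluates to the current maximum and yields a filled box.
 */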
struct DrawBoxContext;

typedef int (*PixelBelongsToRegion)(struct DrawBoxContext *s, int x, int y);

typedef struct DrawBoxContext {
    const AVClass *class;
    int x, y, w, h;
    int thickness;
    char *color_str;
    uint8_t rgba_map[4];
    uint8_t rgba_color[4];
    unsigned char yuv_color[4];
    int invert_color;        ///< invert luma color
    int vsub, hsub;          ///< chroma subsampling
    char *x_expr, *y_expr;   ///< expression for x and y
    char *w_expr, *h_expr;   ///< expression for width and height
    char *t_expr;            ///< expression for thickness
    char *box_source_string; ///< string for box data source
    int have_alpha;
    int replace;
    int step;
    enum AVFrameSideDataType box_source;

    void (*draw_region)(AVFrame *frame, struct DrawBoxContext *ctx, int left, int top, int right, int down,
                        PixelBelongsToRegion pixel_belongs_to_region);
} DrawBoxContext;

static const int NUM_EXPR_EVALS = 5;

#define ASSIGN_THREE_CHANNELS                                         \
    row[0] = frame->data[0] +  y               * frame->linesize[0];  \
    row[1] = frame->data[1] + (y >> ctx->vsub) * frame->linesize[1];  \
    row[2] = frame->data[2] + (y >> ctx->vsub) * frame->linesize[2];

#define ASSIGN_FOUR_CHANNELS                          \
    ASSIGN_THREE_CHANNELS                             \
    row[3] = frame->data[3] + y * frame->linesize[3];

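/*
 * Planar YUV(A) path: the requested rectangle is walked line by line. With an
 * alpha plane and replace=1 the box color and alpha overwrite the frame;
 * otherwise the color is alpha-blended as dst = (1 - a) * dst + a * src per
 * plane. Chroma samples are addressed with x >> hsub / y >> vsub to account
 * for subsampling.
 */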
static void draw_region(AVFrame *frame, DrawBoxContext *ctx, int left, int top, int right, int down,
                        PixelBelongsToRegion pixel_belongs_to_region)
{
    unsigned char *row[4];
    int x, y;
    if (ctx->have_alpha && ctx->replace) {
        for (y = top; y < down; y++) {
            ASSIGN_FOUR_CHANNELS
            if (ctx->invert_color) {
                for (x = left; x < right; x++)
                    if (pixel_belongs_to_region(ctx, x, y))
                        row[0][x] = 0xff - row[0][x];
            } else {
                for (x = left; x < right; x++) {
                    if (pixel_belongs_to_region(ctx, x, y)) {
                        row[0][x             ] = ctx->yuv_color[Y];
                        row[1][x >> ctx->hsub] = ctx->yuv_color[U];
                        row[2][x >> ctx->hsub] = ctx->yuv_color[V];
                        row[3][x             ] = ctx->yuv_color[A];
                    }
                }
            }
        }
    } else {
        for (y = top; y < down; y++) {
            ASSIGN_THREE_CHANNELS
            if (ctx->invert_color) {
                for (x = left; x < right; x++)
                    if (pixel_belongs_to_region(ctx, x, y))
                        row[0][x] = 0xff - row[0][x];
            } else {
                for (x = left; x < right; x++) {
                    double alpha = (double)ctx->yuv_color[A] / 255;

                    if (pixel_belongs_to_region(ctx, x, y)) {
                        row[0][x             ] = (1 - alpha) * row[0][x             ] + alpha * ctx->yuv_color[Y];
                        row[1][x >> ctx->hsub] = (1 - alpha) * row[1][x >> ctx->hsub] + alpha * ctx->yuv_color[U];
                        row[2][x >> ctx->hsub] = (1 - alpha) * row[2][x >> ctx->hsub] + alpha * ctx->yuv_color[V];
                    }
                }
            }
        }
    }
}

#define ASSIGN_THREE_CHANNELS_PACKED                                      \
    row[0] = frame->data[0] + y * frame->linesize[0] + ctx->rgba_map[0];  \
    row[1] = frame->data[0] + y * frame->linesize[0] + ctx->rgba_map[1];  \
    row[2] = frame->data[0] + y * frame->linesize[0] + ctx->rgba_map[2];

#define ASSIGN_FOUR_CHANNELS_PACKED                                       \
    ASSIGN_THREE_CHANNELS_PACKED                                          \
    row[3] = frame->data[0] + y * frame->linesize[0] + ctx->rgba_map[3];

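/*
 * Packed RGB(A) path: all components live in plane 0, so row[p] starts at the
 * byte offset rgba_map[p] and consecutive pixels are ctx->step bytes apart
 * (hence the x*C indexing). Colors come straight from rgba_color, no CCIR
 * conversion needed.
 */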
static void draw_region_rgb_packed(AVFrame *frame, DrawBoxContext *ctx, int left, int top, int right, int down,
                                   PixelBelongsToRegion pixel_belongs_to_region)
{
    const int C = ctx->step;
    uint8_t *row[4];

    if (ctx->have_alpha && ctx->replace) {
        for (int y = top; y < down; y++) {
            ASSIGN_FOUR_CHANNELS_PACKED
            if (ctx->invert_color) {
                for (int x = left; x < right; x++)
                    if (pixel_belongs_to_region(ctx, x, y)) {
                        row[0][x*C] = 0xff - row[0][x*C];
                        row[1][x*C] = 0xff - row[1][x*C];
                        row[2][x*C] = 0xff - row[2][x*C];
                    }
            } else {
                for (int x = left; x < right; x++) {
                    if (pixel_belongs_to_region(ctx, x, y)) {
                        row[0][x*C] = ctx->rgba_color[R];
                        row[1][x*C] = ctx->rgba_color[G];
                        row[2][x*C] = ctx->rgba_color[B];
                        row[3][x*C] = ctx->rgba_color[A];
                    }
                }
            }
        }
    } else {
        for (int y = top; y < down; y++) {
            ASSIGN_THREE_CHANNELS_PACKED
            if (ctx->invert_color) {
                for (int x = left; x < right; x++)
                    if (pixel_belongs_to_region(ctx, x, y)) {
                        row[0][x*C] = 0xff - row[0][x*C];
                        row[1][x*C] = 0xff - row[1][x*C];
                        row[2][x*C] = 0xff - row[2][x*C];
                    }
            } else {
                for (int x = left; x < right; x++) {
                    float alpha = (float)ctx->rgba_color[A] / 255.f;

                    if (pixel_belongs_to_region(ctx, x, y)) {
                        row[0][x*C] = (1.f - alpha) * row[0][x*C] + alpha * ctx->rgba_color[R];
                        row[1][x*C] = (1.f - alpha) * row[1][x*C] + alpha * ctx->rgba_color[G];
                        row[2][x*C] = (1.f - alpha) * row[2][x*C] + alpha * ctx->rgba_color[B];
                    }
                }
            }
        }
    }
}

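/*
 * Maps the box_source option string onto an AVFrameSideDataType. Only
 * "side_data_detection_bboxes" is recognized for now; anything else makes
 * init() fail with EINVAL.
 */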
static enum AVFrameSideDataType box_source_string_parse(const char *box_source_string)
{
    av_assert0(box_source_string);
    if (!strcmp(box_source_string, "side_data_detection_bboxes")) {
        return AV_FRAME_DATA_DETECTION_BBOXES;
    } else {
        // will support side_data_regions_of_interest next
        return AVERROR(EINVAL);
    }
}

static av_cold int init(AVFilterContext *ctx)
{
    DrawBoxContext *s = ctx->priv;

    if (s->box_source_string) {
        s->box_source = box_source_string_parse(s->box_source_string);
        if ((int)s->box_source < 0) {
            av_log(ctx, AV_LOG_ERROR, "Error box source: %s\n", s->box_source_string);
            return AVERROR(EINVAL);
        }
    }

    if (!strcmp(s->color_str, "invert"))
        s->invert_color = 1;
    else if (av_parse_color(s->rgba_color, s->color_str, -1, ctx) < 0)
        return AVERROR(EINVAL);

    if (!s->invert_color) {
        s->yuv_color[Y] = RGB_TO_Y_CCIR(s->rgba_color[0], s->rgba_color[1], s->rgba_color[2]);
        s->yuv_color[U] = RGB_TO_U_CCIR(s->rgba_color[0], s->rgba_color[1], s->rgba_color[2], 0);
        s->yuv_color[V] = RGB_TO_V_CCIR(s->rgba_color[0], s->rgba_color[1], s->rgba_color[2], 0);
        s->yuv_color[A] = s->rgba_color[3];
    }

    return 0;
}

static const enum AVPixelFormat pix_fmts[] = {
    AV_PIX_FMT_YUV444P,  AV_PIX_FMT_YUV422P,  AV_PIX_FMT_YUV420P,
    AV_PIX_FMT_YUV411P,  AV_PIX_FMT_YUV410P,
    AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
    AV_PIX_FMT_YUV440P,  AV_PIX_FMT_YUVJ440P,
    AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P,
    AV_PIX_FMT_0BGR,     AV_PIX_FMT_BGR0,
    AV_PIX_FMT_0RGB,     AV_PIX_FMT_RGB0,
    AV_PIX_FMT_ABGR,     AV_PIX_FMT_BGRA,
    AV_PIX_FMT_ARGB,     AV_PIX_FMT_RGBA,
    AV_PIX_FMT_BGR24,    AV_PIX_FMT_RGB24,
    AV_PIX_FMT_NONE
};

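/*
 * config_input() resolves the x/y/w/h/t option expressions. They may refer to
 * each other (e.g. x in terms of w), so the whole set is evaluated
 * NUM_EXPR_EVALS + 1 times and only an error on the final pass is fatal. The
 * "fill" variable (VAR_MAX) is re-bound before each expression to the largest
 * meaningful value: input width/height, remaining space, or INT_MAX for the
 * thickness.
 */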
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    DrawBoxContext *s = ctx->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    double var_values[VARS_NB], res;
    char *expr;
    int ret;
    int i;

    ff_fill_rgba_map(s->rgba_map, inlink->format);

    if (!(desc->flags & AV_PIX_FMT_FLAG_RGB))
        s->draw_region = draw_region;
    else
        s->draw_region = draw_region_rgb_packed;

    s->step = av_get_padded_bits_per_pixel(desc) >> 3;
    s->hsub = desc->log2_chroma_w;
    s->vsub = desc->log2_chroma_h;
    s->have_alpha = desc->flags & AV_PIX_FMT_FLAG_ALPHA;

    var_values[VAR_IN_H] = var_values[VAR_IH] = inlink->h;
    var_values[VAR_IN_W] = var_values[VAR_IW] = inlink->w;
    var_values[VAR_SAR]  = inlink->sample_aspect_ratio.num ? av_q2d(inlink->sample_aspect_ratio) : 1;
    var_values[VAR_DAR]  = (double)inlink->w / inlink->h * var_values[VAR_SAR];
    var_values[VAR_HSUB] = s->hsub;
    var_values[VAR_VSUB] = s->vsub;
    var_values[VAR_X] = NAN;
    var_values[VAR_Y] = NAN;
    var_values[VAR_H] = NAN;
    var_values[VAR_W] = NAN;
    var_values[VAR_T] = NAN;

    for (i = 0; i <= NUM_EXPR_EVALS; i++) {
        /* evaluate expressions, fail on last iteration */
        var_values[VAR_MAX] = inlink->w;
        if ((ret = av_expr_parse_and_eval(&res, (expr = s->x_expr),
                                          var_names, var_values,
                                          NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0 && i == NUM_EXPR_EVALS)
            goto fail;
        s->x = var_values[VAR_X] = res;

        var_values[VAR_MAX] = inlink->h;
        if ((ret = av_expr_parse_and_eval(&res, (expr = s->y_expr),
                                          var_names, var_values,
                                          NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0 && i == NUM_EXPR_EVALS)
            goto fail;
        s->y = var_values[VAR_Y] = res;

        var_values[VAR_MAX] = inlink->w - s->x;
        if ((ret = av_expr_parse_and_eval(&res, (expr = s->w_expr),
                                          var_names, var_values,
                                          NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0 && i == NUM_EXPR_EVALS)
            goto fail;
        s->w = var_values[VAR_W] = res;

        var_values[VAR_MAX] = inlink->h - s->y;
        if ((ret = av_expr_parse_and_eval(&res, (expr = s->h_expr),
                                          var_names, var_values,
                                          NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0 && i == NUM_EXPR_EVALS)
            goto fail;
        s->h = var_values[VAR_H] = res;

        var_values[VAR_MAX] = INT_MAX;
        if ((ret = av_expr_parse_and_eval(&res, (expr = s->t_expr),
                                          var_names, var_values,
                                          NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0 && i == NUM_EXPR_EVALS)
            goto fail;
        s->thickness = var_values[VAR_T] = res;
    }

    /* if w or h are zero, use the input w/h */
    s->w = (s->w > 0) ? s->w : inlink->w;
    s->h = (s->h > 0) ? s->h : inlink->h;

    /* sanity check width and height */
    if (s->w < 0 || s->h < 0) {
        av_log(ctx, AV_LOG_ERROR, "Size values less than 0 are not acceptable.\n");
        return AVERROR(EINVAL);
    }

    av_log(ctx, AV_LOG_VERBOSE, "x:%d y:%d w:%d h:%d color:0x%02X%02X%02X%02X\n",
           s->x, s->y, s->w, s->h,
           s->yuv_color[Y], s->yuv_color[U], s->yuv_color[V], s->yuv_color[A]);

    return 0;

fail:
    av_log(ctx, AV_LOG_ERROR,
           "Error when evaluating the expression '%s'.\n",
           expr);
    return ret;
}

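/*
 * A pixel belongs to the box outline when it lies within 'thickness' pixels
 * of any of the four edges; with a very large thickness (t=fill) every pixel
 * inside the box matches, i.e. the box is filled.
 */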
static av_pure av_always_inline int pixel_belongs_to_box(DrawBoxContext *s, int x, int y)
{
    return (y - s->y < s->thickness) || (s->y + s->h - 1 - y < s->thickness) ||
           (x - s->x < s->thickness) || (s->x + s->w - 1 - x < s->thickness);
}

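/*
 * When box_source selects detection bounding boxes, one box is drawn per
 * AVDetectionBBox found in the frame side data and the configured x/y/w/h are
 * overridden; otherwise a single box with the configured geometry is drawn.
 * The drawn region is clipped to the frame with FFMAX/FFMIN.
 */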
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    DrawBoxContext *s = inlink->dst->priv;
    const AVDetectionBBoxHeader *header = NULL;
    const AVDetectionBBox *bbox;
    AVFrameSideData *sd;
    int loop = 1;

    if (s->box_source == AV_FRAME_DATA_DETECTION_BBOXES) {
        sd = av_frame_get_side_data(frame, AV_FRAME_DATA_DETECTION_BBOXES);
        if (sd) {
            header = (AVDetectionBBoxHeader *)sd->data;
            loop = header->nb_bboxes;
        } else {
            av_log(s, AV_LOG_WARNING, "No detection bboxes.\n");
            return ff_filter_frame(inlink->dst->outputs[0], frame);
        }
    }

    for (int i = 0; i < loop; i++) {
        if (header) {
            bbox = av_get_detection_bbox(header, i);
            s->y = bbox->y;
            s->x = bbox->x;
            s->h = bbox->h;
            s->w = bbox->w;
        }

        s->draw_region(frame, s, FFMAX(s->x, 0), FFMAX(s->y, 0), FFMIN(s->x + s->w, frame->width),
                       FFMIN(s->y + s->h, frame->height), pixel_belongs_to_box);
    }

    return ff_filter_frame(inlink->dst->outputs[0], frame);
}

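/*
 * process_command() allows the options to be changed while the filter runs:
 * the new option values are applied, then init() and config_input() are
 * re-run; on failure the previous geometry, thickness and replace setting
 * are restored.
 */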
static int process_command(AVFilterContext *ctx, const char *cmd, const char *args, char *res, int res_len, int flags)
{
    AVFilterLink *inlink = ctx->inputs[0];
    DrawBoxContext *s = ctx->priv;
    int old_x = s->x;
    int old_y = s->y;
    int old_w = s->w;
    int old_h = s->h;
    int old_t = s->thickness;
    int old_r = s->replace;
    int ret;

    ret = ff_filter_process_command(ctx, cmd, args, res, res_len, flags);
    if (ret < 0)
        return ret;

    ret = init(ctx);
    if (ret < 0)
        goto end;
    ret = config_input(inlink);
end:
    if (ret < 0) {
        s->x = old_x;
        s->y = old_y;
        s->w = old_w;
        s->h = old_h;
        s->thickness = old_t;
        s->replace = old_r;
    }

    return ret;
}

#define OFFSET(x) offsetof(DrawBoxContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM

#if CONFIG_DRAWBOX_FILTER

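/*
 * All options carry AV_OPT_FLAG_RUNTIME_PARAM and the filter exports
 * process_command, so they can also be changed mid-stream. An illustrative
 * sketch (standard sendcmd syntax assumed):
 *
 *   ffmpeg -i in.mp4 -vf "sendcmd=c='5.0 drawbox x 200',drawbox=x=0:y=0:w=100:h=100:color=red" out.mp4
 */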
static const AVOption drawbox_options[] = {
    { "x",         "set horizontal position of the left box edge", OFFSET(x_expr),    AV_OPT_TYPE_STRING, { .str="0" },       0, 0, FLAGS },
    { "y",         "set vertical position of the top box edge",    OFFSET(y_expr),    AV_OPT_TYPE_STRING, { .str="0" },       0, 0, FLAGS },
    { "width",     "set width of the box",                         OFFSET(w_expr),    AV_OPT_TYPE_STRING, { .str="0" },       0, 0, FLAGS },
    { "w",         "set width of the box",                         OFFSET(w_expr),    AV_OPT_TYPE_STRING, { .str="0" },       0, 0, FLAGS },
    { "height",    "set height of the box",                        OFFSET(h_expr),    AV_OPT_TYPE_STRING, { .str="0" },       0, 0, FLAGS },
    { "h",         "set height of the box",                        OFFSET(h_expr),    AV_OPT_TYPE_STRING, { .str="0" },       0, 0, FLAGS },
    { "color",     "set color of the box",                         OFFSET(color_str), AV_OPT_TYPE_STRING, { .str = "black" }, 0, 0, FLAGS },
    { "c",         "set color of the box",                         OFFSET(color_str), AV_OPT_TYPE_STRING, { .str = "black" }, 0, 0, FLAGS },
    { "thickness", "set the box thickness",                        OFFSET(t_expr),    AV_OPT_TYPE_STRING, { .str="3" },       0, 0, FLAGS },
    { "t",         "set the box thickness",                        OFFSET(t_expr),    AV_OPT_TYPE_STRING, { .str="3" },       0, 0, FLAGS },
    { "replace",   "replace color & alpha",                        OFFSET(replace),   AV_OPT_TYPE_BOOL,   { .i64=0 },         0, 1, FLAGS },
    { "box_source", "use data from bounding boxes in side data",   OFFSET(box_source_string), AV_OPT_TYPE_STRING, { .str=NULL }, 0, 1, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(drawbox);

static const AVFilterPad drawbox_inputs[] = {
    {
        .name           = "default",
        .type           = AVMEDIA_TYPE_VIDEO,
        .flags          = AVFILTERPAD_FLAG_NEEDS_WRITABLE,
        .config_props   = config_input,
        .filter_frame   = filter_frame,
    },
};

static const AVFilterPad drawbox_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
};

const AVFilter ff_vf_drawbox = {
    .name            = "drawbox",
    .description     = NULL_IF_CONFIG_SMALL("Draw a colored box on the input video."),
    .priv_size       = sizeof(DrawBoxContext),
    .priv_class      = &drawbox_class,
    .init            = init,
    FILTER_INPUTS(drawbox_inputs),
    FILTER_OUTPUTS(drawbox_outputs),
    FILTER_PIXFMTS_ARRAY(pix_fmts),
    .process_command = process_command,
    .flags           = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
#endif /* CONFIG_DRAWBOX_FILTER */

#if CONFIG_DRAWGRID_FILTER
static av_pure av_always_inline int pixel_belongs_to_grid(DrawBoxContext *drawgrid, int x, int y)
{
    // x is horizontal (width) coord,
    // y is vertical (height) coord
    int x_modulo;
    int y_modulo;

    // Abstract from the offset
    x -= drawgrid->x;
    y -= drawgrid->y;

    x_modulo = x % drawgrid->w;
    y_modulo = y % drawgrid->h;

    // If x or y went negative, fix the values to preserve the logic
    if (x_modulo < 0)
        x_modulo += drawgrid->w;
    if (y_modulo < 0)
        y_modulo += drawgrid->h;

    return x_modulo < drawgrid->thickness  // Belongs to vertical line
        || y_modulo < drawgrid->thickness; // Belongs to horizontal line
}

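/*
 * drawgrid reuses DrawBoxContext, but here w/h are the grid cell size and x/y
 * the grid offset; the region spans the whole frame and
 * pixel_belongs_to_grid() picks out the grid lines.
 */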
static int drawgrid_filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    DrawBoxContext *drawgrid = inlink->dst->priv;

    drawgrid->draw_region(frame, drawgrid, 0, 0, frame->width, frame->height, pixel_belongs_to_grid);

    return ff_filter_frame(inlink->dst->outputs[0], frame);
}

static const AVOption drawgrid_options[] = {
    { "x",         "set horizontal offset",   OFFSET(x_expr),    AV_OPT_TYPE_STRING, { .str="0" },       0, 0, FLAGS },
    { "y",         "set vertical offset",     OFFSET(y_expr),    AV_OPT_TYPE_STRING, { .str="0" },       0, 0, FLAGS },
    { "width",     "set width of grid cell",  OFFSET(w_expr),    AV_OPT_TYPE_STRING, { .str="0" },       0, 0, FLAGS },
    { "w",         "set width of grid cell",  OFFSET(w_expr),    AV_OPT_TYPE_STRING, { .str="0" },       0, 0, FLAGS },
    { "height",    "set height of grid cell", OFFSET(h_expr),    AV_OPT_TYPE_STRING, { .str="0" },       0, 0, FLAGS },
    { "h",         "set height of grid cell", OFFSET(h_expr),    AV_OPT_TYPE_STRING, { .str="0" },       0, 0, FLAGS },
    { "color",     "set color of the grid",   OFFSET(color_str), AV_OPT_TYPE_STRING, { .str = "black" }, 0, 0, FLAGS },
    { "c",         "set color of the grid",   OFFSET(color_str), AV_OPT_TYPE_STRING, { .str = "black" }, 0, 0, FLAGS },
    { "thickness", "set grid line thickness", OFFSET(t_expr),    AV_OPT_TYPE_STRING, { .str="1" },       0, 0, FLAGS },
    { "t",         "set grid line thickness", OFFSET(t_expr),    AV_OPT_TYPE_STRING, { .str="1" },       0, 0, FLAGS },
    { "replace",   "replace color & alpha",   OFFSET(replace),   AV_OPT_TYPE_BOOL,   { .i64=0 },         0, 1, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(drawgrid);

static const AVFilterPad drawgrid_inputs[] = {
    {
        .name           = "default",
        .type           = AVMEDIA_TYPE_VIDEO,
        .flags          = AVFILTERPAD_FLAG_NEEDS_WRITABLE,
        .config_props   = config_input,
        .filter_frame   = drawgrid_filter_frame,
    },
};

static const AVFilterPad drawgrid_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
};

const AVFilter ff_vf_drawgrid = {
    .name            = "drawgrid",
    .description     = NULL_IF_CONFIG_SMALL("Draw a colored grid on the input video."),
    .priv_size       = sizeof(DrawBoxContext),
    .priv_class      = &drawgrid_class,
    .init            = init,
    FILTER_INPUTS(drawgrid_inputs),
    FILTER_OUTPUTS(drawgrid_outputs),
    FILTER_PIXFMTS_ARRAY(pix_fmts),
    .flags           = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
    .process_command = process_command,
};

#endif /* CONFIG_DRAWGRID_FILTER */