FFmpeg
vf_overlay.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2010 Stefano Sabatini
3  * Copyright (c) 2010 Baptiste Coudurier
4  * Copyright (c) 2007 Bobby Bingham
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 /**
24  * @file
25  * overlay one video on top of another
26  */
27 
28 #include "avfilter.h"
29 #include "formats.h"
30 #include "libavutil/common.h"
31 #include "libavutil/eval.h"
32 #include "libavutil/avstring.h"
33 #include "libavutil/pixdesc.h"
34 #include "libavutil/imgutils.h"
35 #include "libavutil/mathematics.h"
36 #include "libavutil/opt.h"
37 #include "libavutil/timestamp.h"
38 #include "internal.h"
39 #include "drawutils.h"
40 #include "framesync.h"
41 #include "video.h"
42 #include "vf_overlay.h"
43 
/* Per-job payload handed (as void *arg) to the blend_slice_* slice callbacks:
 * dst is the main frame blended into in place, src is the overlay frame. */
typedef struct ThreadData {
    AVFrame *dst, *src;
} ThreadData;
47 
/* Variable names usable in the x/y expressions.  The order must line up with
 * the VAR_* constants used to index s->var_values[] (presumably declared in
 * vf_overlay.h — confirm there when reordering). */
static const char *const var_names[] = {
    "main_w",    "W", ///< width  of the main    video
    "main_h",    "H", ///< height of the main    video
    "overlay_w", "w", ///< width  of the overlay video
    "overlay_h", "h", ///< height of the overlay video
    "hsub",           ///< chroma horizontal subsampling factor of the main input
    "vsub",           ///< chroma vertical   subsampling factor of the main input
    "x",              ///< evaluated x position (so y can reference x and vice versa)
    "y",              ///< evaluated y position
    "n",              ///< number of frame
#if FF_API_FRAME_PKT
    "pos",            ///< position in the file
#endif
    "t",              ///< timestamp expressed in seconds
    NULL
};
64 
/* Input pad indices. */
#define MAIN    0
#define OVERLAY 1

/* Component indices into the main/overlay rgba_map[] tables (packed RGB). */
#define R 0
#define G 1
#define B 2
#define A 3

/* Plane indices for planar YUV. */
#define Y 0
#define U 1
#define V 2
76 
/* When the x/y expressions are (re)evaluated: once at init, or per frame.
 * The enumerator list was dropped by the extraction; restored to match the
 * EVAL_MODE_INIT / EVAL_MODE_FRAME uses below. */
enum EvalMode {
    EVAL_MODE_INIT,
    EVAL_MODE_FRAME,
    EVAL_MODE_NB
};
82 
84 {
85  OverlayContext *s = ctx->priv;
86 
87  ff_framesync_uninit(&s->fs);
88  av_expr_free(s->x_pexpr); s->x_pexpr = NULL;
89  av_expr_free(s->y_pexpr); s->y_pexpr = NULL;
90 }
91 
/**
 * Convert an evaluated (double) coordinate to an int aligned to the chroma
 * grid: truncate toward zero, then clear the low chroma_sub bits.
 * NaN (expression not evaluatable) maps to INT_MAX, i.e. "off screen".
 */
static inline int normalize_xy(double d, int chroma_sub)
{
    if (isnan(d))
        return INT_MAX;

    const int grid_mask = (1 << chroma_sub) - 1;
    return ((int)d) & ~grid_mask;
}
98 
100 {
101  OverlayContext *s = ctx->priv;
102 
103  s->var_values[VAR_X] = av_expr_eval(s->x_pexpr, s->var_values, NULL);
104  s->var_values[VAR_Y] = av_expr_eval(s->y_pexpr, s->var_values, NULL);
105  /* It is necessary if x is expressed from y */
106  s->var_values[VAR_X] = av_expr_eval(s->x_pexpr, s->var_values, NULL);
107  s->x = normalize_xy(s->var_values[VAR_X], s->hsub);
108  s->y = normalize_xy(s->var_values[VAR_Y], s->vsub);
109 }
110 
111 static int set_expr(AVExpr **pexpr, const char *expr, const char *option, void *log_ctx)
112 {
113  int ret;
114  AVExpr *old = NULL;
115 
116  if (*pexpr)
117  old = *pexpr;
118  ret = av_expr_parse(pexpr, expr, var_names,
119  NULL, NULL, NULL, NULL, 0, log_ctx);
120  if (ret < 0) {
121  av_log(log_ctx, AV_LOG_ERROR,
122  "Error when evaluating the expression '%s' for %s\n",
123  expr, option);
124  *pexpr = old;
125  return ret;
126  }
127 
128  av_expr_free(old);
129  return 0;
130 }
131 
132 static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
133  char *res, int res_len, int flags)
134 {
135  OverlayContext *s = ctx->priv;
136  int ret;
137 
138  if (!strcmp(cmd, "x"))
139  ret = set_expr(&s->x_pexpr, args, cmd, ctx);
140  else if (!strcmp(cmd, "y"))
141  ret = set_expr(&s->y_pexpr, args, cmd, ctx);
142  else
143  ret = AVERROR(ENOSYS);
144 
145  if (ret < 0)
146  return ret;
147 
148  if (s->eval_mode == EVAL_MODE_INIT) {
149  eval_expr(ctx);
150  av_log(ctx, AV_LOG_VERBOSE, "x:%f xi:%d y:%f yi:%d\n",
151  s->var_values[VAR_X], s->x,
152  s->var_values[VAR_Y], s->y);
153  }
154  return ret;
155 }
156 
157 static const enum AVPixelFormat alpha_pix_fmts[] = {
162 };
163 
165 {
166  OverlayContext *s = ctx->priv;
167 
168  /* overlay formats contains alpha, for avoiding conversion with alpha information loss */
169  static const enum AVPixelFormat main_pix_fmts_yuv420[] = {
173  };
174  static const enum AVPixelFormat overlay_pix_fmts_yuv420[] = {
176  };
177 
178  static const enum AVPixelFormat main_pix_fmts_yuv420p10[] = {
181  };
182  static const enum AVPixelFormat overlay_pix_fmts_yuv420p10[] = {
184  };
185 
186  static const enum AVPixelFormat main_pix_fmts_yuv422[] = {
188  };
189  static const enum AVPixelFormat overlay_pix_fmts_yuv422[] = {
191  };
192 
193  static const enum AVPixelFormat main_pix_fmts_yuv422p10[] = {
195  };
196  static const enum AVPixelFormat overlay_pix_fmts_yuv422p10[] = {
198  };
199 
200  static const enum AVPixelFormat main_pix_fmts_yuv444[] = {
202  };
203  static const enum AVPixelFormat overlay_pix_fmts_yuv444[] = {
205  };
206 
207  static const enum AVPixelFormat main_pix_fmts_yuv444p10[] = {
209  };
210  static const enum AVPixelFormat overlay_pix_fmts_yuv444p10[] = {
212  };
213 
214  static const enum AVPixelFormat main_pix_fmts_gbrp[] = {
216  };
217  static const enum AVPixelFormat overlay_pix_fmts_gbrp[] = {
219  };
220 
221  static const enum AVPixelFormat main_pix_fmts_rgb[] = {
226  };
227  static const enum AVPixelFormat overlay_pix_fmts_rgb[] = {
231  };
232 
233  const enum AVPixelFormat *main_formats, *overlay_formats;
235  int ret;
236 
237  switch (s->format) {
239  main_formats = main_pix_fmts_yuv420;
240  overlay_formats = overlay_pix_fmts_yuv420;
241  break;
243  main_formats = main_pix_fmts_yuv420p10;
244  overlay_formats = overlay_pix_fmts_yuv420p10;
245  break;
247  main_formats = main_pix_fmts_yuv422;
248  overlay_formats = overlay_pix_fmts_yuv422;
249  break;
251  main_formats = main_pix_fmts_yuv422p10;
252  overlay_formats = overlay_pix_fmts_yuv422p10;
253  break;
255  main_formats = main_pix_fmts_yuv444;
256  overlay_formats = overlay_pix_fmts_yuv444;
257  break;
259  main_formats = main_pix_fmts_yuv444p10;
260  overlay_formats = overlay_pix_fmts_yuv444p10;
261  break;
262  case OVERLAY_FORMAT_RGB:
263  main_formats = main_pix_fmts_rgb;
264  overlay_formats = overlay_pix_fmts_rgb;
265  break;
266  case OVERLAY_FORMAT_GBRP:
267  main_formats = main_pix_fmts_gbrp;
268  overlay_formats = overlay_pix_fmts_gbrp;
269  break;
270  case OVERLAY_FORMAT_AUTO:
272  default:
273  av_assert0(0);
274  }
275 
276  formats = ff_make_format_list(main_formats);
277  if ((ret = ff_formats_ref(formats, &ctx->inputs[MAIN]->outcfg.formats)) < 0 ||
278  (ret = ff_formats_ref(formats, &ctx->outputs[MAIN]->incfg.formats)) < 0)
279  return ret;
280 
281  return ff_formats_ref(ff_make_format_list(overlay_formats),
282  &ctx->inputs[OVERLAY]->outcfg.formats);
283 }
284 
286 {
287  AVFilterContext *ctx = inlink->dst;
288  OverlayContext *s = inlink->dst->priv;
289  int ret;
290  const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
291 
292  av_image_fill_max_pixsteps(s->overlay_pix_step, NULL, pix_desc);
293 
294  /* Finish the configuration by evaluating the expressions
295  now when both inputs are configured. */
296  s->var_values[VAR_MAIN_W ] = s->var_values[VAR_MW] = ctx->inputs[MAIN ]->w;
297  s->var_values[VAR_MAIN_H ] = s->var_values[VAR_MH] = ctx->inputs[MAIN ]->h;
298  s->var_values[VAR_OVERLAY_W] = s->var_values[VAR_OW] = ctx->inputs[OVERLAY]->w;
299  s->var_values[VAR_OVERLAY_H] = s->var_values[VAR_OH] = ctx->inputs[OVERLAY]->h;
300  s->var_values[VAR_HSUB] = 1<<pix_desc->log2_chroma_w;
301  s->var_values[VAR_VSUB] = 1<<pix_desc->log2_chroma_h;
302  s->var_values[VAR_X] = NAN;
303  s->var_values[VAR_Y] = NAN;
304  s->var_values[VAR_N] = 0;
305  s->var_values[VAR_T] = NAN;
306 #if FF_API_FRAME_PKT
307  s->var_values[VAR_POS] = NAN;
308 #endif
309 
310  if ((ret = set_expr(&s->x_pexpr, s->x_expr, "x", ctx)) < 0 ||
311  (ret = set_expr(&s->y_pexpr, s->y_expr, "y", ctx)) < 0)
312  return ret;
313 
314  s->overlay_is_packed_rgb =
315  ff_fill_rgba_map(s->overlay_rgba_map, inlink->format) >= 0;
316  s->overlay_has_alpha = ff_fmt_is_in(inlink->format, alpha_pix_fmts);
317 
318  if (s->eval_mode == EVAL_MODE_INIT) {
319  eval_expr(ctx);
320  av_log(ctx, AV_LOG_VERBOSE, "x:%f xi:%d y:%f yi:%d\n",
321  s->var_values[VAR_X], s->x,
322  s->var_values[VAR_Y], s->y);
323  }
324 
326  "main w:%d h:%d fmt:%s overlay w:%d h:%d fmt:%s\n",
327  ctx->inputs[MAIN]->w, ctx->inputs[MAIN]->h,
328  av_get_pix_fmt_name(ctx->inputs[MAIN]->format),
329  ctx->inputs[OVERLAY]->w, ctx->inputs[OVERLAY]->h,
330  av_get_pix_fmt_name(ctx->inputs[OVERLAY]->format));
331  return 0;
332 }
333 
334 static int config_output(AVFilterLink *outlink)
335 {
336  AVFilterContext *ctx = outlink->src;
337  OverlayContext *s = ctx->priv;
338  int ret;
339 
340  if ((ret = ff_framesync_init_dualinput(&s->fs, ctx)) < 0)
341  return ret;
342 
343  outlink->w = ctx->inputs[MAIN]->w;
344  outlink->h = ctx->inputs[MAIN]->h;
345  outlink->time_base = ctx->inputs[MAIN]->time_base;
346 
347  return ff_framesync_configure(&s->fs);
348 }
349 
// divide by 255 and round to nearest
// apply a fast variant: (X+127)/255 = ((X+127)*257+257)>>16 = ((X+128)*257)>>16
// (valid for the 16-bit intermediate results produced by 8-bit blending)
#define FAST_DIV255(x) ((((x) + 128) * 257) >> 16)

// calculate the unpremultiplied alpha, applying the general equation:
// alpha = alpha_overlay / ( (alpha_main + alpha_overlay) - (alpha_main * alpha_overlay) )
// (((x) << 16) - ((x) << 9) + (x)) is a faster version of: 255 * 255 * x
// ((((x) + (y)) << 8) - ((x) + (y)) - (y) * (x)) is a faster version of: 255 * (x + y)
#define UNPREMULTIPLY_ALPHA(x, y) ((((x) << 16) - ((x) << 9) + (x)) / ((((x) + (y)) << 8) - ((x) + (y)) - (y) * (x)))
359 
360 /**
361  * Blend image in src to destination buffer dst at position (x, y).
362  */
363 
365  AVFrame *dst, const AVFrame *src,
366  int main_has_alpha, int x, int y,
367  int is_straight, int jobnr, int nb_jobs)
368 {
369  OverlayContext *s = ctx->priv;
370  int i, imax, j, jmax;
371  const int src_w = src->width;
372  const int src_h = src->height;
373  const int dst_w = dst->width;
374  const int dst_h = dst->height;
375  uint8_t alpha; ///< the amount of overlay to blend on to main
376  const int dr = s->main_rgba_map[R];
377  const int dg = s->main_rgba_map[G];
378  const int db = s->main_rgba_map[B];
379  const int da = s->main_rgba_map[A];
380  const int dstep = s->main_pix_step[0];
381  const int sr = s->overlay_rgba_map[R];
382  const int sg = s->overlay_rgba_map[G];
383  const int sb = s->overlay_rgba_map[B];
384  const int sa = s->overlay_rgba_map[A];
385  const int sstep = s->overlay_pix_step[0];
386  int slice_start, slice_end;
387  uint8_t *S, *sp, *d, *dp;
388 
389  i = FFMAX(-y, 0);
390  imax = FFMIN3(-y + dst_h, FFMIN(src_h, dst_h), y + src_h);
391 
392  slice_start = i + (imax * jobnr) / nb_jobs;
393  slice_end = i + (imax * (jobnr+1)) / nb_jobs;
394 
395  sp = src->data[0] + (slice_start) * src->linesize[0];
396  dp = dst->data[0] + (y + slice_start) * dst->linesize[0];
397 
398  for (i = slice_start; i < slice_end; i++) {
399  j = FFMAX(-x, 0);
400  S = sp + j * sstep;
401  d = dp + (x+j) * dstep;
402 
403  for (jmax = FFMIN(-x + dst_w, src_w); j < jmax; j++) {
404  alpha = S[sa];
405 
406  // if the main channel has an alpha channel, alpha has to be calculated
407  // to create an un-premultiplied (straight) alpha value
408  if (main_has_alpha && alpha != 0 && alpha != 255) {
409  uint8_t alpha_d = d[da];
410  alpha = UNPREMULTIPLY_ALPHA(alpha, alpha_d);
411  }
412 
413  switch (alpha) {
414  case 0:
415  break;
416  case 255:
417  d[dr] = S[sr];
418  d[dg] = S[sg];
419  d[db] = S[sb];
420  break;
421  default:
422  // main_value = main_value * (1 - alpha) + overlay_value * alpha
423  // since alpha is in the range 0-255, the result must divided by 255
424  d[dr] = is_straight ? FAST_DIV255(d[dr] * (255 - alpha) + S[sr] * alpha) :
425  FFMIN(FAST_DIV255(d[dr] * (255 - alpha)) + S[sr], 255);
426  d[dg] = is_straight ? FAST_DIV255(d[dg] * (255 - alpha) + S[sg] * alpha) :
427  FFMIN(FAST_DIV255(d[dg] * (255 - alpha)) + S[sg], 255);
428  d[db] = is_straight ? FAST_DIV255(d[db] * (255 - alpha) + S[sb] * alpha) :
429  FFMIN(FAST_DIV255(d[db] * (255 - alpha)) + S[sb], 255);
430  }
431  if (main_has_alpha) {
432  switch (alpha) {
433  case 0:
434  break;
435  case 255:
436  d[da] = S[sa];
437  break;
438  default:
439  // apply alpha compositing: main_alpha += (1-main_alpha) * overlay_alpha
440  d[da] += FAST_DIV255((255 - d[da]) * S[sa]);
441  }
442  }
443  d += dstep;
444  S += sstep;
445  }
446  dp += dst->linesize[0];
447  sp += src->linesize[0];
448  }
449 }
450 
/*
 * DEFINE_BLEND_PLANE(depth, nbits) expands to blend_plane_<depth>_<nbits>bits(),
 * which blends one plane (index i) of the overlay src onto plane dst_plane of
 * dst at (x, y).  hsub/vsub give the plane's chroma subsampling; straight
 * selects straight vs premultiplied alpha; yuv selects signed chroma math for
 * the premultiplied path.  Rows are split across jobs (jobnr/nb_jobs), and the
 * 8-bit path can delegate whole rows to the optional octx->blend_row[i] SIMD
 * helper.  Code is intentionally left byte-identical; only comments added.
 */
#define DEFINE_BLEND_PLANE(depth, nbits) \
static av_always_inline void blend_plane_##depth##_##nbits##bits(AVFilterContext *ctx, \
                                                                 AVFrame *dst, const AVFrame *src, \
                                                                 int src_w, int src_h, \
                                                                 int dst_w, int dst_h, \
                                                                 int i, int hsub, int vsub, \
                                                                 int x, int y, \
                                                                 int main_has_alpha, \
                                                                 int dst_plane, \
                                                                 int dst_offset, \
                                                                 int dst_step, \
                                                                 int straight, \
                                                                 int yuv, \
                                                                 int jobnr, \
                                                                 int nb_jobs) \
{ \
    OverlayContext *octx = ctx->priv; \
    int src_wp = AV_CEIL_RSHIFT(src_w, hsub); \
    int src_hp = AV_CEIL_RSHIFT(src_h, vsub); \
    int dst_wp = AV_CEIL_RSHIFT(dst_w, hsub); \
    int dst_hp = AV_CEIL_RSHIFT(dst_h, vsub); \
    int yp = y>>vsub; \
    int xp = x>>hsub; \
    uint##depth##_t *s, *sp, *d, *dp, *dap, *a, *da, *ap; \
    int jmax, j, k, kmax; \
    int slice_start, slice_end; \
    const uint##depth##_t max = (1 << nbits) - 1; \
    const uint##depth##_t mid = (1 << (nbits -1)) ; \
    int bytes = depth / 8; \
 \
    dst_step /= bytes; \
    j = FFMAX(-yp, 0); \
    jmax = FFMIN3(-yp + dst_hp, FFMIN(src_hp, dst_hp), yp + src_hp); \
 \
    slice_start = j + (jmax * jobnr) / nb_jobs; \
    slice_end = j + (jmax * (jobnr+1)) / nb_jobs; \
 \
    sp = (uint##depth##_t *)(src->data[i] + (slice_start) * src->linesize[i]); \
    dp = (uint##depth##_t *)(dst->data[dst_plane] \
                      + (yp + slice_start) * dst->linesize[dst_plane] \
                      + dst_offset); \
    ap = (uint##depth##_t *)(src->data[3] + (slice_start << vsub) * src->linesize[3]); \
    dap = (uint##depth##_t *)(dst->data[3] + ((yp + slice_start) << vsub) * dst->linesize[3]); \
 \
    for (j = slice_start; j < slice_end; j++) { \
        k = FFMAX(-xp, 0); \
        d = dp + (xp+k) * dst_step; \
        s = sp + k; \
        a = ap + (k<<hsub); \
        da = dap + ((xp+k) << hsub); \
        kmax = FFMIN(-xp + dst_wp, src_wp); \
 \
        if (nbits == 8 && ((vsub && j+1 < src_hp) || !vsub) && octx->blend_row[i]) { \
            int c = octx->blend_row[i]((uint8_t*)d, (uint8_t*)da, (uint8_t*)s, \
                                       (uint8_t*)a, kmax - k, src->linesize[3]); \
 \
            s += c; \
            d += dst_step * c; \
            da += (1 << hsub) * c; \
            a += (1 << hsub) * c; \
            k += c; \
        } \
        for (; k < kmax; k++) { \
            int alpha_v, alpha_h, alpha; \
 \
            /* average alpha for color components, improve quality */ \
            if (hsub && vsub && j+1 < src_hp && k+1 < src_wp) { \
                alpha = (a[0] + a[src->linesize[3]] + \
                         a[1] + a[src->linesize[3]+1]) >> 2; \
            } else if (hsub || vsub) { \
                alpha_h = hsub && k+1 < src_wp ? \
                    (a[0] + a[1]) >> 1 : a[0]; \
                alpha_v = vsub && j+1 < src_hp ? \
                    (a[0] + a[src->linesize[3]]) >> 1 : a[0]; \
                alpha = (alpha_v + alpha_h) >> 1; \
            } else \
                alpha = a[0]; \
            /* if the main channel has an alpha channel, alpha has to be calculated */ \
            /* to create an un-premultiplied (straight) alpha value */ \
            if (main_has_alpha && alpha != 0 && alpha != max) { \
                /* average alpha for color components, improve quality */ \
                uint8_t alpha_d; \
                if (hsub && vsub && j+1 < src_hp && k+1 < src_wp) { \
                    alpha_d = (da[0] + da[dst->linesize[3]] + \
                               da[1] + da[dst->linesize[3]+1]) >> 2; \
                } else if (hsub || vsub) { \
                    alpha_h = hsub && k+1 < src_wp ? \
                        (da[0] + da[1]) >> 1 : da[0]; \
                    alpha_v = vsub && j+1 < src_hp ? \
                        (da[0] + da[dst->linesize[3]]) >> 1 : da[0]; \
                    alpha_d = (alpha_v + alpha_h) >> 1; \
                } else \
                    alpha_d = da[0]; \
                alpha = UNPREMULTIPLY_ALPHA(alpha, alpha_d); \
            } \
            if (straight) { \
                if (nbits > 8) \
                    *d = (*d * (max - alpha) + *s * alpha) / max; \
                else \
                    *d = FAST_DIV255(*d * (255 - alpha) + *s * alpha); \
            } else { \
                if (nbits > 8) { \
                    if (i && yuv) \
                        *d = av_clip((*d * (max - alpha) + *s * alpha) / max + *s - mid, -mid, mid) + mid; \
                    else \
                        *d = av_clip_uintp2((*d * (max - alpha) + *s * alpha) / max + *s - (16<<(nbits-8)),\
                                            nbits);\
                } else { \
                    if (i && yuv) \
                        *d = av_clip(FAST_DIV255((*d - mid) * (max - alpha)) + *s - mid, -mid, mid) + mid; \
                    else \
                        *d = av_clip_uint8(FAST_DIV255(*d * (255 - alpha)) + *s - 16); \
                } \
            } \
            s++; \
            d += dst_step; \
            da += 1 << hsub; \
            a += 1 << hsub; \
        } \
        dp += dst->linesize[dst_plane] / bytes; \
        sp += src->linesize[i] / bytes; \
        ap += (1 << vsub) * src->linesize[3] / bytes; \
        dap += (1 << vsub) * dst->linesize[3] / bytes; \
    } \
}
DEFINE_BLEND_PLANE(8, 8)
DEFINE_BLEND_PLANE(16, 10)
578 
579 #define DEFINE_ALPHA_COMPOSITE(depth, nbits) \
580 static inline void alpha_composite_##depth##_##nbits##bits(const AVFrame *src, const AVFrame *dst, \
581  int src_w, int src_h, \
582  int dst_w, int dst_h, \
583  int x, int y, \
584  int jobnr, int nb_jobs) \
585 { \
586  uint##depth##_t alpha; /* the amount of overlay to blend on to main */ \
587  uint##depth##_t *s, *sa, *d, *da; \
588  int i, imax, j, jmax; \
589  int slice_start, slice_end; \
590  const uint##depth##_t max = (1 << nbits) - 1; \
591  int bytes = depth / 8; \
592  \
593  imax = FFMIN3(-y + dst_h, FFMIN(src_h, dst_h), y + src_h); \
594  i = FFMAX(-y, 0); \
595  \
596  slice_start = i + (imax * jobnr) / nb_jobs; \
597  slice_end = i + ((imax * (jobnr+1)) / nb_jobs); \
598  \
599  sa = (uint##depth##_t *)(src->data[3] + (slice_start) * src->linesize[3]); \
600  da = (uint##depth##_t *)(dst->data[3] + (y + slice_start) * dst->linesize[3]); \
601  \
602  for (i = slice_start; i < slice_end; i++) { \
603  j = FFMAX(-x, 0); \
604  s = sa + j; \
605  d = da + x+j; \
606  \
607  for (jmax = FFMIN(-x + dst_w, src_w); j < jmax; j++) { \
608  alpha = *s; \
609  if (alpha != 0 && alpha != max) { \
610  uint8_t alpha_d = *d; \
611  alpha = UNPREMULTIPLY_ALPHA(alpha, alpha_d); \
612  } \
613  if (alpha == max) \
614  *d = *s; \
615  else if (alpha > 0) { \
616  /* apply alpha compositing: main_alpha += (1-main_alpha) * overlay_alpha */ \
617  if (nbits > 8) \
618  *d += (max - *d) * *s / max; \
619  else \
620  *d += FAST_DIV255((max - *d) * *s); \
621  } \
622  d += 1; \
623  s += 1; \
624  } \
625  da += dst->linesize[3] / bytes; \
626  sa += src->linesize[3] / bytes; \
627  } \
628 }
631 
632 #define DEFINE_BLEND_SLICE_YUV(depth, nbits) \
633 static av_always_inline void blend_slice_yuv_##depth##_##nbits##bits(AVFilterContext *ctx, \
634  AVFrame *dst, const AVFrame *src, \
635  int hsub, int vsub, \
636  int main_has_alpha, \
637  int x, int y, \
638  int is_straight, \
639  int jobnr, int nb_jobs) \
640 { \
641  OverlayContext *s = ctx->priv; \
642  const int src_w = src->width; \
643  const int src_h = src->height; \
644  const int dst_w = dst->width; \
645  const int dst_h = dst->height; \
646  \
647  blend_plane_##depth##_##nbits##bits(ctx, dst, src, src_w, src_h, dst_w, dst_h, 0, 0, 0, \
648  x, y, main_has_alpha, s->main_desc->comp[0].plane, s->main_desc->comp[0].offset, \
649  s->main_desc->comp[0].step, is_straight, 1, jobnr, nb_jobs); \
650  blend_plane_##depth##_##nbits##bits(ctx, dst, src, src_w, src_h, dst_w, dst_h, 1, hsub, vsub, \
651  x, y, main_has_alpha, s->main_desc->comp[1].plane, s->main_desc->comp[1].offset, \
652  s->main_desc->comp[1].step, is_straight, 1, jobnr, nb_jobs); \
653  blend_plane_##depth##_##nbits##bits(ctx, dst, src, src_w, src_h, dst_w, dst_h, 2, hsub, vsub, \
654  x, y, main_has_alpha, s->main_desc->comp[2].plane, s->main_desc->comp[2].offset, \
655  s->main_desc->comp[2].step, is_straight, 1, jobnr, nb_jobs); \
656  \
657  if (main_has_alpha) \
658  alpha_composite_##depth##_##nbits##bits(src, dst, src_w, src_h, dst_w, dst_h, x, y, \
659  jobnr, nb_jobs); \
660 }
663 
665  AVFrame *dst, const AVFrame *src,
666  int hsub, int vsub,
667  int main_has_alpha,
668  int x, int y,
669  int is_straight,
670  int jobnr,
671  int nb_jobs)
672 {
673  OverlayContext *s = ctx->priv;
674  const int src_w = src->width;
675  const int src_h = src->height;
676  const int dst_w = dst->width;
677  const int dst_h = dst->height;
678 
679  blend_plane_8_8bits(ctx, dst, src, src_w, src_h, dst_w, dst_h, 0, 0, 0, x, y, main_has_alpha,
680  s->main_desc->comp[1].plane, s->main_desc->comp[1].offset, s->main_desc->comp[1].step, is_straight, 0,
681  jobnr, nb_jobs);
682  blend_plane_8_8bits(ctx, dst, src, src_w, src_h, dst_w, dst_h, 1, hsub, vsub, x, y, main_has_alpha,
683  s->main_desc->comp[2].plane, s->main_desc->comp[2].offset, s->main_desc->comp[2].step, is_straight, 0,
684  jobnr, nb_jobs);
685  blend_plane_8_8bits(ctx, dst, src, src_w, src_h, dst_w, dst_h, 2, hsub, vsub, x, y, main_has_alpha,
686  s->main_desc->comp[0].plane, s->main_desc->comp[0].offset, s->main_desc->comp[0].step, is_straight, 0,
687  jobnr, nb_jobs);
688 
689  if (main_has_alpha)
690  alpha_composite_8_8bits(src, dst, src_w, src_h, dst_w, dst_h, x, y, jobnr, nb_jobs);
691 }
692 
/* Per-format slice callbacks, straight (non-premultiplied) alpha variants.
 * Each wraps a blend_slice_yuv_* / blend_slice_planar_rgb call with the
 * (hsub, vsub, main_has_alpha) combination baked in; arg is a ThreadData. */
static int blend_slice_yuv420(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    OverlayContext *s = ctx->priv;
    ThreadData *td = arg;
    blend_slice_yuv_8_8bits(ctx, td->dst, td->src, 1, 1, 0, s->x, s->y, 1, jobnr, nb_jobs);
    return 0;
}

static int blend_slice_yuva420(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    OverlayContext *s = ctx->priv;
    ThreadData *td = arg;
    blend_slice_yuv_8_8bits(ctx, td->dst, td->src, 1, 1, 1, s->x, s->y, 1, jobnr, nb_jobs);
    return 0;
}

static int blend_slice_yuv420p10(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    OverlayContext *s = ctx->priv;
    ThreadData *td = arg;
    blend_slice_yuv_16_10bits(ctx, td->dst, td->src, 1, 1, 0, s->x, s->y, 1, jobnr, nb_jobs);
    return 0;
}

static int blend_slice_yuva420p10(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    OverlayContext *s = ctx->priv;
    ThreadData *td = arg;
    blend_slice_yuv_16_10bits(ctx, td->dst, td->src, 1, 1, 1, s->x, s->y, 1, jobnr, nb_jobs);
    return 0;
}

static int blend_slice_yuv422p10(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    OverlayContext *s = ctx->priv;
    ThreadData *td = arg;
    blend_slice_yuv_16_10bits(ctx, td->dst, td->src, 1, 0, 0, s->x, s->y, 1, jobnr, nb_jobs);
    return 0;
}

static int blend_slice_yuva422p10(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    OverlayContext *s = ctx->priv;
    ThreadData *td = arg;
    blend_slice_yuv_16_10bits(ctx, td->dst, td->src, 1, 0, 1, s->x, s->y, 1, jobnr, nb_jobs);
    return 0;
}

static int blend_slice_yuv422(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    OverlayContext *s = ctx->priv;
    ThreadData *td = arg;
    blend_slice_yuv_8_8bits(ctx, td->dst, td->src, 1, 0, 0, s->x, s->y, 1, jobnr, nb_jobs);
    return 0;
}

static int blend_slice_yuva422(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    OverlayContext *s = ctx->priv;
    ThreadData *td = arg;
    blend_slice_yuv_8_8bits(ctx, td->dst, td->src, 1, 0, 1, s->x, s->y, 1, jobnr, nb_jobs);
    return 0;
}

static int blend_slice_yuv444(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    OverlayContext *s = ctx->priv;
    ThreadData *td = arg;
    blend_slice_yuv_8_8bits(ctx, td->dst, td->src, 0, 0, 0, s->x, s->y, 1, jobnr, nb_jobs);
    return 0;
}

static int blend_slice_yuva444(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    OverlayContext *s = ctx->priv;
    ThreadData *td = arg;
    blend_slice_yuv_8_8bits(ctx, td->dst, td->src, 0, 0, 1, s->x, s->y, 1, jobnr, nb_jobs);
    return 0;
}

static int blend_slice_yuv444p10(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    OverlayContext *s = ctx->priv;
    ThreadData *td = arg;
    blend_slice_yuv_16_10bits(ctx, td->dst, td->src, 0, 0, 0, s->x, s->y, 1, jobnr, nb_jobs);
    return 0;
}

static int blend_slice_yuva444p10(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    OverlayContext *s = ctx->priv;
    ThreadData *td = arg;
    blend_slice_yuv_16_10bits(ctx, td->dst, td->src, 0, 0, 1, s->x, s->y, 1, jobnr, nb_jobs);
    return 0;
}

static int blend_slice_gbrp(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    OverlayContext *s = ctx->priv;
    ThreadData *td = arg;
    blend_slice_planar_rgb(ctx, td->dst, td->src, 0, 0, 0, s->x, s->y, 1, jobnr, nb_jobs);
    return 0;
}

static int blend_slice_gbrap(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    OverlayContext *s = ctx->priv;
    ThreadData *td = arg;
    blend_slice_planar_rgb(ctx, td->dst, td->src, 0, 0, 1, s->x, s->y, 1, jobnr, nb_jobs);
    return 0;
}
804 
/* Per-format slice callbacks, premultiplied-alpha (_pm) variants:
 * identical to the ones above except is_straight = 0. */
static int blend_slice_yuv420_pm(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    OverlayContext *s = ctx->priv;
    ThreadData *td = arg;
    blend_slice_yuv_8_8bits(ctx, td->dst, td->src, 1, 1, 0, s->x, s->y, 0, jobnr, nb_jobs);
    return 0;
}

static int blend_slice_yuva420_pm(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    OverlayContext *s = ctx->priv;
    ThreadData *td = arg;
    blend_slice_yuv_8_8bits(ctx, td->dst, td->src, 1, 1, 1, s->x, s->y, 0, jobnr, nb_jobs);
    return 0;
}

static int blend_slice_yuv422_pm(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    OverlayContext *s = ctx->priv;
    ThreadData *td = arg;
    blend_slice_yuv_8_8bits(ctx, td->dst, td->src, 1, 0, 0, s->x, s->y, 0, jobnr, nb_jobs);
    return 0;
}

static int blend_slice_yuva422_pm(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    OverlayContext *s = ctx->priv;
    ThreadData *td = arg;
    blend_slice_yuv_8_8bits(ctx, td->dst, td->src, 1, 0, 1, s->x, s->y, 0, jobnr, nb_jobs);
    return 0;
}

static int blend_slice_yuv444_pm(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    OverlayContext *s = ctx->priv;
    ThreadData *td = arg;
    blend_slice_yuv_8_8bits(ctx, td->dst, td->src, 0, 0, 0, s->x, s->y, 0, jobnr, nb_jobs);
    return 0;
}

static int blend_slice_yuva444_pm(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    OverlayContext *s = ctx->priv;
    ThreadData *td = arg;
    blend_slice_yuv_8_8bits(ctx, td->dst, td->src, 0, 0, 1, s->x, s->y, 0, jobnr, nb_jobs);
    return 0;
}

static int blend_slice_gbrp_pm(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    OverlayContext *s = ctx->priv;
    ThreadData *td = arg;
    blend_slice_planar_rgb(ctx, td->dst, td->src, 0, 0, 0, s->x, s->y, 0, jobnr, nb_jobs);
    return 0;
}

static int blend_slice_gbrap_pm(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    OverlayContext *s = ctx->priv;
    ThreadData *td = arg;
    blend_slice_planar_rgb(ctx, td->dst, td->src, 0, 0, 1, s->x, s->y, 0, jobnr, nb_jobs);
    return 0;
}
868 
/* Packed-RGB slice callbacks: straight and premultiplied (_pm) variants,
 * with and without main-frame alpha. */
static int blend_slice_rgb(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    OverlayContext *s = ctx->priv;
    ThreadData *td = arg;
    blend_slice_packed_rgb(ctx, td->dst, td->src, 0, s->x, s->y, 1, jobnr, nb_jobs);
    return 0;
}

static int blend_slice_rgba(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    OverlayContext *s = ctx->priv;
    ThreadData *td = arg;
    blend_slice_packed_rgb(ctx, td->dst, td->src, 1, s->x, s->y, 1, jobnr, nb_jobs);
    return 0;
}

static int blend_slice_rgb_pm(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    OverlayContext *s = ctx->priv;
    ThreadData *td = arg;
    blend_slice_packed_rgb(ctx, td->dst, td->src, 0, s->x, s->y, 0, jobnr, nb_jobs);
    return 0;
}

static int blend_slice_rgba_pm(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    OverlayContext *s = ctx->priv;
    ThreadData *td = arg;
    blend_slice_packed_rgb(ctx, td->dst, td->src, 1, s->x, s->y, 0, jobnr, nb_jobs);
    return 0;
}
900 
902 {
903  OverlayContext *s = inlink->dst->priv;
904  const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
905 
906  av_image_fill_max_pixsteps(s->main_pix_step, NULL, pix_desc);
907 
908  s->hsub = pix_desc->log2_chroma_w;
909  s->vsub = pix_desc->log2_chroma_h;
910 
911  s->main_desc = pix_desc;
912 
913  s->main_is_packed_rgb =
914  ff_fill_rgba_map(s->main_rgba_map, inlink->format) >= 0;
915  s->main_has_alpha = ff_fmt_is_in(inlink->format, alpha_pix_fmts);
916  switch (s->format) {
918  s->blend_slice = s->main_has_alpha ? blend_slice_yuva420 : blend_slice_yuv420;
919  break;
921  s->blend_slice = s->main_has_alpha ? blend_slice_yuva420p10 : blend_slice_yuv420p10;
922  break;
924  s->blend_slice = s->main_has_alpha ? blend_slice_yuva422 : blend_slice_yuv422;
925  break;
927  s->blend_slice = s->main_has_alpha ? blend_slice_yuva422p10 : blend_slice_yuv422p10;
928  break;
930  s->blend_slice = s->main_has_alpha ? blend_slice_yuva444 : blend_slice_yuv444;
931  break;
933  s->blend_slice = s->main_has_alpha ? blend_slice_yuva444p10 : blend_slice_yuv444p10;
934  break;
935  case OVERLAY_FORMAT_RGB:
936  s->blend_slice = s->main_has_alpha ? blend_slice_rgba : blend_slice_rgb;
937  break;
938  case OVERLAY_FORMAT_GBRP:
939  s->blend_slice = s->main_has_alpha ? blend_slice_gbrap : blend_slice_gbrp;
940  break;
941  case OVERLAY_FORMAT_AUTO:
942  switch (inlink->format) {
943  case AV_PIX_FMT_YUVA420P:
944  s->blend_slice = blend_slice_yuva420;
945  break;
947  s->blend_slice = blend_slice_yuva420p10;
948  break;
949  case AV_PIX_FMT_YUVA422P:
950  s->blend_slice = blend_slice_yuva422;
951  break;
953  s->blend_slice = blend_slice_yuva422p10;
954  break;
955  case AV_PIX_FMT_YUVA444P:
956  s->blend_slice = blend_slice_yuva444;
957  break;
959  s->blend_slice = blend_slice_yuva444p10;
960  break;
961  case AV_PIX_FMT_ARGB:
962  case AV_PIX_FMT_RGBA:
963  case AV_PIX_FMT_BGRA:
964  case AV_PIX_FMT_ABGR:
965  s->blend_slice = blend_slice_rgba;
966  break;
967  case AV_PIX_FMT_GBRAP:
968  s->blend_slice = blend_slice_gbrap;
969  break;
970  default:
971  av_assert0(0);
972  break;
973  }
974  break;
975  }
976 
977  if (!s->alpha_format)
978  goto end;
979 
980  switch (s->format) {
982  s->blend_slice = s->main_has_alpha ? blend_slice_yuva420_pm : blend_slice_yuv420_pm;
983  break;
985  s->blend_slice = s->main_has_alpha ? blend_slice_yuva422_pm : blend_slice_yuv422_pm;
986  break;
988  s->blend_slice = s->main_has_alpha ? blend_slice_yuva444_pm : blend_slice_yuv444_pm;
989  break;
990  case OVERLAY_FORMAT_RGB:
991  s->blend_slice = s->main_has_alpha ? blend_slice_rgba_pm : blend_slice_rgb_pm;
992  break;
993  case OVERLAY_FORMAT_GBRP:
994  s->blend_slice = s->main_has_alpha ? blend_slice_gbrap_pm : blend_slice_gbrp_pm;
995  break;
996  case OVERLAY_FORMAT_AUTO:
997  switch (inlink->format) {
998  case AV_PIX_FMT_YUVA420P:
999  s->blend_slice = blend_slice_yuva420_pm;
1000  break;
1001  case AV_PIX_FMT_YUVA422P:
1002  s->blend_slice = blend_slice_yuva422_pm;
1003  break;
1004  case AV_PIX_FMT_YUVA444P:
1005  s->blend_slice = blend_slice_yuva444_pm;
1006  break;
1007  case AV_PIX_FMT_ARGB:
1008  case AV_PIX_FMT_RGBA:
1009  case AV_PIX_FMT_BGRA:
1010  case AV_PIX_FMT_ABGR:
1011  s->blend_slice = blend_slice_rgba_pm;
1012  break;
1013  case AV_PIX_FMT_GBRAP:
1014  s->blend_slice = blend_slice_gbrap_pm;
1015  break;
1016  default:
1017  av_assert0(0);
1018  break;
1019  }
1020  break;
1021  }
1022 
1023 end:
1024 #if ARCH_X86
1025  ff_overlay_init_x86(s, s->format, inlink->format,
1026  s->alpha_format, s->main_has_alpha);
1027 #endif
1028 
1029  return 0;
1030 }
1031 
1033 {
1034  AVFilterContext *ctx = fs->parent;
1035  AVFrame *mainpic, *second;
1036  OverlayContext *s = ctx->priv;
1037  AVFilterLink *inlink = ctx->inputs[0];
1038  int ret;
1039 
1040  ret = ff_framesync_dualinput_get_writable(fs, &mainpic, &second);
1041  if (ret < 0)
1042  return ret;
1043  if (!second)
1044  return ff_filter_frame(ctx->outputs[0], mainpic);
1045 
1046  if (s->eval_mode == EVAL_MODE_FRAME) {
1047 
1048  s->var_values[VAR_N] = inlink->frame_count_out;
1049  s->var_values[VAR_T] = mainpic->pts == AV_NOPTS_VALUE ?
1050  NAN : mainpic->pts * av_q2d(inlink->time_base);
1051 #if FF_API_FRAME_PKT
1053  {
1054  int64_t pos = mainpic->pkt_pos;
1055  s->var_values[VAR_POS] = pos == -1 ? NAN : pos;
1056  }
1058 #endif
1059 
1060  s->var_values[VAR_OVERLAY_W] = s->var_values[VAR_OW] = second->width;
1061  s->var_values[VAR_OVERLAY_H] = s->var_values[VAR_OH] = second->height;
1062  s->var_values[VAR_MAIN_W ] = s->var_values[VAR_MW] = mainpic->width;
1063  s->var_values[VAR_MAIN_H ] = s->var_values[VAR_MH] = mainpic->height;
1064 
1065  eval_expr(ctx);
1066  av_log(ctx, AV_LOG_DEBUG, "n:%f t:%f x:%f xi:%d y:%f yi:%d\n",
1067  s->var_values[VAR_N], s->var_values[VAR_T],
1068  s->var_values[VAR_X], s->x,
1069  s->var_values[VAR_Y], s->y);
1070  }
1071 
1072  if (s->x < mainpic->width && s->x + second->width >= 0 &&
1073  s->y < mainpic->height && s->y + second->height >= 0) {
1074  ThreadData td;
1075 
1076  td.dst = mainpic;
1077  td.src = second;
1078  ff_filter_execute(ctx, s->blend_slice, &td, NULL, FFMIN(FFMAX(1, FFMIN3(s->y + second->height, FFMIN(second->height, mainpic->height), mainpic->height - s->y)),
1080  }
1081  return ff_filter_frame(ctx->outputs[0], mainpic);
1082 }
1083 
1085 {
1086  OverlayContext *s = ctx->priv;
1087 
1088  s->fs.on_event = do_blend;
1089  return 0;
1090 }
1091 
1093 {
1094  OverlayContext *s = ctx->priv;
1095  return ff_framesync_activate(&s->fs);
1096 }
1097 
1098 #define OFFSET(x) offsetof(OverlayContext, x)
1099 #define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
1100 
1101 static const AVOption overlay_options[] = {
1102  { "x", "set the x expression", OFFSET(x_expr), AV_OPT_TYPE_STRING, {.str = "0"}, 0, 0, FLAGS },
1103  { "y", "set the y expression", OFFSET(y_expr), AV_OPT_TYPE_STRING, {.str = "0"}, 0, 0, FLAGS },
1104  { "eof_action", "Action to take when encountering EOF from secondary input ",
1105  OFFSET(fs.opt_eof_action), AV_OPT_TYPE_INT, { .i64 = EOF_ACTION_REPEAT },
1106  EOF_ACTION_REPEAT, EOF_ACTION_PASS, .flags = FLAGS, "eof_action" },
1107  { "repeat", "Repeat the previous frame.", 0, AV_OPT_TYPE_CONST, { .i64 = EOF_ACTION_REPEAT }, .flags = FLAGS, "eof_action" },
1108  { "endall", "End both streams.", 0, AV_OPT_TYPE_CONST, { .i64 = EOF_ACTION_ENDALL }, .flags = FLAGS, "eof_action" },
1109  { "pass", "Pass through the main input.", 0, AV_OPT_TYPE_CONST, { .i64 = EOF_ACTION_PASS }, .flags = FLAGS, "eof_action" },
1110  { "eval", "specify when to evaluate expressions", OFFSET(eval_mode), AV_OPT_TYPE_INT, {.i64 = EVAL_MODE_FRAME}, 0, EVAL_MODE_NB-1, FLAGS, "eval" },
1111  { "init", "eval expressions once during initialization", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_INIT}, .flags = FLAGS, .unit = "eval" },
1112  { "frame", "eval expressions per-frame", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_FRAME}, .flags = FLAGS, .unit = "eval" },
1113  { "shortest", "force termination when the shortest input terminates", OFFSET(fs.opt_shortest), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS },
1114  { "format", "set output format", OFFSET(format), AV_OPT_TYPE_INT, {.i64=OVERLAY_FORMAT_YUV420}, 0, OVERLAY_FORMAT_NB-1, FLAGS, "format" },
1115  { "yuv420", "", 0, AV_OPT_TYPE_CONST, {.i64=OVERLAY_FORMAT_YUV420}, .flags = FLAGS, .unit = "format" },
1116  { "yuv420p10", "", 0, AV_OPT_TYPE_CONST, {.i64=OVERLAY_FORMAT_YUV420P10}, .flags = FLAGS, .unit = "format" },
1117  { "yuv422", "", 0, AV_OPT_TYPE_CONST, {.i64=OVERLAY_FORMAT_YUV422}, .flags = FLAGS, .unit = "format" },
1118  { "yuv422p10", "", 0, AV_OPT_TYPE_CONST, {.i64=OVERLAY_FORMAT_YUV422P10}, .flags = FLAGS, .unit = "format" },
1119  { "yuv444", "", 0, AV_OPT_TYPE_CONST, {.i64=OVERLAY_FORMAT_YUV444}, .flags = FLAGS, .unit = "format" },
1120  { "yuv444p10", "", 0, AV_OPT_TYPE_CONST, {.i64=OVERLAY_FORMAT_YUV444P10}, .flags = FLAGS, .unit = "format" },
1121  { "rgb", "", 0, AV_OPT_TYPE_CONST, {.i64=OVERLAY_FORMAT_RGB}, .flags = FLAGS, .unit = "format" },
1122  { "gbrp", "", 0, AV_OPT_TYPE_CONST, {.i64=OVERLAY_FORMAT_GBRP}, .flags = FLAGS, .unit = "format" },
1123  { "auto", "", 0, AV_OPT_TYPE_CONST, {.i64=OVERLAY_FORMAT_AUTO}, .flags = FLAGS, .unit = "format" },
1124  { "repeatlast", "repeat overlay of the last overlay frame", OFFSET(fs.opt_repeatlast), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, FLAGS },
1125  { "alpha", "alpha format", OFFSET(alpha_format), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS, "alpha_format" },
1126  { "straight", "", 0, AV_OPT_TYPE_CONST, {.i64=0}, .flags = FLAGS, .unit = "alpha_format" },
1127  { "premultiplied", "", 0, AV_OPT_TYPE_CONST, {.i64=1}, .flags = FLAGS, .unit = "alpha_format" },
1128  { NULL }
1129 };
1130 
1132 
1134  {
1135  .name = "main",
1136  .type = AVMEDIA_TYPE_VIDEO,
1137  .config_props = config_input_main,
1138  },
1139  {
1140  .name = "overlay",
1141  .type = AVMEDIA_TYPE_VIDEO,
1142  .config_props = config_input_overlay,
1143  },
1144 };
1145 
1147  {
1148  .name = "default",
1149  .type = AVMEDIA_TYPE_VIDEO,
1150  .config_props = config_output,
1151  },
1152 };
1153 
1155  .name = "overlay",
1156  .description = NULL_IF_CONFIG_SMALL("Overlay a video source on top of the input."),
1157  .preinit = overlay_framesync_preinit,
1158  .init = init,
1159  .uninit = uninit,
1160  .priv_size = sizeof(OverlayContext),
1161  .priv_class = &overlay_class,
1162  .activate = activate,
1169 };
formats
formats
Definition: signature.h:48
FF_ENABLE_DEPRECATION_WARNINGS
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:73
VAR_MAIN_H
@ VAR_MAIN_H
Definition: vf_drawtext.c:137
ff_framesync_configure
int ff_framesync_configure(FFFrameSync *fs)
Configure a frame sync structure.
Definition: framesync.c:134
td
#define td
Definition: regdef.h:70
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
blend_slice_packed_rgb
static av_always_inline void blend_slice_packed_rgb(AVFilterContext *ctx, AVFrame *dst, const AVFrame *src, int main_has_alpha, int x, int y, int is_straight, int jobnr, int nb_jobs)
Blend image in src to destination buffer dst at position (x, y).
Definition: vf_overlay.c:364
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
VAR_OH
@ VAR_OH
Definition: scale_eval.c:46
blend_slice_rgb
static int blend_slice_rgb(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_overlay.c:869
set_expr
static int set_expr(AVExpr **pexpr, const char *expr, const char *option, void *log_ctx)
Definition: vf_overlay.c:111
OVERLAY
#define OVERLAY
Definition: vf_overlay.c:66
ff_make_format_list
AVFilterFormats * ff_make_format_list(const int *fmts)
Create a list of supported formats.
Definition: formats.c:401
ff_framesync_uninit
void ff_framesync_uninit(FFFrameSync *fs)
Free all memory currently allocated.
Definition: framesync.c:304
ff_filter_frame
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:978
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2964
blend_slice_yuv422
static int blend_slice_yuv422(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_overlay.c:741
avfilter_vf_overlay_outputs
static const AVFilterPad avfilter_vf_overlay_outputs[]
Definition: vf_overlay.c:1146
inlink
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
Definition: filter_design.txt:212
blend_slice_yuv422p10
static int blend_slice_yuv422p10(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_overlay.c:725
blend_slice_yuva422_pm
static int blend_slice_yuva422_pm(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_overlay.c:829
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:340
pixdesc.h
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:452
AVFrame::width
int width
Definition: frame.h:412
do_blend
static int do_blend(FFFrameSync *fs)
Definition: vf_overlay.c:1032
blend_slice_yuv444p10
static int blend_slice_yuv444p10(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_overlay.c:773
AV_PIX_FMT_YUVA420P10
#define AV_PIX_FMT_YUVA420P10
Definition: pixfmt.h:506
AVOption
AVOption.
Definition: opt.h:251
EOF_ACTION_ENDALL
@ EOF_ACTION_ENDALL
Definition: framesync.h:28
FILTER_QUERY_FUNC
#define FILTER_QUERY_FUNC(func)
Definition: internal.h:169
AV_PIX_FMT_YUV420P10
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:468
VAR_HSUB
@ VAR_HSUB
Definition: boxblur.c:40
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:196
AV_PIX_FMT_BGR24
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:69
AV_PIX_FMT_BGRA
@ AV_PIX_FMT_BGRA
packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
Definition: pixfmt.h:95
mathematics.h
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
blend_slice_yuv420
static int blend_slice_yuv420(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_overlay.c:693
AVFilter::name
const char * name
Filter name.
Definition: avfilter.h:170
FFFrameSync
Frame sync structure.
Definition: framesync.h:168
blend_slice_yuva422p10
static int blend_slice_yuva422p10(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_overlay.c:733
video.h
VAR_MAIN_W
@ VAR_MAIN_W
Definition: vf_drawtext.c:138
AV_PIX_FMT_YUVA422P10
#define AV_PIX_FMT_YUVA422P10
Definition: pixfmt.h:507
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:361
hsub
static void hsub(htype *dst, const htype *src, int bins)
Definition: vf_median.c:73
AVFilterFormats
A list of supported formats for one end of a filter link.
Definition: formats.h:64
blend_slice_gbrap
static int blend_slice_gbrap(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_overlay.c:797
formats.h
av_expr_parse
int av_expr_parse(AVExpr **expr, const char *s, const char *const *const_names, const char *const *func1_names, double(*const *funcs1)(void *, double), const char *const *func2_names, double(*const *funcs2)(void *, double, double), int log_offset, void *log_ctx)
Parse an expression.
Definition: eval.c:685
var_names
static const char *const var_names[]
Definition: vf_overlay.c:48
S
#define S(s, c, i)
Definition: flacdsp_template.c:46
VAR_T
@ VAR_T
Definition: aeval.c:54
VAR_VSUB
@ VAR_VSUB
Definition: boxblur.c:41
OVERLAY_FORMAT_RGB
@ OVERLAY_FORMAT_RGB
Definition: vf_overlay.h:51
blend_slice_gbrp_pm
static int blend_slice_gbrp_pm(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_overlay.c:853
AV_PIX_FMT_GBRAP
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:205
FAST_DIV255
#define FAST_DIV255(x)
Definition: vf_overlay.c:352
OVERLAY_FORMAT_YUV422P10
@ OVERLAY_FORMAT_YUV422P10
Definition: vf_overlay.h:48
av_expr_free
void av_expr_free(AVExpr *e)
Free a parsed expression previously created with av_expr_parse().
Definition: eval.c:336
R
#define R
Definition: vf_overlay.c:68
AVFilterPad
A filter pad used for either input or output.
Definition: internal.h:47
ff_overlay_init_x86
void ff_overlay_init_x86(OverlayContext *s, int format, int pix_format, int alpha_format, int main_has_alpha)
Definition: vf_overlay_init.c:35
AV_PIX_FMT_YUV444P10
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:471
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
config_input_main
static int config_input_main(AVFilterLink *inlink)
Definition: vf_overlay.c:901
query_formats
static int query_formats(AVFilterContext *ctx)
Definition: vf_overlay.c:164
av_cold
#define av_cold
Definition: attributes.h:90
blend_slice_rgb_pm
static int blend_slice_rgb_pm(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_overlay.c:885
EVAL_MODE_FRAME
@ EVAL_MODE_FRAME
Definition: vf_overlay.c:79
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:79
s
#define s(width, name)
Definition: cbs_vp9.c:198
AV_PIX_FMT_YUVA420P
@ AV_PIX_FMT_YUVA420P
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:101
OVERLAY_FORMAT_YUV422
@ OVERLAY_FORMAT_YUV422
Definition: vf_overlay.h:47
format
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample format(the sample packing is implied by the sample format) and sample rate. The lists are not just lists
ff_formats_ref
int ff_formats_ref(AVFilterFormats *f, AVFilterFormats **ref)
Add *ref as a new reference to formats.
Definition: formats.c:617
slice_end
static int slice_end(AVCodecContext *avctx, AVFrame *pict)
Handle slice ends.
Definition: mpeg12dec.c:1979
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
ff_set_common_formats_from_list
int ff_set_common_formats_from_list(AVFilterContext *ctx, const int *fmts)
Equivalent to ff_set_common_formats(ctx, ff_make_format_list(fmts))
Definition: formats.c:776
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
VAR_MW
@ VAR_MW
Definition: vf_overlay.h:28
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
ctx
AVFormatContext * ctx
Definition: movenc.c:48
av_expr_eval
double av_expr_eval(AVExpr *e, const double *const_values, void *opaque)
Evaluate a previously parsed expression.
Definition: eval.c:766
UNPREMULTIPLY_ALPHA
#define UNPREMULTIPLY_ALPHA(x, y)
Definition: vf_overlay.c:358
AVExpr
Definition: eval.c:157
AVPixFmtDescriptor::log2_chroma_w
uint8_t log2_chroma_w
Amount to shift the luma width right to find the chroma width.
Definition: pixdesc.h:80
vf_overlay.h
B
#define B
Definition: vf_overlay.c:70
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
blend_slice_yuva420p10
static int blend_slice_yuva420p10(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_overlay.c:717
eval_expr
static void eval_expr(AVFilterContext *ctx)
Definition: vf_overlay.c:99
EOF_ACTION_PASS
@ EOF_ACTION_PASS
Definition: framesync.h:29
NAN
#define NAN
Definition: mathematics.h:115
FILTER_INPUTS
#define FILTER_INPUTS(array)
Definition: internal.h:192
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:80
AV_PIX_FMT_RGBA
@ AV_PIX_FMT_RGBA
packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
Definition: pixfmt.h:93
arg
const char * arg
Definition: jacosubdec.c:67
ff_vf_overlay
const AVFilter ff_vf_overlay
Definition: vf_overlay.c:1154
option
option
Definition: libkvazaar.c:320
ThreadData::dst
AVFrame * dst
Definition: vf_blend.c:56
config_input_overlay
static int config_input_overlay(AVFilterLink *inlink)
Definition: vf_overlay.c:285
NULL
#define NULL
Definition: coverity.c:32
EVAL_MODE_NB
@ EVAL_MODE_NB
Definition: vf_overlay.c:80
fs
#define fs(width, name, subs,...)
Definition: cbs_vp9.c:200
ThreadData::src
const uint8_t * src
Definition: vf_bm3d.c:54
blend_slice_yuva444_pm
static int blend_slice_yuva444_pm(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_overlay.c:845
isnan
#define isnan(x)
Definition: libm.h:340
uninit
static av_cold void uninit(AVFilterContext *ctx)
Definition: vf_overlay.c:83
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:78
OverlayContext
Definition: vf_overlay.h:57
VAR_POS
@ VAR_POS
Definition: noise_bsf.c:55
ff_fmt_is_in
int ff_fmt_is_in(int fmt, const int *fmts)
Tell if an integer is contained in the provided -1-terminated list of integers.
Definition: formats.c:372
AV_PIX_FMT_YUV422P10
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:469
blend_slice_yuv420_pm
static int blend_slice_yuv420_pm(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_overlay.c:805
AV_PIX_FMT_ABGR
@ AV_PIX_FMT_ABGR
packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
Definition: pixfmt.h:94
avfilter_vf_overlay_inputs
static const AVFilterPad avfilter_vf_overlay_inputs[]
Definition: vf_overlay.c:1133
VAR_Y
@ VAR_Y
Definition: vf_blend.c:52
eval.h
blend_slice_yuv420p10
static int blend_slice_yuv420p10(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_overlay.c:709
AV_PIX_FMT_RGB24
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:68
A
#define A
Definition: vf_overlay.c:71
overlay_options
static const AVOption overlay_options[]
Definition: vf_overlay.c:1101
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:106
ff_framesync_init_dualinput
int ff_framesync_init_dualinput(FFFrameSync *fs, AVFilterContext *parent)
Initialize a frame sync structure for dualinput.
Definition: framesync.c:372
VAR_N
@ VAR_N
Definition: noise_bsf.c:47
OVERLAY_FORMAT_NB
@ OVERLAY_FORMAT_NB
Definition: vf_overlay.h:54
OVERLAY_FORMAT_YUV420P10
@ OVERLAY_FORMAT_YUV420P10
Definition: vf_overlay.h:46
sp
#define sp
Definition: regdef.h:63
OVERLAY_FORMAT_YUV420
@ OVERLAY_FORMAT_YUV420
Definition: vf_overlay.h:45
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
VAR_MH
@ VAR_MH
Definition: vf_overlay.h:29
AVFrame::pkt_pos
attribute_deprecated int64_t pkt_pos
reordered pos from the last AVPacket that has been input into the decoder
Definition: frame.h:687
config_output
static int config_output(AVFilterLink *outlink)
Definition: vf_overlay.c:334
OVERLAY_FORMAT_YUV444P10
@ OVERLAY_FORMAT_YUV444P10
Definition: vf_overlay.h:50
AV_PIX_FMT_YUVA444P
@ AV_PIX_FMT_YUVA444P
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
Definition: pixfmt.h:167
AV_PIX_FMT_YUVA444P10
#define AV_PIX_FMT_YUVA444P10
Definition: pixfmt.h:508
OVERLAY_FORMAT_AUTO
@ OVERLAY_FORMAT_AUTO
Definition: vf_overlay.h:53
DEFINE_ALPHA_COMPOSITE
#define DEFINE_ALPHA_COMPOSITE(depth, nbits)
Definition: vf_overlay.c:579
blend_slice_rgba
static int blend_slice_rgba(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_overlay.c:877
internal.h
blend_slice_yuva420_pm
static int blend_slice_yuva420_pm(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_overlay.c:813
AV_PIX_FMT_ARGB
@ AV_PIX_FMT_ARGB
packed ARGB 8:8:8:8, 32bpp, ARGBARGB...
Definition: pixfmt.h:92
VAR_X
@ VAR_X
Definition: vf_blend.c:52
FLAGS
#define FLAGS
Definition: vf_overlay.c:1099
blend_slice_gbrap_pm
static int blend_slice_gbrap_pm(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_overlay.c:861
blend_slice_yuva444
static int blend_slice_yuva444(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_overlay.c:765
blend_slice_rgba_pm
static int blend_slice_rgba_pm(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_overlay.c:893
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:255
blend_slice_planar_rgb
static av_always_inline void blend_slice_planar_rgb(AVFilterContext *ctx, AVFrame *dst, const AVFrame *src, int hsub, int vsub, int main_has_alpha, int x, int y, int is_straight, int jobnr, int nb_jobs)
Definition: vf_overlay.c:664
FFMIN3
#define FFMIN3(a, b, c)
Definition: macros.h:50
common.h
ff_filter_get_nb_threads
int ff_filter_get_nb_threads(AVFilterContext *ctx)
Get number of threads for current filter instance.
Definition: avfilter.c:786
ThreadData
Used for passing data between threads.
Definition: dsddec.c:69
EvalMode
EvalMode
Definition: af_volume.h:39
av_always_inline
#define av_always_inline
Definition: attributes.h:49
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
AV_PIX_FMT_NV21
@ AV_PIX_FMT_NV21
as above, but U and V bytes are swapped
Definition: pixfmt.h:90
AVFilterPad::name
const char * name
Pad name.
Definition: internal.h:53
blend_slice_gbrp
static int blend_slice_gbrp(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_overlay.c:789
VAR_OVERLAY_H
@ VAR_OVERLAY_H
Definition: vf_overlay.h:31
VAR_OW
@ VAR_OW
Definition: scale_eval.c:45
normalize_xy
static int normalize_xy(double d, int chroma_sub)
Definition: vf_overlay.c:92
blend_slice_yuv444
static int blend_slice_yuv444(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_overlay.c:757
AVFilter
Filter definition.
Definition: avfilter.h:166
process_command
static int process_command(AVFilterContext *ctx, const char *cmd, const char *args, char *res, int res_len, int flags)
Definition: vf_overlay.c:132
ret
ret
Definition: filter_design.txt:187
AV_PIX_FMT_NV12
@ AV_PIX_FMT_NV12
planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (firs...
Definition: pixfmt.h:89
DEFINE_BLEND_PLANE
#define DEFINE_BLEND_PLANE(depth, nbits)
Definition: vf_overlay.c:451
blend_slice_yuva444p10
static int blend_slice_yuva444p10(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_overlay.c:781
MAIN
#define MAIN
Definition: vf_overlay.c:65
pos
unsigned int pos
Definition: spdifenc.c:413
EOF_ACTION_REPEAT
@ EOF_ACTION_REPEAT
Definition: framesync.h:27
blend_slice_yuva422
static int blend_slice_yuva422(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_overlay.c:749
AVFrame::height
int height
Definition: frame.h:412
framesync.h
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:225
avfilter.h
activate
static int activate(AVFilterContext *ctx)
Definition: vf_overlay.c:1092
G
#define G
Definition: vf_overlay.c:69
av_image_fill_max_pixsteps
void av_image_fill_max_pixsteps(int max_pixsteps[4], int max_pixstep_comps[4], const AVPixFmtDescriptor *pixdesc)
Compute the max pixel step for each plane of an image with a format described by pixdesc.
Definition: imgutils.c:35
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
blend_slice_yuv444_pm
static int blend_slice_yuv444_pm(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_overlay.c:837
AVFilterContext
An instance of a filter.
Definition: avfilter.h:397
FF_DISABLE_DEPRECATION_WARNINGS
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:72
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:158
OVERLAY_FORMAT_GBRP
@ OVERLAY_FORMAT_GBRP
Definition: vf_overlay.h:52
AVFILTER_FLAG_SLICE_THREADS
#define AVFILTER_FLAG_SLICE_THREADS
The filter supports multithreading by splitting frames into multiple parts and processing them concur...
Definition: avfilter.h:117
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
OVERLAY_FORMAT_YUV444
@ OVERLAY_FORMAT_YUV444
Definition: vf_overlay.h:49
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
FRAMESYNC_DEFINE_CLASS
FRAMESYNC_DEFINE_CLASS(overlay, OverlayContext, fs)
alpha
static const int16_t alpha[]
Definition: ilbcdata.h:55
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:244
FILTER_OUTPUTS
#define FILTER_OUTPUTS(array)
Definition: internal.h:193
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
EVAL_MODE_INIT
@ EVAL_MODE_INIT
Definition: vf_overlay.c:78
ff_fill_rgba_map
int ff_fill_rgba_map(uint8_t *rgba_map, enum AVPixelFormat pix_fmt)
Definition: drawutils.c:35
DEFINE_BLEND_SLICE_YUV
#define DEFINE_BLEND_SLICE_YUV(depth, nbits)
Definition: vf_overlay.c:632
d
d
Definition: ffmpeg_filter.c:368
AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL
#define AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL
Same as AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC, except that the filter will have its filter_frame() c...
Definition: avfilter.h:155
imgutils.h
timestamp.h
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:474
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:385
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
ff_framesync_activate
int ff_framesync_activate(FFFrameSync *fs)
Examine the frames in the filter's input and try to produce output.
Definition: framesync.c:355
avstring.h
AV_OPT_TYPE_STRING
@ AV_OPT_TYPE_STRING
Definition: opt.h:229
ff_framesync_dualinput_get_writable
int ff_framesync_dualinput_get_writable(FFFrameSync *fs, AVFrame **f0, AVFrame **f1)
Same as ff_framesync_dualinput_get(), but make sure that f0 is writable.
Definition: framesync.c:410
drawutils.h
alpha_pix_fmts
static enum AVPixelFormat alpha_pix_fmts[]
Definition: vf_overlay.c:157
OFFSET
#define OFFSET(x)
Definition: vf_overlay.c:1098
ff_filter_execute
static av_always_inline int ff_filter_execute(AVFilterContext *ctx, avfilter_action_func *func, void *arg, int *ret, int nb_jobs)
Definition: internal.h:144
VAR_OVERLAY_W
@ VAR_OVERLAY_W
Definition: vf_overlay.h:30
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Definition: opt.h:234
blend_slice_yuva420
static int blend_slice_yuva420(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_overlay.c:701
AVPixFmtDescriptor::log2_chroma_h
uint8_t log2_chroma_h
Amount to shift the luma height right to find the chroma height.
Definition: pixdesc.h:89
AV_PIX_FMT_YUVA422P
@ AV_PIX_FMT_YUVA422P
planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
Definition: pixfmt.h:166
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2884
blend_slice_yuv422_pm
static int blend_slice_yuv422_pm(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_overlay.c:821
init
static av_cold int init(AVFilterContext *ctx)
Definition: vf_overlay.c:1084