FFmpeg
vf_overlay.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2010 Stefano Sabatini
3  * Copyright (c) 2010 Baptiste Coudurier
4  * Copyright (c) 2007 Bobby Bingham
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 /**
24  * @file
25  * overlay one video on top of another
26  */
27 
28 #include "avfilter.h"
29 #include "formats.h"
30 #include "libavutil/common.h"
31 #include "libavutil/eval.h"
32 #include "libavutil/avstring.h"
33 #include "libavutil/pixdesc.h"
34 #include "libavutil/imgutils.h"
35 #include "libavutil/mathematics.h"
36 #include "libavutil/opt.h"
37 #include "libavutil/timestamp.h"
38 #include "internal.h"
39 #include "drawutils.h"
40 #include "framesync.h"
41 #include "video.h"
42 #include "vf_overlay.h"
43 
/**
 * Per-call payload handed to the slice-threaded blend functions through
 * ctx->internal->execute() (see do_blend below): the frames are shared,
 * each job works on its own row range.
 */
typedef struct ThreadData {
    AVFrame *dst, *src; // dst: main frame, blended in place; src: overlay frame
} ThreadData;
47 
/* Names usable in the "x" and "y" option expressions.
 * NOTE: the order of this table is significant — each entry's position is the
 * index used for s->var_values[VAR_*]; keep it in sync with the VAR_* enum
 * (declared in vf_overlay.h — confirm there before reordering). */
static const char *const var_names[] = {
    "main_w",    "W", ///< width  of the main    video
    "main_h",    "H", ///< height of the main    video
    "overlay_w", "w", ///< width  of the overlay video
    "overlay_h", "h", ///< height of the overlay video
    "hsub",           ///< horizontal chroma subsampling factor (1 << log2_chroma_w)
    "vsub",           ///< vertical   chroma subsampling factor (1 << log2_chroma_h)
    "x",              ///< evaluated x position
    "y",              ///< evaluated y position
    "n",              ///< number of frame
    "pos",            ///< position in the file
    "t",              ///< timestamp expressed in seconds
    NULL
};
62 
/* Input pad indices: ctx->inputs[MAIN] is the base video,
 * ctx->inputs[OVERLAY] is the video blended on top of it. */
#define MAIN 0
#define OVERLAY 1

/* Logical component indices used with the rgba_map lookup tables. */
#define R 0
#define G 1
#define B 2
#define A 3

/* Logical YUV component indices. */
#define Y 0
#define U 1
#define V 2
75 enum EvalMode {
79 };
80 
82 {
83  OverlayContext *s = ctx->priv;
84 
86  av_expr_free(s->x_pexpr); s->x_pexpr = NULL;
87  av_expr_free(s->y_pexpr); s->y_pexpr = NULL;
88 }
89 
/**
 * Convert an evaluated (double) overlay coordinate to an integer pixel
 * position, rounded down to a multiple of the chroma subsampling step so
 * chroma planes stay aligned.
 *
 * @param d          evaluated coordinate; may be NaN when the expression
 *                   is not (yet) computable
 * @param chroma_sub log2 of the chroma subsampling factor for this axis
 * @return aligned integer coordinate, or INT_MAX for NaN input
 */
static inline int normalize_xy(double d, int chroma_sub)
{
    const int align_mask = ~((1 << chroma_sub) - 1);

    if (isnan(d))
        return INT_MAX; // out-of-frame sentinel: the overlay is simply not drawn
    return (int)d & align_mask;
}
96 
98 {
99  OverlayContext *s = ctx->priv;
100 
103  /* It is necessary if x is expressed from y */
105  s->x = normalize_xy(s->var_values[VAR_X], s->hsub);
106  s->y = normalize_xy(s->var_values[VAR_Y], s->vsub);
107 }
108 
109 static int set_expr(AVExpr **pexpr, const char *expr, const char *option, void *log_ctx)
110 {
111  int ret;
112  AVExpr *old = NULL;
113 
114  if (*pexpr)
115  old = *pexpr;
116  ret = av_expr_parse(pexpr, expr, var_names,
117  NULL, NULL, NULL, NULL, 0, log_ctx);
118  if (ret < 0) {
119  av_log(log_ctx, AV_LOG_ERROR,
120  "Error when evaluating the expression '%s' for %s\n",
121  expr, option);
122  *pexpr = old;
123  return ret;
124  }
125 
126  av_expr_free(old);
127  return 0;
128 }
129 
130 static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
131  char *res, int res_len, int flags)
132 {
133  OverlayContext *s = ctx->priv;
134  int ret;
135 
136  if (!strcmp(cmd, "x"))
137  ret = set_expr(&s->x_pexpr, args, cmd, ctx);
138  else if (!strcmp(cmd, "y"))
139  ret = set_expr(&s->y_pexpr, args, cmd, ctx);
140  else
141  ret = AVERROR(ENOSYS);
142 
143  if (ret < 0)
144  return ret;
145 
146  if (s->eval_mode == EVAL_MODE_INIT) {
147  eval_expr(ctx);
148  av_log(ctx, AV_LOG_VERBOSE, "x:%f xi:%d y:%f yi:%d\n",
149  s->var_values[VAR_X], s->x,
150  s->var_values[VAR_Y], s->y);
151  }
152  return ret;
153 }
154 
155 static const enum AVPixelFormat alpha_pix_fmts[] = {
160 };
161 
163 {
164  OverlayContext *s = ctx->priv;
165 
166  /* overlay formats contains alpha, for avoiding conversion with alpha information loss */
167  static const enum AVPixelFormat main_pix_fmts_yuv420[] = {
171  };
172  static const enum AVPixelFormat overlay_pix_fmts_yuv420[] = {
173  AV_PIX_FMT_YUVA420P, AV_PIX_FMT_NONE
174  };
175 
176  static const enum AVPixelFormat main_pix_fmts_yuv420p10[] = {
178  AV_PIX_FMT_NONE
179  };
180  static const enum AVPixelFormat overlay_pix_fmts_yuv420p10[] = {
181  AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_NONE
182  };
183 
184  static const enum AVPixelFormat main_pix_fmts_yuv422[] = {
186  };
187  static const enum AVPixelFormat overlay_pix_fmts_yuv422[] = {
188  AV_PIX_FMT_YUVA422P, AV_PIX_FMT_NONE
189  };
190 
191  static const enum AVPixelFormat main_pix_fmts_yuv422p10[] = {
193  };
194  static const enum AVPixelFormat overlay_pix_fmts_yuv422p10[] = {
195  AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_NONE
196  };
197 
198  static const enum AVPixelFormat main_pix_fmts_yuv444[] = {
200  };
201  static const enum AVPixelFormat overlay_pix_fmts_yuv444[] = {
202  AV_PIX_FMT_YUVA444P, AV_PIX_FMT_NONE
203  };
204 
205  static const enum AVPixelFormat main_pix_fmts_gbrp[] = {
206  AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP, AV_PIX_FMT_NONE
207  };
208  static const enum AVPixelFormat overlay_pix_fmts_gbrp[] = {
209  AV_PIX_FMT_GBRAP, AV_PIX_FMT_NONE
210  };
211 
212  static const enum AVPixelFormat main_pix_fmts_rgb[] = {
216  AV_PIX_FMT_NONE
217  };
218  static const enum AVPixelFormat overlay_pix_fmts_rgb[] = {
221  AV_PIX_FMT_NONE
222  };
223 
224  AVFilterFormats *main_formats = NULL;
225  AVFilterFormats *overlay_formats = NULL;
226  int ret;
227 
228  switch (s->format) {
230  if (!(main_formats = ff_make_format_list(main_pix_fmts_yuv420)) ||
231  !(overlay_formats = ff_make_format_list(overlay_pix_fmts_yuv420))) {
232  ret = AVERROR(ENOMEM);
233  goto fail;
234  }
235  break;
237  if (!(main_formats = ff_make_format_list(main_pix_fmts_yuv420p10)) ||
238  !(overlay_formats = ff_make_format_list(overlay_pix_fmts_yuv420p10))) {
239  ret = AVERROR(ENOMEM);
240  goto fail;
241  }
242  break;
244  if (!(main_formats = ff_make_format_list(main_pix_fmts_yuv422)) ||
245  !(overlay_formats = ff_make_format_list(overlay_pix_fmts_yuv422))) {
246  ret = AVERROR(ENOMEM);
247  goto fail;
248  }
249  break;
251  if (!(main_formats = ff_make_format_list(main_pix_fmts_yuv422p10)) ||
252  !(overlay_formats = ff_make_format_list(overlay_pix_fmts_yuv422p10))) {
253  ret = AVERROR(ENOMEM);
254  goto fail;
255  }
256  break;
258  if (!(main_formats = ff_make_format_list(main_pix_fmts_yuv444)) ||
259  !(overlay_formats = ff_make_format_list(overlay_pix_fmts_yuv444))) {
260  ret = AVERROR(ENOMEM);
261  goto fail;
262  }
263  break;
264  case OVERLAY_FORMAT_RGB:
265  if (!(main_formats = ff_make_format_list(main_pix_fmts_rgb)) ||
266  !(overlay_formats = ff_make_format_list(overlay_pix_fmts_rgb))) {
267  ret = AVERROR(ENOMEM);
268  goto fail;
269  }
270  break;
271  case OVERLAY_FORMAT_GBRP:
272  if (!(main_formats = ff_make_format_list(main_pix_fmts_gbrp)) ||
273  !(overlay_formats = ff_make_format_list(overlay_pix_fmts_gbrp))) {
274  ret = AVERROR(ENOMEM);
275  goto fail;
276  }
277  break;
278  case OVERLAY_FORMAT_AUTO:
279  if (!(main_formats = ff_make_format_list(alpha_pix_fmts))) {
280  ret = AVERROR(ENOMEM);
281  goto fail;
282  }
283  break;
284  default:
285  av_assert0(0);
286  }
287 
288  if (s->format == OVERLAY_FORMAT_AUTO) {
289  ret = ff_set_common_formats(ctx, main_formats);
290  if (ret < 0)
291  goto fail;
292  } else {
293  if ((ret = ff_formats_ref(main_formats , &ctx->inputs[MAIN]->out_formats )) < 0 ||
294  (ret = ff_formats_ref(overlay_formats, &ctx->inputs[OVERLAY]->out_formats)) < 0 ||
295  (ret = ff_formats_ref(main_formats , &ctx->outputs[MAIN]->in_formats )) < 0)
296  goto fail;
297  }
298 
299  return 0;
300 fail:
301  if (main_formats)
302  av_freep(&main_formats->formats);
303  av_freep(&main_formats);
304  if (overlay_formats)
305  av_freep(&overlay_formats->formats);
306  av_freep(&overlay_formats);
307  return ret;
308 }
309 
311 {
312  AVFilterContext *ctx = inlink->dst;
313  OverlayContext *s = inlink->dst->priv;
314  int ret;
315  const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
316 
318 
319  /* Finish the configuration by evaluating the expressions
320  now when both inputs are configured. */
321  s->var_values[VAR_MAIN_W ] = s->var_values[VAR_MW] = ctx->inputs[MAIN ]->w;
322  s->var_values[VAR_MAIN_H ] = s->var_values[VAR_MH] = ctx->inputs[MAIN ]->h;
325  s->var_values[VAR_HSUB] = 1<<pix_desc->log2_chroma_w;
326  s->var_values[VAR_VSUB] = 1<<pix_desc->log2_chroma_h;
327  s->var_values[VAR_X] = NAN;
328  s->var_values[VAR_Y] = NAN;
329  s->var_values[VAR_N] = 0;
330  s->var_values[VAR_T] = NAN;
331  s->var_values[VAR_POS] = NAN;
332 
333  if ((ret = set_expr(&s->x_pexpr, s->x_expr, "x", ctx)) < 0 ||
334  (ret = set_expr(&s->y_pexpr, s->y_expr, "y", ctx)) < 0)
335  return ret;
336 
338  ff_fill_rgba_map(s->overlay_rgba_map, inlink->format) >= 0;
340 
341  if (s->eval_mode == EVAL_MODE_INIT) {
342  eval_expr(ctx);
343  av_log(ctx, AV_LOG_VERBOSE, "x:%f xi:%d y:%f yi:%d\n",
344  s->var_values[VAR_X], s->x,
345  s->var_values[VAR_Y], s->y);
346  }
347 
348  av_log(ctx, AV_LOG_VERBOSE,
349  "main w:%d h:%d fmt:%s overlay w:%d h:%d fmt:%s\n",
350  ctx->inputs[MAIN]->w, ctx->inputs[MAIN]->h,
352  ctx->inputs[OVERLAY]->w, ctx->inputs[OVERLAY]->h,
354  return 0;
355 }
356 
357 static int config_output(AVFilterLink *outlink)
358 {
359  AVFilterContext *ctx = outlink->src;
360  OverlayContext *s = ctx->priv;
361  int ret;
362 
363  if ((ret = ff_framesync_init_dualinput(&s->fs, ctx)) < 0)
364  return ret;
365 
366  outlink->w = ctx->inputs[MAIN]->w;
367  outlink->h = ctx->inputs[MAIN]->h;
368  outlink->time_base = ctx->inputs[MAIN]->time_base;
369 
370  return ff_framesync_configure(&s->fs);
371 }
372 
// divide by 255 and round to nearest
// apply a fast variant: (X+127)/255 = ((X+127)*257+257)>>16 = ((X+128)*257)>>16
// (exact for the 16-bit intermediate range produced by 8-bit blending)
#define FAST_DIV255(x) ((((x) + 128) * 257) >> 16)

// calculate the unpremultiplied alpha, applying the general equation:
// alpha = alpha_overlay / ( (alpha_main + alpha_overlay) - (alpha_main * alpha_overlay) )
// (((x) << 16) - ((x) << 9) + (x)) is a faster version of: 255 * 255 * x
// ((((x) + (y)) << 8) - ((x) + (y)) - (y) * (x)) is a faster version of: 255 * (x + y)
// NOTE(review): the constants are 8-bit (255) based, yet the macro is also
// used from the 10-bit code paths below — confirm that approximation is
// intentional for >8-bit depths.
#define UNPREMULTIPLY_ALPHA(x, y) ((((x) << 16) - ((x) << 9) + (x)) / ((((x) + (y)) << 8) - ((x) + (y)) - (y) * (x)))
382 
383 /**
384  * Blend image in src to destination buffer dst at position (x, y).
385  */
386 
388  AVFrame *dst, const AVFrame *src,
389  int main_has_alpha, int x, int y,
390  int is_straight, int jobnr, int nb_jobs)
391 {
392  OverlayContext *s = ctx->priv;
393  int i, imax, j, jmax;
394  const int src_w = src->width;
395  const int src_h = src->height;
396  const int dst_w = dst->width;
397  const int dst_h = dst->height;
398  uint8_t alpha; ///< the amount of overlay to blend on to main
399  const int dr = s->main_rgba_map[R];
400  const int dg = s->main_rgba_map[G];
401  const int db = s->main_rgba_map[B];
402  const int da = s->main_rgba_map[A];
403  const int dstep = s->main_pix_step[0];
404  const int sr = s->overlay_rgba_map[R];
405  const int sg = s->overlay_rgba_map[G];
406  const int sb = s->overlay_rgba_map[B];
407  const int sa = s->overlay_rgba_map[A];
408  const int sstep = s->overlay_pix_step[0];
409  int slice_start, slice_end;
410  uint8_t *S, *sp, *d, *dp;
411 
412  i = FFMAX(-y, 0);
413  imax = FFMIN3(-y + dst_h, FFMIN(src_h, dst_h), y + src_h);
414 
415  slice_start = i + (imax * jobnr) / nb_jobs;
416  slice_end = i + (imax * (jobnr+1)) / nb_jobs;
417 
418  sp = src->data[0] + (slice_start) * src->linesize[0];
419  dp = dst->data[0] + (y + slice_start) * dst->linesize[0];
420 
421  for (i = slice_start; i < slice_end; i++) {
422  j = FFMAX(-x, 0);
423  S = sp + j * sstep;
424  d = dp + (x+j) * dstep;
425 
426  for (jmax = FFMIN(-x + dst_w, src_w); j < jmax; j++) {
427  alpha = S[sa];
428 
429  // if the main channel has an alpha channel, alpha has to be calculated
430  // to create an un-premultiplied (straight) alpha value
431  if (main_has_alpha && alpha != 0 && alpha != 255) {
432  uint8_t alpha_d = d[da];
433  alpha = UNPREMULTIPLY_ALPHA(alpha, alpha_d);
434  }
435 
436  switch (alpha) {
437  case 0:
438  break;
439  case 255:
440  d[dr] = S[sr];
441  d[dg] = S[sg];
442  d[db] = S[sb];
443  break;
444  default:
445  // main_value = main_value * (1 - alpha) + overlay_value * alpha
446  // since alpha is in the range 0-255, the result must divided by 255
447  d[dr] = is_straight ? FAST_DIV255(d[dr] * (255 - alpha) + S[sr] * alpha) :
448  FFMIN(FAST_DIV255(d[dr] * (255 - alpha)) + S[sr], 255);
449  d[dg] = is_straight ? FAST_DIV255(d[dg] * (255 - alpha) + S[sg] * alpha) :
450  FFMIN(FAST_DIV255(d[dg] * (255 - alpha)) + S[sg], 255);
451  d[db] = is_straight ? FAST_DIV255(d[db] * (255 - alpha) + S[sb] * alpha) :
452  FFMIN(FAST_DIV255(d[db] * (255 - alpha)) + S[sb], 255);
453  }
454  if (main_has_alpha) {
455  switch (alpha) {
456  case 0:
457  break;
458  case 255:
459  d[da] = S[sa];
460  break;
461  default:
462  // apply alpha compositing: main_alpha += (1-main_alpha) * overlay_alpha
463  d[da] += FAST_DIV255((255 - d[da]) * S[sa]);
464  }
465  }
466  d += dstep;
467  S += sstep;
468  }
469  dp += dst->linesize[0];
470  sp += src->linesize[0];
471  }
472 }
473 
/**
 * Define blend_plane_<depth>_<nbits>bits(): blend one plane of the overlay
 * frame (src) into one plane of the main frame (dst) for a horizontal slice
 * of the overlapping region selected by jobnr/nb_jobs.
 *
 * The plane index i selects the overlay plane; dst_plane/dst_offset/dst_step
 * address the matching component in the (possibly packed) main layout.
 * hsub/vsub give the chroma subsampling of plane i; the overlay alpha plane
 * (src->data[3]) is read at full resolution and averaged over the 2x1/1x2/2x2
 * footprint of a subsampled chroma sample. With straight=0 the overlay is
 * treated as premultiplied; with yuv=1 the chroma planes are blended around
 * the mid value. An optional 8-bit per-row SIMD helper (octx->blend_row[i])
 * handles the bulk of each row before the scalar tail loop.
 *
 * NOTE(review): alpha_d below is uint8_t even in the 16-bit instantiation,
 * truncating main-alpha values above 255 — confirm against upstream whether
 * it should be uint##depth##_t for the 10-bit path.
 */
#define DEFINE_BLEND_PLANE(depth, nbits) \
static av_always_inline void blend_plane_##depth##_##nbits##bits(AVFilterContext *ctx, \
                                         AVFrame *dst, const AVFrame *src, \
                                         int src_w, int src_h, \
                                         int dst_w, int dst_h, \
                                         int i, int hsub, int vsub, \
                                         int x, int y, \
                                         int main_has_alpha, \
                                         int dst_plane, \
                                         int dst_offset, \
                                         int dst_step, \
                                         int straight, \
                                         int yuv, \
                                         int jobnr, \
                                         int nb_jobs) \
{ \
    OverlayContext *octx = ctx->priv; \
    int src_wp = AV_CEIL_RSHIFT(src_w, hsub); \
    int src_hp = AV_CEIL_RSHIFT(src_h, vsub); \
    int dst_wp = AV_CEIL_RSHIFT(dst_w, hsub); \
    int dst_hp = AV_CEIL_RSHIFT(dst_h, vsub); \
    int yp = y>>vsub; \
    int xp = x>>hsub; \
    uint##depth##_t *s, *sp, *d, *dp, *dap, *a, *da, *ap; \
    int jmax, j, k, kmax; \
    int slice_start, slice_end; \
    const uint##depth##_t max = (1 << nbits) - 1; \
    const uint##depth##_t mid = (1 << (nbits -1)) ; \
    int bytes = depth / 8; \
    \
    dst_step /= bytes; \
    j = FFMAX(-yp, 0); \
    jmax = FFMIN3(-yp + dst_hp, FFMIN(src_hp, dst_hp), yp + src_hp); \
    \
    slice_start = j + (jmax * jobnr) / nb_jobs; \
    slice_end = j + (jmax * (jobnr+1)) / nb_jobs; \
    \
    sp = (uint##depth##_t *)(src->data[i] + (slice_start) * src->linesize[i]); \
    dp = (uint##depth##_t *)(dst->data[dst_plane] \
                      + (yp + slice_start) * dst->linesize[dst_plane] \
                      + dst_offset); \
    ap = (uint##depth##_t *)(src->data[3] + (slice_start << vsub) * src->linesize[3]); \
    dap = (uint##depth##_t *)(dst->data[3] + ((yp + slice_start) << vsub) * dst->linesize[3]); \
    \
    for (j = slice_start; j < slice_end; j++) { \
        k = FFMAX(-xp, 0); \
        d = dp + (xp+k) * dst_step; \
        s = sp + k; \
        a = ap + (k<<hsub); \
        da = dap + ((xp+k) << hsub); \
        kmax = FFMIN(-xp + dst_wp, src_wp); \
        \
        if (nbits == 8 && ((vsub && j+1 < src_hp) || !vsub) && octx->blend_row[i]) { \
            int c = octx->blend_row[i]((uint8_t*)d, (uint8_t*)da, (uint8_t*)s, \
                                       (uint8_t*)a, kmax - k, src->linesize[3]); \
            \
            s += c; \
            d += dst_step * c; \
            da += (1 << hsub) * c; \
            a += (1 << hsub) * c; \
            k += c; \
        } \
        for (; k < kmax; k++) { \
            int alpha_v, alpha_h, alpha; \
            \
            /* average alpha for color components, improve quality */ \
            if (hsub && vsub && j+1 < src_hp && k+1 < src_wp) { \
                alpha = (a[0] + a[src->linesize[3]] + \
                         a[1] + a[src->linesize[3]+1]) >> 2; \
            } else if (hsub || vsub) { \
                alpha_h = hsub && k+1 < src_wp ? \
                    (a[0] + a[1]) >> 1 : a[0]; \
                alpha_v = vsub && j+1 < src_hp ? \
                    (a[0] + a[src->linesize[3]]) >> 1 : a[0]; \
                alpha = (alpha_v + alpha_h) >> 1; \
            } else \
                alpha = a[0]; \
            /* if the main channel has an alpha channel, alpha has to be calculated */ \
            /* to create an un-premultiplied (straight) alpha value */ \
            if (main_has_alpha && alpha != 0 && alpha != max) { \
                /* average alpha for color components, improve quality */ \
                uint8_t alpha_d; \
                if (hsub && vsub && j+1 < src_hp && k+1 < src_wp) { \
                    alpha_d = (da[0] + da[dst->linesize[3]] + \
                               da[1] + da[dst->linesize[3]+1]) >> 2; \
                } else if (hsub || vsub) { \
                    alpha_h = hsub && k+1 < src_wp ? \
                        (da[0] + da[1]) >> 1 : da[0]; \
                    alpha_v = vsub && j+1 < src_hp ? \
                        (da[0] + da[dst->linesize[3]]) >> 1 : da[0]; \
                    alpha_d = (alpha_v + alpha_h) >> 1; \
                } else \
                    alpha_d = da[0]; \
                alpha = UNPREMULTIPLY_ALPHA(alpha, alpha_d); \
            } \
            if (straight) { \
                if (nbits > 8) \
                    *d = (*d * (max - alpha) + *s * alpha) / max; \
                else \
                    *d = FAST_DIV255(*d * (255 - alpha) + *s * alpha); \
            } else { \
                if (nbits > 8) { \
                    if (i && yuv) \
                        *d = av_clip((*d * (max - alpha) + *s * alpha) / max + *s - mid, -mid, mid) + mid; \
                    else \
                        *d = FFMIN((*d * (max - alpha) + *s * alpha) / max + *s, max); \
                } else { \
                    if (i && yuv) \
                        *d = av_clip(FAST_DIV255((*d - mid) * (max - alpha)) + *s - mid, -mid, mid) + mid; \
                    else \
                        *d = FFMIN(FAST_DIV255(*d * (max - alpha)) + *s, max); \
                } \
            } \
            s++; \
            d += dst_step; \
            da += 1 << hsub; \
            a += 1 << hsub; \
        } \
        dp += dst->linesize[dst_plane] / bytes; \
        sp += src->linesize[i] / bytes; \
        ap += (1 << vsub) * src->linesize[3] / bytes; \
        dap += (1 << vsub) * dst->linesize[3] / bytes; \
    } \
}
DEFINE_BLEND_PLANE(8, 8);
DEFINE_BLEND_PLANE(16, 10);
600 
601 #define DEFINE_ALPHA_COMPOSITE(depth, nbits) \
602 static inline void alpha_composite_##depth##_##nbits##bits(const AVFrame *src, const AVFrame *dst, \
603  int src_w, int src_h, \
604  int dst_w, int dst_h, \
605  int x, int y, \
606  int jobnr, int nb_jobs) \
607 { \
608  uint##depth##_t alpha; /* the amount of overlay to blend on to main */ \
609  uint##depth##_t *s, *sa, *d, *da; \
610  int i, imax, j, jmax; \
611  int slice_start, slice_end; \
612  const uint##depth##_t max = (1 << nbits) - 1; \
613  int bytes = depth / 8; \
614  \
615  imax = FFMIN(-y + dst_h, src_h); \
616  slice_start = (imax * jobnr) / nb_jobs; \
617  slice_end = ((imax * (jobnr+1)) / nb_jobs); \
618  \
619  i = FFMAX(-y, 0); \
620  sa = (uint##depth##_t *)(src->data[3] + (i + slice_start) * src->linesize[3]); \
621  da = (uint##depth##_t *)(dst->data[3] + (y + i + slice_start) * dst->linesize[3]); \
622  \
623  for (i = i + slice_start; i < slice_end; i++) { \
624  j = FFMAX(-x, 0); \
625  s = sa + j; \
626  d = da + x+j; \
627  \
628  for (jmax = FFMIN(-x + dst_w, src_w); j < jmax; j++) { \
629  alpha = *s; \
630  if (alpha != 0 && alpha != max) { \
631  uint8_t alpha_d = *d; \
632  alpha = UNPREMULTIPLY_ALPHA(alpha, alpha_d); \
633  } \
634  if (alpha == max) \
635  *d = *s; \
636  else if (alpha > 0) { \
637  /* apply alpha compositing: main_alpha += (1-main_alpha) * overlay_alpha */ \
638  if (nbits > 8) \
639  *d += (max - *d) * *s / max; \
640  else \
641  *d += FAST_DIV255((max - *d) * *s); \
642  } \
643  d += 1; \
644  s += 1; \
645  } \
646  da += dst->linesize[3] / bytes; \
647  sa += src->linesize[3] / bytes; \
648  } \
649 }
651 DEFINE_ALPHA_COMPOSITE(16, 10);
652 
653 #define DEFINE_BLEND_SLICE_YUV(depth, nbits) \
654 static av_always_inline void blend_slice_yuv_##depth##_##nbits##bits(AVFilterContext *ctx, \
655  AVFrame *dst, const AVFrame *src, \
656  int hsub, int vsub, \
657  int main_has_alpha, \
658  int x, int y, \
659  int is_straight, \
660  int jobnr, int nb_jobs) \
661 { \
662  OverlayContext *s = ctx->priv; \
663  const int src_w = src->width; \
664  const int src_h = src->height; \
665  const int dst_w = dst->width; \
666  const int dst_h = dst->height; \
667  \
668  blend_plane_##depth##_##nbits##bits(ctx, dst, src, src_w, src_h, dst_w, dst_h, 0, 0, 0, \
669  x, y, main_has_alpha, s->main_desc->comp[0].plane, s->main_desc->comp[0].offset, \
670  s->main_desc->comp[0].step, is_straight, 1, jobnr, nb_jobs); \
671  blend_plane_##depth##_##nbits##bits(ctx, dst, src, src_w, src_h, dst_w, dst_h, 1, hsub, vsub, \
672  x, y, main_has_alpha, s->main_desc->comp[1].plane, s->main_desc->comp[1].offset, \
673  s->main_desc->comp[1].step, is_straight, 1, jobnr, nb_jobs); \
674  blend_plane_##depth##_##nbits##bits(ctx, dst, src, src_w, src_h, dst_w, dst_h, 2, hsub, vsub, \
675  x, y, main_has_alpha, s->main_desc->comp[2].plane, s->main_desc->comp[2].offset, \
676  s->main_desc->comp[2].step, is_straight, 1, jobnr, nb_jobs); \
677  \
678  if (main_has_alpha) \
679  alpha_composite_##depth##_##nbits##bits(src, dst, src_w, src_h, dst_w, dst_h, x, y, \
680  jobnr, nb_jobs); \
681 }
683 DEFINE_BLEND_SLICE_YUV(16, 10);
684 
686  AVFrame *dst, const AVFrame *src,
687  int hsub, int vsub,
688  int main_has_alpha,
689  int x, int y,
690  int is_straight,
691  int jobnr,
692  int nb_jobs)
693 {
694  OverlayContext *s = ctx->priv;
695  const int src_w = src->width;
696  const int src_h = src->height;
697  const int dst_w = dst->width;
698  const int dst_h = dst->height;
699 
700  blend_plane_8_8bits(ctx, dst, src, src_w, src_h, dst_w, dst_h, 0, 0, 0, x, y, main_has_alpha,
701  s->main_desc->comp[1].plane, s->main_desc->comp[1].offset, s->main_desc->comp[1].step, is_straight, 0,
702  jobnr, nb_jobs);
703  blend_plane_8_8bits(ctx, dst, src, src_w, src_h, dst_w, dst_h, 1, hsub, vsub, x, y, main_has_alpha,
704  s->main_desc->comp[2].plane, s->main_desc->comp[2].offset, s->main_desc->comp[2].step, is_straight, 0,
705  jobnr, nb_jobs);
706  blend_plane_8_8bits(ctx, dst, src, src_w, src_h, dst_w, dst_h, 2, hsub, vsub, x, y, main_has_alpha,
707  s->main_desc->comp[0].plane, s->main_desc->comp[0].offset, s->main_desc->comp[0].step, is_straight, 0,
708  jobnr, nb_jobs);
709 
710  if (main_has_alpha)
711  alpha_composite_8_8bits(src, dst, src_w, src_h, dst_w, dst_h, x, y, jobnr, nb_jobs);
712 }
713 
/* One slice-threaded entry point per supported pixel format. Each wrapper
 * fixes the constant parameters — chroma subsampling (hsub, vsub), whether
 * the main format has an alpha plane, and whether the overlay alpha is
 * straight (1) or premultiplied (0, the "_pm" variants) — and forwards the
 * per-job arguments to the shared blend implementation. One of these is
 * installed as s->blend_slice at input-configuration time. */

/* YUV 4:2:0, main without alpha, straight overlay alpha */
static int blend_slice_yuv420(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    OverlayContext *s = ctx->priv;
    ThreadData *td = arg;
    blend_slice_yuv_8_8bits(ctx, td->dst, td->src, 1, 1, 0, s->x, s->y, 1, jobnr, nb_jobs);
    return 0;
}

/* YUVA 4:2:0, main with alpha, straight overlay alpha */
static int blend_slice_yuva420(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    OverlayContext *s = ctx->priv;
    ThreadData *td = arg;
    blend_slice_yuv_8_8bits(ctx, td->dst, td->src, 1, 1, 1, s->x, s->y, 1, jobnr, nb_jobs);
    return 0;
}

/* 10-bit YUV 4:2:0, main without alpha, straight overlay alpha */
static int blend_slice_yuv420p10(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    OverlayContext *s = ctx->priv;
    ThreadData *td = arg;
    blend_slice_yuv_16_10bits(ctx, td->dst, td->src, 1, 1, 0, s->x, s->y, 1, jobnr, nb_jobs);
    return 0;
}

/* 10-bit YUVA 4:2:0, main with alpha, straight overlay alpha */
static int blend_slice_yuva420p10(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    OverlayContext *s = ctx->priv;
    ThreadData *td = arg;
    blend_slice_yuv_16_10bits(ctx, td->dst, td->src, 1, 1, 1, s->x, s->y, 1, jobnr, nb_jobs);
    return 0;
}

/* 10-bit YUV 4:2:2, main without alpha, straight overlay alpha */
static int blend_slice_yuv422p10(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    OverlayContext *s = ctx->priv;
    ThreadData *td = arg;
    blend_slice_yuv_16_10bits(ctx, td->dst, td->src, 1, 0, 0, s->x, s->y, 1, jobnr, nb_jobs);
    return 0;
}

/* 10-bit YUVA 4:2:2, main with alpha, straight overlay alpha */
static int blend_slice_yuva422p10(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    OverlayContext *s = ctx->priv;
    ThreadData *td = arg;
    blend_slice_yuv_16_10bits(ctx, td->dst, td->src, 1, 0, 1, s->x, s->y, 1, jobnr, nb_jobs);
    return 0;
}

/* YUV 4:2:2, main without alpha, straight overlay alpha */
static int blend_slice_yuv422(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    OverlayContext *s = ctx->priv;
    ThreadData *td = arg;
    blend_slice_yuv_8_8bits(ctx, td->dst, td->src, 1, 0, 0, s->x, s->y, 1, jobnr, nb_jobs);
    return 0;
}

/* YUVA 4:2:2, main with alpha, straight overlay alpha */
static int blend_slice_yuva422(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    OverlayContext *s = ctx->priv;
    ThreadData *td = arg;
    blend_slice_yuv_8_8bits(ctx, td->dst, td->src, 1, 0, 1, s->x, s->y, 1, jobnr, nb_jobs);
    return 0;
}

/* YUV 4:4:4, main without alpha, straight overlay alpha */
static int blend_slice_yuv444(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    OverlayContext *s = ctx->priv;
    ThreadData *td = arg;
    blend_slice_yuv_8_8bits(ctx, td->dst, td->src, 0, 0, 0, s->x, s->y, 1, jobnr, nb_jobs);
    return 0;
}

/* YUVA 4:4:4, main with alpha, straight overlay alpha */
static int blend_slice_yuva444(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    OverlayContext *s = ctx->priv;
    ThreadData *td = arg;
    blend_slice_yuv_8_8bits(ctx, td->dst, td->src, 0, 0, 1, s->x, s->y, 1, jobnr, nb_jobs);
    return 0;
}

/* planar GBR, main without alpha, straight overlay alpha */
static int blend_slice_gbrp(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    OverlayContext *s = ctx->priv;
    ThreadData *td = arg;
    blend_slice_planar_rgb(ctx, td->dst, td->src, 0, 0, 0, s->x, s->y, 1, jobnr, nb_jobs);
    return 0;
}

/* planar GBRA, main with alpha, straight overlay alpha */
static int blend_slice_gbrap(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    OverlayContext *s = ctx->priv;
    ThreadData *td = arg;
    blend_slice_planar_rgb(ctx, td->dst, td->src, 0, 0, 1, s->x, s->y, 1, jobnr, nb_jobs);
    return 0;
}

/* YUV 4:2:0, premultiplied overlay alpha */
static int blend_slice_yuv420_pm(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    OverlayContext *s = ctx->priv;
    ThreadData *td = arg;
    blend_slice_yuv_8_8bits(ctx, td->dst, td->src, 1, 1, 0, s->x, s->y, 0, jobnr, nb_jobs);
    return 0;
}

/* YUVA 4:2:0, premultiplied overlay alpha */
static int blend_slice_yuva420_pm(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    OverlayContext *s = ctx->priv;
    ThreadData *td = arg;
    blend_slice_yuv_8_8bits(ctx, td->dst, td->src, 1, 1, 1, s->x, s->y, 0, jobnr, nb_jobs);
    return 0;
}

/* YUV 4:2:2, premultiplied overlay alpha */
static int blend_slice_yuv422_pm(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    OverlayContext *s = ctx->priv;
    ThreadData *td = arg;
    blend_slice_yuv_8_8bits(ctx, td->dst, td->src, 1, 0, 0, s->x, s->y, 0, jobnr, nb_jobs);
    return 0;
}

/* YUVA 4:2:2, premultiplied overlay alpha */
static int blend_slice_yuva422_pm(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    OverlayContext *s = ctx->priv;
    ThreadData *td = arg;
    blend_slice_yuv_8_8bits(ctx, td->dst, td->src, 1, 0, 1, s->x, s->y, 0, jobnr, nb_jobs);
    return 0;
}

/* YUV 4:4:4, premultiplied overlay alpha */
static int blend_slice_yuv444_pm(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    OverlayContext *s = ctx->priv;
    ThreadData *td = arg;
    blend_slice_yuv_8_8bits(ctx, td->dst, td->src, 0, 0, 0, s->x, s->y, 0, jobnr, nb_jobs);
    return 0;
}

/* YUVA 4:4:4, premultiplied overlay alpha */
static int blend_slice_yuva444_pm(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    OverlayContext *s = ctx->priv;
    ThreadData *td = arg;
    blend_slice_yuv_8_8bits(ctx, td->dst, td->src, 0, 0, 1, s->x, s->y, 0, jobnr, nb_jobs);
    return 0;
}

/* planar GBR, premultiplied overlay alpha */
static int blend_slice_gbrp_pm(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    OverlayContext *s = ctx->priv;
    ThreadData *td = arg;
    blend_slice_planar_rgb(ctx, td->dst, td->src, 0, 0, 0, s->x, s->y, 0, jobnr, nb_jobs);
    return 0;
}

/* planar GBRA, premultiplied overlay alpha */
static int blend_slice_gbrap_pm(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    OverlayContext *s = ctx->priv;
    ThreadData *td = arg;
    blend_slice_planar_rgb(ctx, td->dst, td->src, 0, 0, 1, s->x, s->y, 0, jobnr, nb_jobs);
    return 0;
}

/* packed RGB, main without alpha, straight overlay alpha */
static int blend_slice_rgb(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    OverlayContext *s = ctx->priv;
    ThreadData *td = arg;
    blend_slice_packed_rgb(ctx, td->dst, td->src, 0, s->x, s->y, 1, jobnr, nb_jobs);
    return 0;
}

/* packed RGBA, main with alpha, straight overlay alpha */
static int blend_slice_rgba(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    OverlayContext *s = ctx->priv;
    ThreadData *td = arg;
    blend_slice_packed_rgb(ctx, td->dst, td->src, 1, s->x, s->y, 1, jobnr, nb_jobs);
    return 0;
}

/* packed RGB, premultiplied overlay alpha */
static int blend_slice_rgb_pm(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    OverlayContext *s = ctx->priv;
    ThreadData *td = arg;
    blend_slice_packed_rgb(ctx, td->dst, td->src, 0, s->x, s->y, 0, jobnr, nb_jobs);
    return 0;
}

/* packed RGBA, premultiplied overlay alpha */
static int blend_slice_rgba_pm(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    OverlayContext *s = ctx->priv;
    ThreadData *td = arg;
    blend_slice_packed_rgb(ctx, td->dst, td->src, 1, s->x, s->y, 0, jobnr, nb_jobs);
    return 0;
}
905 
907 {
908  OverlayContext *s = inlink->dst->priv;
909  const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
910 
912 
913  s->hsub = pix_desc->log2_chroma_w;
914  s->vsub = pix_desc->log2_chroma_h;
915 
916  s->main_desc = pix_desc;
917 
918  s->main_is_packed_rgb =
919  ff_fill_rgba_map(s->main_rgba_map, inlink->format) >= 0;
921  switch (s->format) {
924  break;
927  break;
930  break;
933  break;
936  break;
937  case OVERLAY_FORMAT_RGB:
939  break;
940  case OVERLAY_FORMAT_GBRP:
942  break;
943  case OVERLAY_FORMAT_AUTO:
944  switch (inlink->format) {
945  case AV_PIX_FMT_YUVA420P:
947  break;
950  break;
951  case AV_PIX_FMT_YUVA422P:
953  break;
956  break;
957  case AV_PIX_FMT_YUVA444P:
959  break;
960  case AV_PIX_FMT_ARGB:
961  case AV_PIX_FMT_RGBA:
962  case AV_PIX_FMT_BGRA:
963  case AV_PIX_FMT_ABGR:
965  break;
966  case AV_PIX_FMT_GBRAP:
968  break;
969  default:
970  av_assert0(0);
971  break;
972  }
973  break;
974  }
975 
976  if (!s->alpha_format)
977  goto end;
978 
979  switch (s->format) {
982  break;
985  break;
988  break;
989  case OVERLAY_FORMAT_RGB:
991  break;
992  case OVERLAY_FORMAT_GBRP:
994  break;
995  case OVERLAY_FORMAT_AUTO:
996  switch (inlink->format) {
997  case AV_PIX_FMT_YUVA420P:
999  break;
1000  case AV_PIX_FMT_YUVA422P:
1002  break;
1003  case AV_PIX_FMT_YUVA444P:
1005  break;
1006  case AV_PIX_FMT_ARGB:
1007  case AV_PIX_FMT_RGBA:
1008  case AV_PIX_FMT_BGRA:
1009  case AV_PIX_FMT_ABGR:
1011  break;
1012  case AV_PIX_FMT_GBRAP:
1014  break;
1015  default:
1016  av_assert0(0);
1017  break;
1018  }
1019  break;
1020  }
1021 
1022 end:
1023  if (ARCH_X86)
1024  ff_overlay_init_x86(s, s->format, inlink->format,
1025  s->alpha_format, s->main_has_alpha);
1026 
1027  return 0;
1028 }
1029 
1031 {
1032  AVFilterContext *ctx = fs->parent;
1033  AVFrame *mainpic, *second;
1034  OverlayContext *s = ctx->priv;
1035  AVFilterLink *inlink = ctx->inputs[0];
1036  int ret;
1037 
1038  ret = ff_framesync_dualinput_get_writable(fs, &mainpic, &second);
1039  if (ret < 0)
1040  return ret;
1041  if (!second)
1042  return ff_filter_frame(ctx->outputs[0], mainpic);
1043 
1044  if (s->eval_mode == EVAL_MODE_FRAME) {
1045  int64_t pos = mainpic->pkt_pos;
1046 
1047  s->var_values[VAR_N] = inlink->frame_count_out;
1048  s->var_values[VAR_T] = mainpic->pts == AV_NOPTS_VALUE ?
1049  NAN : mainpic->pts * av_q2d(inlink->time_base);
1050  s->var_values[VAR_POS] = pos == -1 ? NAN : pos;
1051 
1052  s->var_values[VAR_OVERLAY_W] = s->var_values[VAR_OW] = second->width;
1053  s->var_values[VAR_OVERLAY_H] = s->var_values[VAR_OH] = second->height;
1054  s->var_values[VAR_MAIN_W ] = s->var_values[VAR_MW] = mainpic->width;
1055  s->var_values[VAR_MAIN_H ] = s->var_values[VAR_MH] = mainpic->height;
1056 
1057  eval_expr(ctx);
1058  av_log(ctx, AV_LOG_DEBUG, "n:%f t:%f pos:%f x:%f xi:%d y:%f yi:%d\n",
1060  s->var_values[VAR_X], s->x,
1061  s->var_values[VAR_Y], s->y);
1062  }
1063 
1064  if (s->x < mainpic->width && s->x + second->width >= 0 &&
1065  s->y < mainpic->height && s->y + second->height >= 0) {
1066  ThreadData td;
1067 
1068  td.dst = mainpic;
1069  td.src = second;
1070  ctx->internal->execute(ctx, s->blend_slice, &td, NULL, FFMIN(FFMAX(1, FFMIN3(s->y + second->height, FFMIN(second->height, mainpic->height), mainpic->height - s->y)),
1071  ff_filter_get_nb_threads(ctx)));
1072  }
1073  return ff_filter_frame(ctx->outputs[0], mainpic);
1074 }
1075 
1077 {
1078  OverlayContext *s = ctx->priv;
1079 
1080  s->fs.on_event = do_blend;
1081  return 0;
1082 }
1083 
1085 {
1086  OverlayContext *s = ctx->priv;
1087  return ff_framesync_activate(&s->fs);
1088 }
1089 
1090 #define OFFSET(x) offsetof(OverlayContext, x)
1091 #define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
1092 
1093 static const AVOption overlay_options[] = {
1094  { "x", "set the x expression", OFFSET(x_expr), AV_OPT_TYPE_STRING, {.str = "0"}, 0, 0, FLAGS },
1095  { "y", "set the y expression", OFFSET(y_expr), AV_OPT_TYPE_STRING, {.str = "0"}, 0, 0, FLAGS },
1096  { "eof_action", "Action to take when encountering EOF from secondary input ",
1097  OFFSET(fs.opt_eof_action), AV_OPT_TYPE_INT, { .i64 = EOF_ACTION_REPEAT },
1098  EOF_ACTION_REPEAT, EOF_ACTION_PASS, .flags = FLAGS, "eof_action" },
1099  { "repeat", "Repeat the previous frame.", 0, AV_OPT_TYPE_CONST, { .i64 = EOF_ACTION_REPEAT }, .flags = FLAGS, "eof_action" },
1100  { "endall", "End both streams.", 0, AV_OPT_TYPE_CONST, { .i64 = EOF_ACTION_ENDALL }, .flags = FLAGS, "eof_action" },
1101  { "pass", "Pass through the main input.", 0, AV_OPT_TYPE_CONST, { .i64 = EOF_ACTION_PASS }, .flags = FLAGS, "eof_action" },
1102  { "eval", "specify when to evaluate expressions", OFFSET(eval_mode), AV_OPT_TYPE_INT, {.i64 = EVAL_MODE_FRAME}, 0, EVAL_MODE_NB-1, FLAGS, "eval" },
1103  { "init", "eval expressions once during initialization", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_INIT}, .flags = FLAGS, .unit = "eval" },
1104  { "frame", "eval expressions per-frame", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_FRAME}, .flags = FLAGS, .unit = "eval" },
1105  { "shortest", "force termination when the shortest input terminates", OFFSET(fs.opt_shortest), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS },
1106  { "format", "set output format", OFFSET(format), AV_OPT_TYPE_INT, {.i64=OVERLAY_FORMAT_YUV420}, 0, OVERLAY_FORMAT_NB-1, FLAGS, "format" },
1107  { "yuv420", "", 0, AV_OPT_TYPE_CONST, {.i64=OVERLAY_FORMAT_YUV420}, .flags = FLAGS, .unit = "format" },
1108  { "yuv420p10", "", 0, AV_OPT_TYPE_CONST, {.i64=OVERLAY_FORMAT_YUV420P10}, .flags = FLAGS, .unit = "format" },
1109  { "yuv422", "", 0, AV_OPT_TYPE_CONST, {.i64=OVERLAY_FORMAT_YUV422}, .flags = FLAGS, .unit = "format" },
1110  { "yuv422p10", "", 0, AV_OPT_TYPE_CONST, {.i64=OVERLAY_FORMAT_YUV422P10}, .flags = FLAGS, .unit = "format" },
1111  { "yuv444", "", 0, AV_OPT_TYPE_CONST, {.i64=OVERLAY_FORMAT_YUV444}, .flags = FLAGS, .unit = "format" },
1112  { "rgb", "", 0, AV_OPT_TYPE_CONST, {.i64=OVERLAY_FORMAT_RGB}, .flags = FLAGS, .unit = "format" },
1113  { "gbrp", "", 0, AV_OPT_TYPE_CONST, {.i64=OVERLAY_FORMAT_GBRP}, .flags = FLAGS, .unit = "format" },
1114  { "auto", "", 0, AV_OPT_TYPE_CONST, {.i64=OVERLAY_FORMAT_AUTO}, .flags = FLAGS, .unit = "format" },
1115  { "repeatlast", "repeat overlay of the last overlay frame", OFFSET(fs.opt_repeatlast), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, FLAGS },
1116  { "alpha", "alpha format", OFFSET(alpha_format), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS, "alpha_format" },
1117  { "straight", "", 0, AV_OPT_TYPE_CONST, {.i64=0}, .flags = FLAGS, .unit = "alpha_format" },
1118  { "premultiplied", "", 0, AV_OPT_TYPE_CONST, {.i64=1}, .flags = FLAGS, .unit = "alpha_format" },
1119  { NULL }
1120 };
1121 
1123 
1125  {
1126  .name = "main",
1127  .type = AVMEDIA_TYPE_VIDEO,
1128  .config_props = config_input_main,
1129  },
1130  {
1131  .name = "overlay",
1132  .type = AVMEDIA_TYPE_VIDEO,
1133  .config_props = config_input_overlay,
1134  },
1135  { NULL }
1136 };
1137 
1139  {
1140  .name = "default",
1141  .type = AVMEDIA_TYPE_VIDEO,
1142  .config_props = config_output,
1143  },
1144  { NULL }
1145 };
1146 
1148  .name = "overlay",
1149  .description = NULL_IF_CONFIG_SMALL("Overlay a video source on top of the input."),
1150  .preinit = overlay_framesync_preinit,
1151  .init = init,
1152  .uninit = uninit,
1153  .priv_size = sizeof(OverlayContext),
1154  .priv_class = &overlay_class,
1156  .activate = activate,
1158  .inputs = avfilter_vf_overlay_inputs,
1159  .outputs = avfilter_vf_overlay_outputs,
1162 };
static int activate(AVFilterContext *ctx)
Definition: vf_overlay.c:1084
int plane
Which of the 4 planes contains the component.
Definition: pixdesc.h:35
#define NULL
Definition: coverity.c:32
static int blend_slice_yuv420p10(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_overlay.c:730
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2573
static int blend_slice_rgb_pm(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_overlay.c:890
This structure describes decoded (raw) audio or video data.
Definition: frame.h:308
AVOption.
Definition: opt.h:248
#define AV_PIX_FMT_YUVA420P10
Definition: pixfmt.h:436
int64_t pkt_pos
reordered pos from the last AVPacket that has been input into the decoder
Definition: frame.h:579
#define AV_PIX_FMT_YUVA422P10
Definition: pixfmt.h:437
Definition: aeval.c:48
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
misc image utilities
Main libavfilter public API header.
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:68
AVFrame * old
Definition: vf_lagfun.c:77
const AVPixFmtDescriptor * main_desc
format descriptor for main input
Definition: vf_overlay.h:73
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:168
static int blend_slice_yuv444_pm(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_overlay.c:842
static const AVFilterPad avfilter_vf_overlay_inputs[]
Definition: vf_overlay.c:1124
void ff_overlay_init_x86(OverlayContext *s, int format, int pix_format, int alpha_format, int main_has_alpha)
int ff_framesync_configure(FFFrameSync *fs)
Configure a frame sync structure.
Definition: framesync.c:124
static int blend_slice_rgba_pm(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_overlay.c:898
int av_expr_parse(AVExpr **expr, const char *s, const char *const *const_names, const char *const *func1_names, double(*const *funcs1)(void *, double), const char *const *func2_names, double(*const *funcs2)(void *, double, double), int log_offset, void *log_ctx)
Parse an expression.
Definition: eval.c:685
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample format(the sample packing is implied by the sample format) and sample rate.The lists are not just lists
uint8_t log2_chroma_w
Amount to shift the luma width right to find the chroma width.
Definition: pixdesc.h:92
static int blend_slice_yuva420_pm(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_overlay.c:818
AVFilterFormats * ff_make_format_list(const int *fmts)
Create a list of supported formats.
Definition: formats.c:283
static int blend_slice_gbrp(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_overlay.c:794
static int blend_slice_yuv420(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_overlay.c:714
void av_image_fill_max_pixsteps(int max_pixsteps[4], int max_pixstep_comps[4], const AVPixFmtDescriptor *pixdesc)
Compute the max pixel step for each plane of an image with a format described by pixdesc.
Definition: imgutils.c:35
const char * name
Pad name.
Definition: internal.h:60
AVFilterContext * parent
Parent filter context.
Definition: framesync.h:152
AVFilterLink ** inputs
array of pointers to input links
Definition: avfilter.h:346
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1091
static int blend_slice_yuv422(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_overlay.c:762
char * x_expr
Definition: vf_overlay.h:76
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:101
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:117
uint8_t
#define av_cold
Definition: attributes.h:88
#define fs(width, name, subs,...)
Definition: cbs_vp9.c:259
AVOptions.
timestamp utils, mostly useful for debugging/logging purposes
static const char *const var_names[]
Definition: vf_overlay.c:48
int ff_framesync_init_dualinput(FFFrameSync *fs, AVFilterContext *parent)
Initialize a frame sync structure for dualinput.
Definition: framesync.c:358
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:90
#define G
Definition: vf_overlay.c:67
double var_values[VAR_VARS_NB]
Definition: vf_overlay.h:75
static int blend_slice_yuva444(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_overlay.c:786
int ff_framesync_dualinput_get_writable(FFFrameSync *fs, AVFrame **f0, AVFrame **f1)
Same as ff_framesync_dualinput_get(), but make sure that f0 is writable.
Definition: framesync.c:396
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:401
Definition: eval.c:157
#define R
Definition: vf_overlay.c:66
packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
Definition: pixfmt.h:94
#define FAST_DIV255(x)
Definition: vf_overlay.c:375
uint8_t overlay_rgba_map[4]
Definition: vf_overlay.h:62
AVFrame * dst
Definition: vf_blend.c:56
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
#define FFMIN3(a, b, c)
Definition: common.h:97
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:79
AVExpr * y_pexpr
Definition: vf_overlay.h:78
#define sp
Definition: regdef.h:63
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:210
#define OVERLAY
Definition: vf_overlay.c:64
int ff_fmt_is_in(int fmt, const int *fmts)
Tell if an integer is contained in the provided -1-terminated list of integers.
Definition: formats.c:254
#define av_log(a,...)
Definition: vf_blend.c:52
static int blend_slice_yuva422p10(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_overlay.c:754
static int blend_slice_yuv444(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_overlay.c:778
A filter pad used for either input or output.
Definition: internal.h:54
int eval_mode
EvalMode.
Definition: vf_overlay.h:66
int format
OverlayFormat.
Definition: vf_overlay.h:64
planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
Definition: pixfmt.h:176
int width
Definition: frame.h:366
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:194
int ff_set_common_formats(AVFilterContext *ctx, AVFilterFormats *formats)
A helper for query_formats() which sets all links to the same list of formats.
Definition: formats.c:600
#define td
Definition: regdef.h:70
#define UNPREMULTIPLY_ALPHA(x, y)
Definition: vf_overlay.c:381
uint8_t log2_chroma_h
Amount to shift the luma height right to find the chroma height.
Definition: pixdesc.h:101
void ff_framesync_uninit(FFFrameSync *fs)
Free all memory currently allocated.
Definition: framesync.c:290
static int blend_slice_gbrap_pm(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_overlay.c:866
static int blend_slice_yuva422_pm(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_overlay.c:834
#define S(s, c, i)
Frame sync structure.
Definition: framesync.h:146
uint8_t main_has_alpha
Definition: vf_overlay.h:60
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:153
const uint8_t * src
Definition: vf_bm3d.c:56
packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
Definition: pixfmt.h:95
void * priv
private data for use by the filter
Definition: avfilter.h:353
#define AVFILTER_FLAG_SLICE_THREADS
The filter supports multithreading by splitting frames into multiple parts and processing them concur...
Definition: avfilter.h:116
unsigned int pos
Definition: spdifenc.c:410
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:215
const char * arg
Definition: jacosubdec.c:66
planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (firs...
Definition: pixfmt.h:89
static int config_input_overlay(AVFilterLink *inlink)
Definition: vf_overlay.c:310
static int blend_slice_yuva444_pm(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_overlay.c:850
int ff_framesync_activate(FFFrameSync *fs)
Examine the frames in the filter&#39;s input and try to produce output.
Definition: framesync.c:341
int(* on_event)(struct FFFrameSync *fs)
Callback called when a frame event is ready.
Definition: framesync.h:172
#define FFMAX(a, b)
Definition: common.h:94
packed ARGB 8:8:8:8, 32bpp, ARGBARGB...
Definition: pixfmt.h:92
#define fail()
Definition: checkasm.h:123
packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
Definition: pixfmt.h:93
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
as above, but U and V bytes are swapped
Definition: pixfmt.h:90
static const AVFilterPad avfilter_vf_overlay_outputs[]
Definition: vf_overlay.c:1138
int ff_filter_get_nb_threads(AVFilterContext *ctx)
Get number of threads for current filter instance.
Definition: avfilter.c:800
#define NAN
Definition: mathematics.h:64
#define FFMIN(a, b)
Definition: common.h:96
static int blend_slice_yuva422(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_overlay.c:770
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:78
uint8_t main_rgba_map[4]
Definition: vf_overlay.h:59
int ff_formats_ref(AVFilterFormats *f, AVFilterFormats **ref)
Add *ref as a new reference to formats.
Definition: formats.c:470
AVFormatContext * ctx
Definition: movenc.c:48
Definition: aeval.c:51
#define B
Definition: vf_overlay.c:68
Definition: vf_blend.c:52
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:69
static av_cold int init(AVFilterContext *ctx)
Definition: vf_overlay.c:1076
int(* blend_slice)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_overlay.h:82
int main_pix_step[4]
steps per pixel for each plane of the main output
Definition: vf_overlay.h:70
EvalMode
Definition: af_volume.h:39
static const AVFilterPad outputs[]
Definition: af_acontrast.c:203
static int blend_slice_yuv422_pm(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_overlay.c:826
int ff_fill_rgba_map(uint8_t *rgba_map, enum AVPixelFormat pix_fmt)
Definition: drawutils.c:35
char * y_expr
Definition: vf_overlay.h:76
static const AVOption overlay_options[]
Definition: vf_overlay.c:1093
static int blend_slice_rgba(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_overlay.c:882
static int blend_slice_rgb(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_overlay.c:874
static int blend_slice_gbrp_pm(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_overlay.c:858
misc drawing utilities
#define DEFINE_BLEND_SLICE_YUV(depth, nbits)
Definition: vf_overlay.c:653
static av_always_inline void blend_slice_packed_rgb(AVFilterContext *ctx, AVFrame *dst, const AVFrame *src, int main_has_alpha, int x, int y, int is_straight, int jobnr, int nb_jobs)
Blend image in src to destination buffer dst at position (x, y).
Definition: vf_overlay.c:387
void av_expr_free(AVExpr *e)
Free a parsed expression previously created with av_expr_parse().
Definition: eval.c:336
static int blend_slice_yuv422p10(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_overlay.c:746
static av_cold void uninit(AVFilterContext *ctx)
Definition: vf_overlay.c:81
Used for passing data between threads.
Definition: dsddec.c:67
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:339
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
Definition: pixfmt.h:177
static int set_expr(AVExpr **pexpr, const char *expr, const char *option, void *log_ctx)
Definition: vf_overlay.c:109
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
static const int16_t alpha[]
Definition: ilbcdata.h:55
static int blend_slice_yuv420_pm(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_overlay.c:810
AVExpr * x_pexpr
Definition: vf_overlay.h:78
#define OFFSET(x)
Definition: vf_overlay.c:1090
int y
position of overlaid picture
Definition: vf_overlay.h:56
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:399
static int blend_slice_yuva420(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_overlay.c:722
Filter definition.
Definition: avfilter.h:144
uint8_t overlay_has_alpha
Definition: vf_overlay.h:63
#define FLAGS
Definition: vf_overlay.c:1091
option
Definition: libkvazaar.c:291
#define isnan(x)
Definition: libm.h:340
uint8_t overlay_is_packed_rgb
Definition: vf_overlay.h:61
#define A
Definition: vf_overlay.c:69
const char * name
Filter name.
Definition: avfilter.h:148
static av_always_inline void blend_slice_planar_rgb(AVFilterContext *ctx, AVFrame *dst, const AVFrame *src, int hsub, int vsub, int main_has_alpha, int x, int y, int is_straight, int jobnr, int nb_jobs)
Definition: vf_overlay.c:685
static int query_formats(AVFilterContext *ctx)
Definition: vf_overlay.c:162
#define AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL
Same as AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC, except that the filter will have its filter_frame() c...
Definition: avfilter.h:133
int overlay_pix_step[4]
steps per pixel for each plane of the overlay
Definition: vf_overlay.h:71
AVFilterLink ** outputs
array of pointers to output links
Definition: avfilter.h:350
AVFrame * overlay
int offset
Number of elements before the component of the first pixel.
Definition: pixdesc.h:47
#define flags(name, subs,...)
Definition: cbs_av1.c:560
AVFilterInternal * internal
An opaque struct for libavfilter internal use.
Definition: avfilter.h:378
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:400
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:322
#define DEFINE_BLEND_PLANE(depth, nbits)
Definition: vf_overlay.c:474
static int normalize_xy(double d, int chroma_sub)
Definition: vf_overlay.c:90
static int config_input_main(AVFilterLink *inlink)
Definition: vf_overlay.c:906
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
#define DEFINE_ALPHA_COMPOSITE(depth, nbits)
Definition: vf_overlay.c:601
common internal and external API header
static int blend_slice_gbrap(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_overlay.c:802
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:215
static int blend_slice_yuva420p10(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_overlay.c:738
int vsub
chroma subsampling values
Definition: vf_overlay.h:72
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:80
AVFilter ff_vf_overlay
Definition: vf_overlay.c:1147
avfilter_execute_func * execute
Definition: internal.h:144
static int slice_end(AVCodecContext *avctx, AVFrame *pict)
Handle slice ends.
Definition: mpeg12dec.c:2039
double av_expr_eval(AVExpr *e, const double *const_values, void *opaque)
Evaluate a previously parsed expression.
Definition: eval.c:766
static int config_output(AVFilterLink *outlink)
Definition: vf_overlay.c:357
A list of supported formats for one end of a filter link.
Definition: formats.h:64
uint8_t main_is_packed_rgb
Definition: vf_overlay.h:58
An instance of a filter.
Definition: avfilter.h:338
static int process_command(AVFilterContext *ctx, const char *cmd, const char *args, char *res, int res_len, int flags)
Definition: vf_overlay.c:130
FFFrameSync fs
Definition: vf_overlay.h:68
int height
Definition: frame.h:366
FRAMESYNC_DEFINE_CLASS(overlay, OverlayContext, fs)
#define MAIN
Definition: vf_overlay.c:63
#define av_freep(p)
const void ** s
#define av_always_inline
Definition: attributes.h:45
static void eval_expr(AVFilterContext *ctx)
Definition: vf_overlay.c:97
static int do_blend(FFFrameSync *fs)
Definition: vf_overlay.c:1030
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2489
AVFilterLink * inlink
Definition: vf_blend.c:57
internal API functions
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Frame references ownership and permissions
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
int i
Definition: input.c:406
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
static enum AVPixelFormat alpha_pix_fmts[]
Definition: vf_overlay.c:155
int step
Number of elements between 2 horizontally consecutive pixels.
Definition: pixdesc.h:41
simple arithmetic expression evaluator
int * formats
list of media formats
Definition: formats.h:66