vf_blend.c
/*
 * Copyright (c) 2013 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/imgutils.h"
#include "libavutil/eval.h"
#include "libavutil/opt.h"
#include "libavutil/pixfmt.h"
#include "avfilter.h"
#include "bufferqueue.h"
#include "formats.h"
#include "internal.h"
#include "video.h"

#define TOP    0
#define BOTTOM 1

enum BlendMode {
    BLEND_UNSET = -1,
    BLEND_NORMAL,
    BLEND_ADDITION,
    BLEND_AND,
    BLEND_AVERAGE,
    BLEND_BURN,
    BLEND_DARKEN,
    BLEND_DIFFERENCE,
    BLEND_DIVIDE,
    BLEND_DODGE,
    BLEND_EXCLUSION,
    BLEND_HARDLIGHT,
    BLEND_LIGHTEN,
    BLEND_MULTIPLY,
    BLEND_NEGATION,
    BLEND_OR,
    BLEND_OVERLAY,
    BLEND_PHOENIX,
    BLEND_PINLIGHT,
    BLEND_REFLECT,
    BLEND_SCREEN,
    BLEND_SOFTLIGHT,
    BLEND_SUBTRACT,
    BLEND_VIVIDLIGHT,
    BLEND_XOR,
    BLEND_NB
};

static const char *const var_names[] = { "X", "Y", "W", "H", "SW", "SH", "T", "A", "B", "TOP", "BOTTOM", NULL };
enum { VAR_X, VAR_Y, VAR_W, VAR_H, VAR_SW, VAR_SH, VAR_T, VAR_A, VAR_B, VAR_TOP, VAR_BOTTOM, VAR_VARS_NB };

typedef struct FilterParams {
    enum BlendMode mode;
    double values[VAR_VARS_NB];
    double opacity;
    AVExpr *e;
    char *expr_str;
    void (*blend)(const uint8_t *top, int top_linesize,
                  const uint8_t *bottom, int bottom_linesize,
                  uint8_t *dst, int dst_linesize,
                  int width, int height, struct FilterParams *param);
} FilterParams;

typedef struct {
    const AVClass *class;
    struct FFBufQueue queue_top;
    struct FFBufQueue queue_bottom;
    int hsub, vsub;             ///< chroma subsampling values
    int frame_requested;
    char *all_expr;
    enum BlendMode all_mode;
    double all_opacity;

    FilterParams params[4];
} BlendContext;

#define OFFSET(x) offsetof(BlendContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

static const AVOption blend_options[] = {
    { "c0_mode", "set component #0 blend mode", OFFSET(params[0].mode), AV_OPT_TYPE_INT, {.i64=0}, 0, BLEND_NB-1, FLAGS, "mode" },
    { "c1_mode", "set component #1 blend mode", OFFSET(params[1].mode), AV_OPT_TYPE_INT, {.i64=0}, 0, BLEND_NB-1, FLAGS, "mode" },
    { "c2_mode", "set component #2 blend mode", OFFSET(params[2].mode), AV_OPT_TYPE_INT, {.i64=0}, 0, BLEND_NB-1, FLAGS, "mode" },
    { "c3_mode", "set component #3 blend mode", OFFSET(params[3].mode), AV_OPT_TYPE_INT, {.i64=0}, 0, BLEND_NB-1, FLAGS, "mode" },
    { "all_mode", "set blend mode for all components", OFFSET(all_mode), AV_OPT_TYPE_INT, {.i64=-1}, -1, BLEND_NB-1, FLAGS, "mode" },
    { "addition",   "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_ADDITION},   0, 0, FLAGS, "mode" },
    { "and",        "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_AND},        0, 0, FLAGS, "mode" },
    { "average",    "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_AVERAGE},    0, 0, FLAGS, "mode" },
    { "burn",       "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_BURN},       0, 0, FLAGS, "mode" },
    { "darken",     "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DARKEN},     0, 0, FLAGS, "mode" },
    { "difference", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DIFFERENCE}, 0, 0, FLAGS, "mode" },
    { "divide",     "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DIVIDE},     0, 0, FLAGS, "mode" },
    { "dodge",      "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DODGE},      0, 0, FLAGS, "mode" },
    { "exclusion",  "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_EXCLUSION},  0, 0, FLAGS, "mode" },
    { "hardlight",  "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_HARDLIGHT},  0, 0, FLAGS, "mode" },
    { "lighten",    "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_LIGHTEN},    0, 0, FLAGS, "mode" },
    { "multiply",   "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_MULTIPLY},   0, 0, FLAGS, "mode" },
    { "negation",   "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_NEGATION},   0, 0, FLAGS, "mode" },
    { "normal",     "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_NORMAL},     0, 0, FLAGS, "mode" },
    { "or",         "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_OR},         0, 0, FLAGS, "mode" },
    { "overlay",    "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_OVERLAY},    0, 0, FLAGS, "mode" },
    { "phoenix",    "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_PHOENIX},    0, 0, FLAGS, "mode" },
    { "pinlight",   "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_PINLIGHT},   0, 0, FLAGS, "mode" },
    { "reflect",    "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_REFLECT},    0, 0, FLAGS, "mode" },
    { "screen",     "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_SCREEN},     0, 0, FLAGS, "mode" },
    { "softlight",  "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_SOFTLIGHT},  0, 0, FLAGS, "mode" },
    { "subtract",   "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_SUBTRACT},   0, 0, FLAGS, "mode" },
    { "vividlight", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_VIVIDLIGHT}, 0, 0, FLAGS, "mode" },
    { "xor",        "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_XOR},        0, 0, FLAGS, "mode" },
    { "c0_expr", "set color component #0 expression", OFFSET(params[0].expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "c1_expr", "set color component #1 expression", OFFSET(params[1].expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "c2_expr", "set color component #2 expression", OFFSET(params[2].expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "c3_expr", "set color component #3 expression", OFFSET(params[3].expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "all_expr", "set expression for all color components", OFFSET(all_expr), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "c0_opacity", "set color component #0 opacity", OFFSET(params[0].opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS },
    { "c1_opacity", "set color component #1 opacity", OFFSET(params[1].opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS },
    { "c2_opacity", "set color component #2 opacity", OFFSET(params[2].opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS },
    { "c3_opacity", "set color component #3 opacity", OFFSET(params[3].opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS },
    { "all_opacity", "set opacity for all color components", OFFSET(all_opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS },
    { NULL },
};

AVFILTER_DEFINE_CLASS(blend);

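/*
 * Illustrative use of the options above from the ffmpeg command line (a
 * sketch only; the file names are placeholders):
 *
 *   ffmpeg -i top.mkv -i bottom.mkv -filter_complex \
 *       "blend=all_mode=multiply:all_opacity=0.7" out.mkv
 *
 *   ffmpeg -i top.mkv -i bottom.mkv -filter_complex \
 *       "blend=all_expr='A*(X/W)+B*(1-X/W)'" out.mkv
 *
 * The first form picks one of the named modes for every component; the
 * second form evaluates a per-pixel expression using the variables declared
 * in var_names (X, Y, W, H, A, B, ...).
 */
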
static void blend_normal(const uint8_t *top, int top_linesize,
                         const uint8_t *bottom, int bottom_linesize,
                         uint8_t *dst, int dst_linesize,
                         int width, int height, FilterParams *param)
{
    av_image_copy_plane(dst, dst_linesize, top, top_linesize, width, height);
}

#define DEFINE_BLEND(name, expr)                                          \
static void blend_## name(const uint8_t *top, int top_linesize,          \
                          const uint8_t *bottom, int bottom_linesize,    \
                          uint8_t *dst, int dst_linesize,                \
                          int width, int height, FilterParams *param)    \
{                                                                         \
    double opacity = param->opacity;                                      \
    int i, j;                                                             \
                                                                          \
    for (i = 0; i < height; i++) {                                        \
        for (j = 0; j < width; j++) {                                     \
            dst[j] = top[j] + ((expr) - top[j]) * opacity;                \
        }                                                                 \
        dst    += dst_linesize;                                           \
        top    += top_linesize;                                           \
        bottom += bottom_linesize;                                        \
    }                                                                     \
}

#define A top[j]
#define B bottom[j]

#define MULTIPLY(x, a, b) (x * ((a * b) / 255))
#define SCREEN(x, a, b)   (255 - x * ((255 - a) * (255 - b) / 255))
#define BURN(a, b)        ((a == 0) ? a : FFMAX(0, 255 - ((255 - b) << 8) / a))
#define DODGE(a, b)       ((a == 255) ? a : FFMIN(255, ((b << 8) / (255 - a))))

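/*
 * How the pieces above combine (a sketch of the macro expansion):
 * DEFINE_BLEND(multiply, MULTIPLY(1, A, B)) below generates a function
 * blend_multiply() whose inner loop is effectively
 *
 *     dst[j] = top[j] + ((top[j] * bottom[j] / 255) - top[j]) * opacity;
 *
 * Worked example for one pixel with top[j] = 200, bottom[j] = 100 and
 * opacity = 0.5: the multiply term is 200 * 100 / 255 = 78 (integer
 * division), so dst[j] = 200 + (78 - 200) * 0.5 = 139.
 */
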
DEFINE_BLEND(addition,   FFMIN(255, A + B))
DEFINE_BLEND(average,    (A + B) / 2)
DEFINE_BLEND(subtract,   FFMAX(0, A - B))
DEFINE_BLEND(multiply,   MULTIPLY(1, A, B))
DEFINE_BLEND(negation,   255 - FFABS(255 - A - B))
DEFINE_BLEND(difference, FFABS(A - B))
DEFINE_BLEND(screen,     SCREEN(1, A, B))
DEFINE_BLEND(overlay,    (A < 128) ? MULTIPLY(2, A, B) : SCREEN(2, A, B))
DEFINE_BLEND(hardlight,  (B < 128) ? MULTIPLY(2, B, A) : SCREEN(2, B, A))
DEFINE_BLEND(darken,     FFMIN(A, B))
DEFINE_BLEND(lighten,    FFMAX(A, B))
DEFINE_BLEND(divide,     ((float)A / ((float)B) * 255))
DEFINE_BLEND(dodge,      DODGE(A, B))
DEFINE_BLEND(burn,       BURN(A, B))
DEFINE_BLEND(softlight,  (A > 127) ? B + (255 - B) * (A - 127.5) / 127.5 * (0.5 - FFABS(B - 127.5) / 255) : B - B * ((127.5 - A) / 127.5) * (0.5 - FFABS(B - 127.5) / 255))
DEFINE_BLEND(exclusion,  A + B - 2 * A * B / 255)
DEFINE_BLEND(pinlight,   (B < 128) ? FFMIN(A, 2 * B) : FFMAX(A, 2 * (B - 128)))
DEFINE_BLEND(phoenix,    FFMIN(A, B) - FFMAX(A, B) + 255)
DEFINE_BLEND(reflect,    (B == 255) ? B : FFMIN(255, (A * A / (255 - B))))
DEFINE_BLEND(and,        A & B)
DEFINE_BLEND(or,         A | B)
DEFINE_BLEND(xor,        A ^ B)
DEFINE_BLEND(vividlight, (B < 128) ? BURN(A, 2 * B) : DODGE(A, 2 * (B - 128)))

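/*
 * blend_expr below is the generic fallback used when a cN_expr/all_expr
 * option is set: instead of a precompiled mode it evaluates the parsed
 * expression once per pixel.  X, Y, A/TOP and B/BOTTOM are refreshed inside
 * the loops here, while W, H, SW, SH and T are filled in per plane by
 * blend_frame() further down.
 */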
static void blend_expr(const uint8_t *top, int top_linesize,
                       const uint8_t *bottom, int bottom_linesize,
                       uint8_t *dst, int dst_linesize,
                       int width, int height,
                       FilterParams *param)
{
    AVExpr *e = param->e;
    double *values = param->values;
    int y, x;

    for (y = 0; y < height; y++) {
        values[VAR_Y] = y;
        for (x = 0; x < width; x++) {
            values[VAR_X]      = x;
            values[VAR_TOP]    = values[VAR_A] = top[x];
            values[VAR_BOTTOM] = values[VAR_B] = bottom[x];
            dst[x] = av_expr_eval(e, values, NULL);
        }
        dst    += dst_linesize;
        top    += top_linesize;
        bottom += bottom_linesize;
    }
}

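/*
 * init() parses the options and resolves one blend callback per plane:
 * all_mode (and an all_opacity below 1) overrides the per-component
 * settings, a named mode is mapped to its blend_<name>() function in the
 * switch below, and a cN_expr/all_expr string is compiled with
 * av_expr_parse() and takes precedence by switching the callback to
 * blend_expr().
 */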
static av_cold int init(AVFilterContext *ctx, const char *args)
{
    BlendContext *b = ctx->priv;
    int ret, plane;

    b->class = &blend_class;
    av_opt_set_defaults(b);

    if ((ret = av_set_options_string(b, args, "=", ":")) < 0)
        return ret;

    for (plane = 0; plane < FF_ARRAY_ELEMS(b->params); plane++) {
        FilterParams *param = &b->params[plane];

        if (b->all_mode >= 0)
            param->mode = b->all_mode;
        if (b->all_opacity < 1)
            param->opacity = b->all_opacity;

        switch (param->mode) {
        case BLEND_ADDITION:   param->blend = blend_addition;   break;
        case BLEND_AND:        param->blend = blend_and;        break;
        case BLEND_AVERAGE:    param->blend = blend_average;    break;
        case BLEND_BURN:       param->blend = blend_burn;       break;
        case BLEND_DARKEN:     param->blend = blend_darken;     break;
        case BLEND_DIFFERENCE: param->blend = blend_difference; break;
        case BLEND_DIVIDE:     param->blend = blend_divide;     break;
        case BLEND_DODGE:      param->blend = blend_dodge;      break;
        case BLEND_EXCLUSION:  param->blend = blend_exclusion;  break;
        case BLEND_HARDLIGHT:  param->blend = blend_hardlight;  break;
        case BLEND_LIGHTEN:    param->blend = blend_lighten;    break;
        case BLEND_MULTIPLY:   param->blend = blend_multiply;   break;
        case BLEND_NEGATION:   param->blend = blend_negation;   break;
        case BLEND_NORMAL:     param->blend = blend_normal;     break;
        case BLEND_OR:         param->blend = blend_or;         break;
        case BLEND_OVERLAY:    param->blend = blend_overlay;    break;
        case BLEND_PHOENIX:    param->blend = blend_phoenix;    break;
        case BLEND_PINLIGHT:   param->blend = blend_pinlight;   break;
        case BLEND_REFLECT:    param->blend = blend_reflect;    break;
        case BLEND_SCREEN:     param->blend = blend_screen;     break;
        case BLEND_SOFTLIGHT:  param->blend = blend_softlight;  break;
        case BLEND_SUBTRACT:   param->blend = blend_subtract;   break;
        case BLEND_VIVIDLIGHT: param->blend = blend_vividlight; break;
        case BLEND_XOR:        param->blend = blend_xor;        break;
        }

        if (b->all_expr && !param->expr_str) {
            param->expr_str = av_strdup(b->all_expr);
            if (!param->expr_str)
                return AVERROR(ENOMEM);
        }
        if (param->expr_str) {
            ret = av_expr_parse(&param->e, param->expr_str, var_names,
                                NULL, NULL, NULL, NULL, 0, ctx);
            if (ret < 0)
                return ret;
            param->blend = blend_expr;
        }
    }

    return 0;
}

static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        /* planar YUV(A), planar RGB and grayscale formats (assumed list) */
        AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P,
        AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P,
        AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_GBRP, AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE
    };

    ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
    return 0;
}

static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *toplink = ctx->inputs[TOP];
    AVFilterLink *bottomlink = ctx->inputs[BOTTOM];

    if (toplink->format != bottomlink->format) {
        av_log(ctx, AV_LOG_ERROR, "inputs must be of same pixel format\n");
        return AVERROR(EINVAL);
    }
    if (toplink->w != bottomlink->w ||
        toplink->h != bottomlink->h ||
        toplink->sample_aspect_ratio.num != bottomlink->sample_aspect_ratio.num ||
        toplink->sample_aspect_ratio.den != bottomlink->sample_aspect_ratio.den) {
        av_log(ctx, AV_LOG_ERROR, "First input link %s parameters "
               "(size %dx%d, SAR %d:%d) do not match the corresponding "
               "second input link %s parameters (%dx%d, SAR %d:%d)\n",
               ctx->input_pads[TOP].name, toplink->w, toplink->h,
               toplink->sample_aspect_ratio.num,
               toplink->sample_aspect_ratio.den,
               ctx->input_pads[BOTTOM].name, bottomlink->w, bottomlink->h,
               bottomlink->sample_aspect_ratio.num,
               bottomlink->sample_aspect_ratio.den);
        return AVERROR(EINVAL);
    }

    outlink->w = toplink->w;
    outlink->h = bottomlink->h;
    outlink->time_base = toplink->time_base;
    outlink->sample_aspect_ratio = toplink->sample_aspect_ratio;
    outlink->frame_rate = toplink->frame_rate;
    return 0;
}

static int config_input_top(AVFilterLink *inlink)
{
    BlendContext *b = inlink->dst->priv;
    const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);

    b->hsub = pix_desc->log2_chroma_w;
    b->vsub = pix_desc->log2_chroma_h;
    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    BlendContext *b = ctx->priv;
    int i;

    av_opt_free(b);
    ff_bufqueue_discard_all(&b->queue_top);
    ff_bufqueue_discard_all(&b->queue_bottom);

    for (i = 0; i < FF_ARRAY_ELEMS(b->params); i++)
        av_expr_free(b->params[i].e);
}

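/*
 * Frame synchronisation between the two inputs relies on the FFBufQueues:
 * request_frame() asks the bottom input for a frame when the top queue
 * already holds one (otherwise the top input), looping until filter_frame()
 * has produced an output frame and cleared frame_requested.  filter_frame()
 * queues each incoming buffer and only emits output once the peek checks on
 * both queues succeed, pairing one top frame with one bottom frame.
 */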
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    BlendContext *b = ctx->priv;
    int in, ret;

    b->frame_requested = 1;
    while (b->frame_requested) {
        in = ff_bufqueue_peek(&b->queue_top, TOP) ? BOTTOM : TOP;
        ret = ff_request_frame(ctx->inputs[in]);
        if (ret < 0)
            return ret;
    }
    return 0;
}

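/*
 * blend_frame() runs the selected callback once per plane of the output
 * buffer.  Planes 1 and 2 (the chroma planes of subsampled YUV) are
 * processed at the reduced size derived from the log2 chroma shifts stored
 * in hsub/vsub by config_input_top(); all other planes use the full frame
 * size.
 */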
static void blend_frame(AVFilterContext *ctx,
                        AVFilterBufferRef *top_buf,
                        AVFilterBufferRef *bottom_buf,
                        AVFilterBufferRef *dst_buf)
{
    BlendContext *b = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    FilterParams *param;
    int plane;

    for (plane = 0; dst_buf->data[plane]; plane++) {
        int hsub = plane == 1 || plane == 2 ? b->hsub : 0;
        int vsub = plane == 1 || plane == 2 ? b->vsub : 0;
        int outw = dst_buf->video->w >> hsub;
        int outh = dst_buf->video->h >> vsub;
        uint8_t *dst    = dst_buf->data[plane];
        uint8_t *top    = top_buf->data[plane];
        uint8_t *bottom = bottom_buf->data[plane];

        param = &b->params[plane];
        param->values[VAR_T]  = dst_buf->pts == AV_NOPTS_VALUE ? NAN : dst_buf->pts * av_q2d(inlink->time_base);
        param->values[VAR_W]  = outw;
        param->values[VAR_H]  = outh;
        param->values[VAR_SW] = outw / dst_buf->video->w;
        param->values[VAR_SH] = outh / dst_buf->video->h;
        param->blend(top, top_buf->linesize[plane],
                     bottom, bottom_buf->linesize[plane],
                     dst, dst_buf->linesize[plane], outw, outh, param);
    }
}

static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    BlendContext *b = ctx->priv;

    int ret = 0;
    int is_bottom = (inlink == ctx->inputs[BOTTOM]);
    struct FFBufQueue *queue =
        (is_bottom ? &b->queue_bottom : &b->queue_top);
    ff_bufqueue_add(ctx, queue, buf);

    while (1) {
        AVFilterBufferRef *top_buf, *bottom_buf, *out_buf;

        if (!ff_bufqueue_peek(&b->queue_top, TOP) ||
            !ff_bufqueue_peek(&b->queue_bottom, BOTTOM)) break;

        top_buf = ff_bufqueue_get(&b->queue_top);
        bottom_buf = ff_bufqueue_get(&b->queue_bottom);

        out_buf = ff_get_video_buffer(outlink, AV_PERM_WRITE,
                                      outlink->w, outlink->h);
        if (!out_buf) {
            return AVERROR(ENOMEM);
        }
        avfilter_copy_buffer_ref_props(out_buf, top_buf);

        b->frame_requested = 0;
        blend_frame(ctx, top_buf, bottom_buf, out_buf);
        ret = ff_filter_frame(ctx->outputs[0], out_buf);
        avfilter_unref_buffer(top_buf);
        avfilter_unref_buffer(bottom_buf);
    }
    return ret;
}


static const AVFilterPad blend_inputs[] = {
    {
        .name          = "top",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_input_top,
        .filter_frame  = filter_frame,
        .min_perms     = AV_PERM_READ | AV_PERM_PRESERVE,
    },{
        .name          = "bottom",
        .type          = AVMEDIA_TYPE_VIDEO,
        .filter_frame  = filter_frame,
        .min_perms     = AV_PERM_READ | AV_PERM_PRESERVE,
    },
    { NULL }
};

static const AVFilterPad blend_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};

AVFilter avfilter_vf_blend = {
    .name          = "blend",
    .description   = NULL_IF_CONFIG_SMALL("Blend two video frames into each other."),
    .init          = init,
    .uninit        = uninit,
    .priv_size     = sizeof(BlendContext),
    .query_formats = query_formats,
    .inputs        = blend_inputs,
    .outputs       = blend_outputs,
    .priv_class    = &blend_class,
};