FFmpeg
vf_blend.c
1 /*
2  * Copyright (c) 2013 Paul B Mahol
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include "libavutil/imgutils.h"
22 #include "libavutil/eval.h"
23 #include "libavutil/opt.h"
24 #include "libavutil/pixfmt.h"
25 #include "avfilter.h"
26 #include "formats.h"
27 #include "framesync.h"
28 #include "internal.h"
29 #include "video.h"
30 #include "blend.h"
31 
32 #define TOP 0
33 #define BOTTOM 1
34 
35 typedef struct BlendContext {
36  const AVClass *class;
37  FFFrameSync fs;
38  int hsub, vsub; ///< chroma subsampling values
39  int nb_planes;
40  char *all_expr;
41  enum BlendMode all_mode;
42  double all_opacity;
43 
44  int depth;
45  FilterParams params[4];
46  int tblend;
47  AVFrame *prev_frame; /* only used with tblend */
48 } BlendContext;
49 
50 static const char *const var_names[] = { "X", "Y", "W", "H", "SW", "SH", "T", "N", "A", "B", "TOP", "BOTTOM", NULL };
51 enum { VAR_X, VAR_Y, VAR_W, VAR_H, VAR_SW, VAR_SH, VAR_T, VAR_N, VAR_A, VAR_B, VAR_TOP, VAR_BOTTOM, VAR_VARS_NB };
52 
53 typedef struct ThreadData {
54  const AVFrame *top, *bottom;
55  AVFrame *dst;
56  AVFilterLink *inlink;
57  int plane;
58  int w, h;
59  FilterParams *param;
60 } ThreadData;
61 
62 #define COMMON_OPTIONS \
63  { "c0_mode", "set component #0 blend mode", OFFSET(params[0].mode), AV_OPT_TYPE_INT, {.i64=0}, 0, BLEND_NB-1, FLAGS, "mode"},\
64  { "c1_mode", "set component #1 blend mode", OFFSET(params[1].mode), AV_OPT_TYPE_INT, {.i64=0}, 0, BLEND_NB-1, FLAGS, "mode"},\
65  { "c2_mode", "set component #2 blend mode", OFFSET(params[2].mode), AV_OPT_TYPE_INT, {.i64=0}, 0, BLEND_NB-1, FLAGS, "mode"},\
66  { "c3_mode", "set component #3 blend mode", OFFSET(params[3].mode), AV_OPT_TYPE_INT, {.i64=0}, 0, BLEND_NB-1, FLAGS, "mode"},\
67  { "all_mode", "set blend mode for all components", OFFSET(all_mode), AV_OPT_TYPE_INT, {.i64=-1},-1, BLEND_NB-1, FLAGS, "mode"},\
68  { "addition", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_ADDITION}, 0, 0, FLAGS, "mode" },\
69  { "addition128","", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_GRAINMERGE}, 0, 0, FLAGS, "mode" },\
70  { "grainmerge", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_GRAINMERGE}, 0, 0, FLAGS, "mode" },\
71  { "and", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_AND}, 0, 0, FLAGS, "mode" },\
72  { "average", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_AVERAGE}, 0, 0, FLAGS, "mode" },\
73  { "burn", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_BURN}, 0, 0, FLAGS, "mode" },\
74  { "darken", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DARKEN}, 0, 0, FLAGS, "mode" },\
75  { "difference", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DIFFERENCE}, 0, 0, FLAGS, "mode" },\
76  { "difference128", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_GRAINEXTRACT}, 0, 0, FLAGS, "mode" },\
77  { "grainextract", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_GRAINEXTRACT}, 0, 0, FLAGS, "mode" },\
78  { "divide", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DIVIDE}, 0, 0, FLAGS, "mode" },\
79  { "dodge", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DODGE}, 0, 0, FLAGS, "mode" },\
80  { "exclusion", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_EXCLUSION}, 0, 0, FLAGS, "mode" },\
81  { "extremity", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_EXTREMITY}, 0, 0, FLAGS, "mode" },\
82  { "freeze", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_FREEZE}, 0, 0, FLAGS, "mode" },\
83  { "glow", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_GLOW}, 0, 0, FLAGS, "mode" },\
84  { "hardlight", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_HARDLIGHT}, 0, 0, FLAGS, "mode" },\
85  { "hardmix", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_HARDMIX}, 0, 0, FLAGS, "mode" },\
86  { "heat", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_HEAT}, 0, 0, FLAGS, "mode" },\
87  { "lighten", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_LIGHTEN}, 0, 0, FLAGS, "mode" },\
88  { "linearlight","", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_LINEARLIGHT},0, 0, FLAGS, "mode" },\
89  { "multiply", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_MULTIPLY}, 0, 0, FLAGS, "mode" },\
90  { "multiply128","", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_MULTIPLY128},0, 0, FLAGS, "mode" },\
91  { "negation", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_NEGATION}, 0, 0, FLAGS, "mode" },\
92  { "normal", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_NORMAL}, 0, 0, FLAGS, "mode" },\
93  { "or", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_OR}, 0, 0, FLAGS, "mode" },\
94  { "overlay", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_OVERLAY}, 0, 0, FLAGS, "mode" },\
95  { "phoenix", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_PHOENIX}, 0, 0, FLAGS, "mode" },\
96  { "pinlight", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_PINLIGHT}, 0, 0, FLAGS, "mode" },\
97  { "reflect", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_REFLECT}, 0, 0, FLAGS, "mode" },\
98  { "screen", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_SCREEN}, 0, 0, FLAGS, "mode" },\
99  { "softlight", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_SOFTLIGHT}, 0, 0, FLAGS, "mode" },\
100  { "subtract", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_SUBTRACT}, 0, 0, FLAGS, "mode" },\
101  { "vividlight", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_VIVIDLIGHT}, 0, 0, FLAGS, "mode" },\
102  { "xor", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_XOR}, 0, 0, FLAGS, "mode" },\
103  { "c0_expr", "set color component #0 expression", OFFSET(params[0].expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },\
104  { "c1_expr", "set color component #1 expression", OFFSET(params[1].expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },\
105  { "c2_expr", "set color component #2 expression", OFFSET(params[2].expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },\
106  { "c3_expr", "set color component #3 expression", OFFSET(params[3].expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },\
107  { "all_expr", "set expression for all color components", OFFSET(all_expr), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },\
108  { "c0_opacity", "set color component #0 opacity", OFFSET(params[0].opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS },\
109  { "c1_opacity", "set color component #1 opacity", OFFSET(params[1].opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS },\
110  { "c2_opacity", "set color component #2 opacity", OFFSET(params[2].opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS },\
111  { "c3_opacity", "set color component #3 opacity", OFFSET(params[3].opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS },\
112  { "all_opacity", "set opacity for all color components", OFFSET(all_opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS}
113 
114 #define OFFSET(x) offsetof(BlendContext, x)
115 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
116 
117 static const AVOption blend_options[] = {
118  COMMON_OPTIONS,
119  { NULL }
120 };
121 
122 FRAMESYNC_DEFINE_CLASS(blend, BlendContext, fs);
123 
124 #define COPY(src, depth) \
125 static void blend_copy ## src##_##depth(const uint8_t *top, ptrdiff_t top_linesize, \
126  const uint8_t *bottom, ptrdiff_t bottom_linesize,\
127  uint8_t *dst, ptrdiff_t dst_linesize, \
128  ptrdiff_t width, ptrdiff_t height, \
129  FilterParams *param, double *values, int starty) \
130 { \
131  av_image_copy_plane(dst, dst_linesize, src, src ## _linesize, \
132  width * depth / 8, height); \
133 }
134 
135 COPY(top, 8)
136 COPY(bottom, 8)
137 
138 COPY(top, 16)
139 COPY(bottom, 16)
140 
141 #undef COPY
142 
143 static void blend_normal_8bit(const uint8_t *top, ptrdiff_t top_linesize,
144  const uint8_t *bottom, ptrdiff_t bottom_linesize,
145  uint8_t *dst, ptrdiff_t dst_linesize,
146  ptrdiff_t width, ptrdiff_t height,
147  FilterParams *param, double *values, int starty)
148 {
149  const double opacity = param->opacity;
150  int i, j;
151 
152  for (i = 0; i < height; i++) {
153  for (j = 0; j < width; j++) {
154  dst[j] = top[j] * opacity + bottom[j] * (1. - opacity);
155  }
156  dst += dst_linesize;
157  top += top_linesize;
158  bottom += bottom_linesize;
159  }
160 }
161 
162 static void blend_normal_16bit(const uint8_t *_top, ptrdiff_t top_linesize,
163  const uint8_t *_bottom, ptrdiff_t bottom_linesize,
164  uint8_t *_dst, ptrdiff_t dst_linesize,
165  ptrdiff_t width, ptrdiff_t height,
166  FilterParams *param, double *values, int starty)
167 {
168  const uint16_t *top = (uint16_t*)_top;
169  const uint16_t *bottom = (uint16_t*)_bottom;
170  uint16_t *dst = (uint16_t*)_dst;
171  const double opacity = param->opacity;
172  int i, j;
173  dst_linesize /= 2;
174  top_linesize /= 2;
175  bottom_linesize /= 2;
176 
177  for (i = 0; i < height; i++) {
178  for (j = 0; j < width; j++) {
179  dst[j] = top[j] * opacity + bottom[j] * (1. - opacity);
180  }
181  dst += dst_linesize;
182  top += top_linesize;
183  bottom += bottom_linesize;
184  }
185 }
186 
187 #define DEFINE_BLEND8(name, expr) \
188 static void blend_## name##_8bit(const uint8_t *top, ptrdiff_t top_linesize, \
189  const uint8_t *bottom, ptrdiff_t bottom_linesize, \
190  uint8_t *dst, ptrdiff_t dst_linesize, \
191  ptrdiff_t width, ptrdiff_t height, \
192  FilterParams *param, double *values, int starty) \
193 { \
194  double opacity = param->opacity; \
195  int i, j; \
196  \
197  for (i = 0; i < height; i++) { \
198  for (j = 0; j < width; j++) { \
199  dst[j] = top[j] + ((expr) - top[j]) * opacity; \
200  } \
201  dst += dst_linesize; \
202  top += top_linesize; \
203  bottom += bottom_linesize; \
204  } \
205 }
206 
207 #define DEFINE_BLEND16(name, expr, depth) \
208 static void blend_## name##_##depth##bit(const uint8_t *_top, ptrdiff_t top_linesize,\
209  const uint8_t *_bottom, ptrdiff_t bottom_linesize, \
210  uint8_t *_dst, ptrdiff_t dst_linesize, \
211  ptrdiff_t width, ptrdiff_t height, \
212  FilterParams *param, double *values, int starty) \
213 { \
214  const uint16_t *top = (const uint16_t*)_top; \
215  const uint16_t *bottom = (const uint16_t*)_bottom; \
216  uint16_t *dst = (uint16_t*)_dst; \
217  double opacity = param->opacity; \
218  int i, j; \
219  dst_linesize /= 2; \
220  top_linesize /= 2; \
221  bottom_linesize /= 2; \
222  \
223  for (i = 0; i < height; i++) { \
224  for (j = 0; j < width; j++) { \
225  dst[j] = top[j] + ((expr) - top[j]) * opacity; \
226  } \
227  dst += dst_linesize; \
228  top += top_linesize; \
229  bottom += bottom_linesize; \
230  } \
231 }
232 
233 #define A top[j]
234 #define B bottom[j]
235 
236 #define MULTIPLY(x, a, b) ((x) * (((a) * (b)) / 255))
237 #define SCREEN(x, a, b) (255 - (x) * ((255 - (a)) * (255 - (b)) / 255))
238 #define BURN(a, b) (((a) == 0) ? (a) : FFMAX(0, 255 - ((255 - (b)) << 8) / (a)))
239 #define DODGE(a, b) (((a) == 255) ? (a) : FFMIN(255, (((b) << 8) / (255 - (a)))))
240 
241 DEFINE_BLEND8(addition, FFMIN(255, A + B))
242 DEFINE_BLEND8(grainmerge, av_clip_uint8(A + B - 128))
243 DEFINE_BLEND8(average, (A + B) / 2)
244 DEFINE_BLEND8(subtract, FFMAX(0, A - B))
245 DEFINE_BLEND8(multiply, MULTIPLY(1, A, B))
246 DEFINE_BLEND8(multiply128,av_clip_uint8((A - 128) * B / 32. + 128))
247 DEFINE_BLEND8(negation, 255 - FFABS(255 - A - B))
248 DEFINE_BLEND8(extremity, FFABS(255 - A - B))
249 DEFINE_BLEND8(difference, FFABS(A - B))
250 DEFINE_BLEND8(grainextract, av_clip_uint8(128 + A - B))
251 DEFINE_BLEND8(screen, SCREEN(1, A, B))
252 DEFINE_BLEND8(overlay, (A < 128) ? MULTIPLY(2, A, B) : SCREEN(2, A, B))
253 DEFINE_BLEND8(hardlight, (B < 128) ? MULTIPLY(2, B, A) : SCREEN(2, B, A))
254 DEFINE_BLEND8(hardmix, (A < (255 - B)) ? 0: 255)
255 DEFINE_BLEND8(heat, (A == 0) ? 0 : 255 - FFMIN(((255 - B) * (255 - B)) / A, 255))
256 DEFINE_BLEND8(freeze, (B == 0) ? 0 : 255 - FFMIN(((255 - A) * (255 - A)) / B, 255))
257 DEFINE_BLEND8(darken, FFMIN(A, B))
258 DEFINE_BLEND8(lighten, FFMAX(A, B))
259 DEFINE_BLEND8(divide, av_clip_uint8(B == 0 ? 255 : 255 * A / B))
260 DEFINE_BLEND8(dodge, DODGE(A, B))
261 DEFINE_BLEND8(burn, BURN(A, B))
262 DEFINE_BLEND8(softlight, (A > 127) ? B + (255 - B) * (A - 127.5) / 127.5 * (0.5 - fabs(B - 127.5) / 255): B - B * ((127.5 - A) / 127.5) * (0.5 - fabs(B - 127.5)/255))
263 DEFINE_BLEND8(exclusion, A + B - 2 * A * B / 255)
264 DEFINE_BLEND8(pinlight, (B < 128) ? FFMIN(A, 2 * B) : FFMAX(A, 2 * (B - 128)))
265 DEFINE_BLEND8(phoenix, FFMIN(A, B) - FFMAX(A, B) + 255)
266 DEFINE_BLEND8(reflect, (B == 255) ? B : FFMIN(255, (A * A / (255 - B))))
267 DEFINE_BLEND8(glow, (A == 255) ? A : FFMIN(255, (B * B / (255 - A))))
268 DEFINE_BLEND8(and, A & B)
269 DEFINE_BLEND8(or, A | B)
270 DEFINE_BLEND8(xor, A ^ B)
271 DEFINE_BLEND8(vividlight, (A < 128) ? BURN(2 * A, B) : DODGE(2 * (A - 128), B))
272 DEFINE_BLEND8(linearlight,av_clip_uint8((B < 128) ? B + 2 * A - 255 : B + 2 * (A - 128)))
273 
274 #undef MULTIPLY
275 #undef SCREEN
276 #undef BURN
277 #undef DODGE
278 
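278 

The DEFINE_BLEND8/DEFINE_BLEND16 macros above all expand to the same per-pixel loop: evaluate the mode expression on A (the top sample) and B (the bottom sample), then mix the result back toward the top sample by the per-component opacity. Below is a minimal, self-contained sketch of that pattern for the 8-bit "addition" mode; the helper name blend_addition_row and the local FFMIN stand-in are illustrative only and are not part of vf_blend.c.

#include <stdio.h>
#include <stdint.h>

#define FFMIN(a, b) ((a) < (b) ? (a) : (b))   /* stand-in for the libavutil macro */

static void blend_addition_row(const uint8_t *top, const uint8_t *bottom,
                               uint8_t *dst, int width, double opacity)
{
    for (int j = 0; j < width; j++) {
        int expr = FFMIN(255, top[j] + bottom[j]);   /* the "addition" mode expression */
        dst[j] = top[j] + (expr - top[j]) * opacity; /* opacity mix toward the top sample */
    }
}

int main(void)
{
    const uint8_t a[4] = { 10, 100, 200, 250 };
    const uint8_t b[4] = { 20,  50, 100, 100 };
    uint8_t d[4];

    blend_addition_row(a, b, d, 4, 0.5);
    for (int j = 0; j < 4; j++)
        printf("%d ", d[j]);   /* prints: 20 125 227 252 */
    printf("\n");
    return 0;
}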
279 #define MULTIPLY(x, a, b) ((x) * (((a) * (b)) / 65535))
280 #define SCREEN(x, a, b) (65535 - (x) * ((65535 - (a)) * (65535 - (b)) / 65535))
281 #define BURN(a, b) (((a) == 0) ? (a) : FFMAX(0, 65535 - ((65535 - (b)) << 16) / (a)))
282 #define DODGE(a, b) (((a) == 65535) ? (a) : FFMIN(65535, (((b) << 16) / (65535 - (a)))))
283 
284 DEFINE_BLEND16(addition, FFMIN(65535, A + B), 16)
285 DEFINE_BLEND16(grainmerge, av_clip_uint16(A + B - 32768), 16)
286 DEFINE_BLEND16(average, (A + B) / 2, 16)
287 DEFINE_BLEND16(subtract, FFMAX(0, A - B), 16)
288 DEFINE_BLEND16(multiply, MULTIPLY(1, A, B), 16)
289 DEFINE_BLEND16(multiply128, av_clip_uint16((A - 32768) * B / 8192. + 32768), 16)
290 DEFINE_BLEND16(negation, 65535 - FFABS(65535 - A - B), 16)
291 DEFINE_BLEND16(extremity, FFABS(65535 - A - B), 16)
292 DEFINE_BLEND16(difference, FFABS(A - B), 16)
293 DEFINE_BLEND16(grainextract, av_clip_uint16(32768 + A - B), 16)
294 DEFINE_BLEND16(screen, SCREEN(1, A, B), 16)
295 DEFINE_BLEND16(overlay, (A < 32768) ? MULTIPLY(2, A, B) : SCREEN(2, A, B), 16)
296 DEFINE_BLEND16(hardlight, (B < 32768) ? MULTIPLY(2, B, A) : SCREEN(2, B, A), 16)
297 DEFINE_BLEND16(hardmix, (A < (65535 - B)) ? 0: 65535, 16)
298 DEFINE_BLEND16(heat, (A == 0) ? 0 : 65535 - FFMIN(((65535 - B) * (65535 - B)) / A, 65535), 16)
299 DEFINE_BLEND16(freeze, (B == 0) ? 0 : 65535 - FFMIN(((65535 - A) * (65535 - A)) / B, 65535), 16)
300 DEFINE_BLEND16(darken, FFMIN(A, B), 16)
301 DEFINE_BLEND16(lighten, FFMAX(A, B), 16)
302 DEFINE_BLEND16(divide, av_clip_uint16(B == 0 ? 65535 : 65535 * A / B), 16)
303 DEFINE_BLEND16(dodge, DODGE(A, B), 16)
304 DEFINE_BLEND16(burn, BURN(A, B), 16)
305 DEFINE_BLEND16(softlight, (A > 32767) ? B + (65535 - B) * (A - 32767.5) / 32767.5 * (0.5 - fabs(B - 32767.5) / 65535): B - B * ((32767.5 - A) / 32767.5) * (0.5 - fabs(B - 32767.5)/65535), 16)
306 DEFINE_BLEND16(exclusion, A + B - 2 * A * B / 65535, 16)
307 DEFINE_BLEND16(pinlight, (B < 32768) ? FFMIN(A, 2 * B) : FFMAX(A, 2 * (B - 32768)), 16)
308 DEFINE_BLEND16(phoenix, FFMIN(A, B) - FFMAX(A, B) + 65535, 16)
309 DEFINE_BLEND16(reflect, (B == 65535) ? B : FFMIN(65535, (A * A / (65535 - B))), 16)
310 DEFINE_BLEND16(glow, (A == 65535) ? A : FFMIN(65535, (B * B / (65535 - A))), 16)
311 DEFINE_BLEND16(and, A & B, 16)
312 DEFINE_BLEND16(or, A | B, 16)
313 DEFINE_BLEND16(xor, A ^ B, 16)
314 DEFINE_BLEND16(vividlight, (A < 32768) ? BURN(2 * A, B) : DODGE(2 * (A - 32768), B), 16)
315 DEFINE_BLEND16(linearlight,av_clip_uint16((B < 32768) ? B + 2 * A - 65535 : B + 2 * (A - 32768)), 16)
316 
317 #undef MULTIPLY
318 #undef SCREEN
319 #undef BURN
320 #undef DODGE
321 
322 #define MULTIPLY(x, a, b) ((x) * (((a) * (b)) / 1023))
323 #define SCREEN(x, a, b) (1023 - (x) * ((1023 - (a)) * (1023 - (b)) / 1023))
324 #define BURN(a, b) (((a) == 0) ? (a) : FFMAX(0, 1023 - ((1023 - (b)) << 10) / (a)))
325 #define DODGE(a, b) (((a) == 1023) ? (a) : FFMIN(1023, (((b) << 10) / (1023 - (a)))))
326 
327 DEFINE_BLEND16(addition, FFMIN(1023, A + B), 10)
328 DEFINE_BLEND16(grainmerge, (int)av_clip_uintp2(A + B - 512, 10), 10)
329 DEFINE_BLEND16(average, (A + B) / 2, 10)
330 DEFINE_BLEND16(subtract, FFMAX(0, A - B), 10)
331 DEFINE_BLEND16(multiply, MULTIPLY(1, A, B), 10)
332 DEFINE_BLEND16(multiply128, (int)av_clip_uintp2((A - 512) * B / 128. + 512, 10), 10)
333 DEFINE_BLEND16(negation, 1023 - FFABS(1023 - A - B), 10)
334 DEFINE_BLEND16(extremity, FFABS(1023 - A - B), 10)
335 DEFINE_BLEND16(difference, FFABS(A - B), 10)
336 DEFINE_BLEND16(grainextract, (int)av_clip_uintp2(512 + A - B, 10), 10)
337 DEFINE_BLEND16(screen, SCREEN(1, A, B), 10)
338 DEFINE_BLEND16(overlay, (A < 512) ? MULTIPLY(2, A, B) : SCREEN(2, A, B), 10)
339 DEFINE_BLEND16(hardlight, (B < 512) ? MULTIPLY(2, B, A) : SCREEN(2, B, A), 10)
340 DEFINE_BLEND16(hardmix, (A < (1023 - B)) ? 0: 1023, 10)
341 DEFINE_BLEND16(heat, (A == 0) ? 0 : 1023 - FFMIN(((1023 - B) * (1023 - B)) / A, 1023), 10)
342 DEFINE_BLEND16(freeze, (B == 0) ? 0 : 1023 - FFMIN(((1023 - A) * (1023 - A)) / B, 1023), 10)
343 DEFINE_BLEND16(darken, FFMIN(A, B), 10)
344 DEFINE_BLEND16(lighten, FFMAX(A, B), 10)
345 DEFINE_BLEND16(divide, (int)av_clip_uintp2(B == 0 ? 1023 : 1023 * A / B, 10), 10)
346 DEFINE_BLEND16(dodge, DODGE(A, B), 10)
347 DEFINE_BLEND16(burn, BURN(A, B), 10)
348 DEFINE_BLEND16(softlight, (A > 511) ? B + (1023 - B) * (A - 511.5) / 511.5 * (0.5 - fabs(B - 511.5) / 1023): B - B * ((511.5 - A) / 511.5) * (0.5 - fabs(B - 511.5)/1023), 10)
349 DEFINE_BLEND16(exclusion, A + B - 2 * A * B / 1023, 10)
350 DEFINE_BLEND16(pinlight, (B < 512) ? FFMIN(A, 2 * B) : FFMAX(A, 2 * (B - 512)), 10)
351 DEFINE_BLEND16(phoenix, FFMIN(A, B) - FFMAX(A, B) + 1023, 10)
352 DEFINE_BLEND16(reflect, (B == 1023) ? B : FFMIN(1023, (A * A / (1023 - B))), 10)
353 DEFINE_BLEND16(glow, (A == 1023) ? A : FFMIN(1023, (B * B / (1023 - A))), 10)
354 DEFINE_BLEND16(and, A & B, 10)
355 DEFINE_BLEND16(or, A | B, 10)
356 DEFINE_BLEND16(xor, A ^ B, 10)
357 DEFINE_BLEND16(vividlight, (A < 512) ? BURN(2 * A, B) : DODGE(2 * (A - 512), B), 10)
358 DEFINE_BLEND16(linearlight,(int)av_clip_uintp2((B < 512) ? B + 2 * A - 1023 : B + 2 * (A - 512), 10), 10)
359 
360 #undef MULTIPLY
361 #undef SCREEN
362 #undef BURN
363 #undef DODGE
364 
365 #define MULTIPLY(x, a, b) ((x) * (((a) * (b)) / 4095))
366 #define SCREEN(x, a, b) (4095 - (x) * ((4095 - (a)) * (4095 - (b)) / 4095))
367 #define BURN(a, b) (((a) == 0) ? (a) : FFMAX(0, 4095 - ((4095 - (b)) << 12) / (a)))
368 #define DODGE(a, b) (((a) == 4095) ? (a) : FFMIN(4095, (((b) << 12) / (4095 - (a)))))
369 
370 DEFINE_BLEND16(addition, FFMIN(4095, A + B), 12)
371 DEFINE_BLEND16(grainmerge, (int)av_clip_uintp2(A + B - 2048, 12), 12)
372 DEFINE_BLEND16(average, (A + B) / 2, 12)
373 DEFINE_BLEND16(subtract, FFMAX(0, A - B), 12)
374 DEFINE_BLEND16(multiply, MULTIPLY(1, A, B), 12)
375 DEFINE_BLEND16(multiply128, (int)av_clip_uintp2((A - 2048) * B / 512. + 2048, 12), 12)
376 DEFINE_BLEND16(negation, 4095 - FFABS(4095 - A - B), 12)
377 DEFINE_BLEND16(extremity, FFABS(4095 - A - B), 12)
378 DEFINE_BLEND16(difference, FFABS(A - B), 12)
379 DEFINE_BLEND16(grainextract, (int)av_clip_uintp2(2048 + A - B, 12), 12)
380 DEFINE_BLEND16(screen, SCREEN(1, A, B), 12)
381 DEFINE_BLEND16(overlay, (A < 2048) ? MULTIPLY(2, A, B) : SCREEN(2, A, B), 12)
382 DEFINE_BLEND16(hardlight, (B < 2048) ? MULTIPLY(2, B, A) : SCREEN(2, B, A), 12)
383 DEFINE_BLEND16(hardmix, (A < (4095 - B)) ? 0: 4095, 12)
384 DEFINE_BLEND16(heat, (A == 0) ? 0 : 4095 - FFMIN(((4095 - B) * (4095 - B)) / A, 4095), 12)
385 DEFINE_BLEND16(freeze, (B == 0) ? 0 : 4095 - FFMIN(((4095 - A) * (4095 - A)) / B, 4095), 12)
386 DEFINE_BLEND16(darken, FFMIN(A, B), 12)
387 DEFINE_BLEND16(lighten, FFMAX(A, B), 12)
388 DEFINE_BLEND16(divide, (int)av_clip_uintp2(B == 0 ? 4095 : 4095 * A / B, 12), 12)
389 DEFINE_BLEND16(dodge, DODGE(A, B), 12)
390 DEFINE_BLEND16(burn, BURN(A, B), 12)
391 DEFINE_BLEND16(softlight, (A > 2047) ? B + (4095 - B) * (A - 2047.5) / 2047.5 * (0.5 - fabs(B - 2047.5) / 4095): B - B * ((2047.5 - A) / 2047.5) * (0.5 - fabs(B - 2047.5)/4095), 12)
392 DEFINE_BLEND16(exclusion, A + B - 2 * A * B / 4095, 12)
393 DEFINE_BLEND16(pinlight, (B < 2048) ? FFMIN(A, 2 * B) : FFMAX(A, 2 * (B - 2048)), 12)
394 DEFINE_BLEND16(phoenix, FFMIN(A, B) - FFMAX(A, B) + 4095, 12)
395 DEFINE_BLEND16(reflect, (B == 4095) ? B : FFMIN(4095, (A * A / (4095 - B))), 12)
396 DEFINE_BLEND16(glow, (A == 4095) ? A : FFMIN(4095, (B * B / (4095 - A))), 12)
397 DEFINE_BLEND16(and, A & B, 12)
398 DEFINE_BLEND16(or, A | B, 12)
399 DEFINE_BLEND16(xor, A ^ B, 12)
400 DEFINE_BLEND16(vividlight, (A < 2048) ? BURN(2 * A, B) : DODGE(2 * (A - 2048), B), 12)
401 DEFINE_BLEND16(linearlight,(int)av_clip_uintp2((B < 2048) ? B + 2 * A - 4095 : B + 2 * (A - 2048), 12), 12)
402 
403 #define DEFINE_BLEND_EXPR(type, name, div) \
404 static void blend_expr_## name(const uint8_t *_top, ptrdiff_t top_linesize, \
405  const uint8_t *_bottom, ptrdiff_t bottom_linesize, \
406  uint8_t *_dst, ptrdiff_t dst_linesize, \
407  ptrdiff_t width, ptrdiff_t height, \
408  FilterParams *param, double *values, int starty) \
409 { \
410  const type *top = (type*)_top; \
411  const type *bottom = (type*)_bottom; \
412  type *dst = (type*)_dst; \
413  AVExpr *e = param->e; \
414  int y, x; \
415  dst_linesize /= div; \
416  top_linesize /= div; \
417  bottom_linesize /= div; \
418  \
419  for (y = 0; y < height; y++) { \
420  values[VAR_Y] = y + starty; \
421  for (x = 0; x < width; x++) { \
422  values[VAR_X] = x; \
423  values[VAR_TOP] = values[VAR_A] = top[x]; \
424  values[VAR_BOTTOM] = values[VAR_B] = bottom[x]; \
425  dst[x] = av_expr_eval(e, values, NULL); \
426  } \
427  dst += dst_linesize; \
428  top += top_linesize; \
429  bottom += bottom_linesize; \
430  } \
431 }
432 
433 DEFINE_BLEND_EXPR(uint8_t, 8bit, 1)
434 DEFINE_BLEND_EXPR(uint16_t, 16bit, 2)
435 
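435 

When a cN_expr/all_expr option is set, the filter takes the slower path above: the expression string is parsed once against var_names[] and then evaluated once per pixel with the position, size, time and sample variables filled in. The following standalone sketch of that libavutil eval API usage is illustrative only; in the filter itself the parsing happens in config_output() and the evaluation in blend_expr_8bit/16bit.

#include <stdio.h>
#include "libavutil/eval.h"

static const char *const names[] = { "X", "Y", "W", "H", "SW", "SH", "T", "N",
                                     "A", "B", "TOP", "BOTTOM", NULL };

int main(void)
{
    AVExpr *e = NULL;
    double v[12] = { 0 };

    /* a 50/50 mix of the two inputs, ignoring position and time */
    if (av_expr_parse(&e, "(A+B)/2", names, NULL, NULL, NULL, NULL, 0, NULL) < 0)
        return 1;

    v[8]  = 200;  /* A      (top sample)    */
    v[9]  = 100;  /* B      (bottom sample) */
    v[10] = v[8]; /* TOP    */
    v[11] = v[9]; /* BOTTOM */

    printf("result = %g\n", av_expr_eval(e, v, NULL)); /* prints 150 */
    av_expr_free(e);
    return 0;
}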
436 static int filter_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
437 {
438  ThreadData *td = arg;
439  int slice_start = (td->h * jobnr ) / nb_jobs;
440  int slice_end = (td->h * (jobnr+1)) / nb_jobs;
441  int height = slice_end - slice_start;
442  const uint8_t *top = td->top->data[td->plane];
443  const uint8_t *bottom = td->bottom->data[td->plane];
444  uint8_t *dst = td->dst->data[td->plane];
445  double values[VAR_VARS_NB];
446 
447  values[VAR_N] = td->inlink->frame_count_out;
448  values[VAR_T] = td->dst->pts == AV_NOPTS_VALUE ? NAN : td->dst->pts * av_q2d(td->inlink->time_base);
449  values[VAR_W] = td->w;
450  values[VAR_H] = td->h;
451  values[VAR_SW] = td->w / (double)td->dst->width;
452  values[VAR_SH] = td->h / (double)td->dst->height;
453 
454  td->param->blend(top + slice_start * td->top->linesize[td->plane],
455  td->top->linesize[td->plane],
456  bottom + slice_start * td->bottom->linesize[td->plane],
457  td->bottom->linesize[td->plane],
458  dst + slice_start * td->dst->linesize[td->plane],
459  td->dst->linesize[td->plane],
460  td->w, height, td->param, &values[0], slice_start);
461  return 0;
462 }
463 
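463 

filter_slice() above splits the rows of one plane across the worker threads: job jobnr of nb_jobs handles rows [h*jobnr/nb_jobs, h*(jobnr+1)/nb_jobs), so every row is processed exactly once. A tiny standalone sketch of that partitioning (not part of vf_blend.c) follows.

#include <stdio.h>

int main(void)
{
    int h = 10, nb_jobs = 3;

    for (int jobnr = 0; jobnr < nb_jobs; jobnr++) {
        int slice_start = (h * jobnr)       / nb_jobs;
        int slice_end   = (h * (jobnr + 1)) / nb_jobs;
        printf("job %d: rows %d..%d\n", jobnr, slice_start, slice_end - 1);
    }
    /* prints: job 0: rows 0..2, job 1: rows 3..5, job 2: rows 6..9 */
    return 0;
}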
464 static AVFrame *blend_frame(AVFilterContext *ctx, AVFrame *top_buf,
465  const AVFrame *bottom_buf)
466 {
467  BlendContext *s = ctx->priv;
468  AVFilterLink *inlink = ctx->inputs[0];
469  AVFilterLink *outlink = ctx->outputs[0];
470  AVFrame *dst_buf;
471  int plane;
472 
473  dst_buf = ff_get_video_buffer(outlink, outlink->w, outlink->h);
474  if (!dst_buf)
475  return top_buf;
476  av_frame_copy_props(dst_buf, top_buf);
477 
478  for (plane = 0; plane < s->nb_planes; plane++) {
479  int hsub = plane == 1 || plane == 2 ? s->hsub : 0;
480  int vsub = plane == 1 || plane == 2 ? s->vsub : 0;
481  int outw = AV_CEIL_RSHIFT(dst_buf->width, hsub);
482  int outh = AV_CEIL_RSHIFT(dst_buf->height, vsub);
483  FilterParams *param = &s->params[plane];
484  ThreadData td = { .top = top_buf, .bottom = bottom_buf, .dst = dst_buf,
485  .w = outw, .h = outh, .param = param, .plane = plane,
486  .inlink = inlink };
487 
488  ctx->internal->execute(ctx, filter_slice, &td, NULL, FFMIN(outh, ff_filter_get_nb_threads(ctx)));
489  }
490 
491  if (!s->tblend)
492  av_frame_free(&top_buf);
493 
494  return dst_buf;
495 }
496 
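496 

blend_frame() above derives each plane's width and height from the frame size and the chroma subsampling shifts (hsub/vsub) with AV_CEIL_RSHIFT, so odd luma dimensions round up. The sketch below uses a simplified stand-in for the libavutil macro to show the arithmetic; it is illustrative only.

#include <stdio.h>

/* simplified stand-in for libavutil's AV_CEIL_RSHIFT (valid for non-negative a) */
#define CEIL_RSHIFT(a, b) (((a) + (1 << (b)) - 1) >> (b))

int main(void)
{
    int w = 1919, h = 1081;  /* odd luma size on purpose */
    int hsub = 1, vsub = 1;  /* 4:2:0: chroma halved in both directions */

    printf("chroma plane: %dx%d\n",
           CEIL_RSHIFT(w, hsub), CEIL_RSHIFT(h, vsub)); /* prints 960x541 */
    return 0;
}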
497 static int blend_frame_for_dualinput(FFFrameSync *fs)
498 {
499  AVFilterContext *ctx = fs->parent;
500  AVFrame *top_buf, *bottom_buf, *dst_buf;
501  int ret;
502 
503  ret = ff_framesync_dualinput_get(fs, &top_buf, &bottom_buf);
504  if (ret < 0)
505  return ret;
506  if (!bottom_buf)
507  return ff_filter_frame(ctx->outputs[0], top_buf);
508  dst_buf = blend_frame(ctx, top_buf, bottom_buf);
509  return ff_filter_frame(ctx->outputs[0], dst_buf);
510 }
511 
512 static av_cold int init(AVFilterContext *ctx)
513 {
514  BlendContext *s = ctx->priv;
515 
516  s->tblend = !strcmp(ctx->filter->name, "tblend");
517 
518  s->fs.on_event = blend_frame_for_dualinput;
519  return 0;
520 }
521 
522 static int query_formats(AVFilterContext *ctx)
523 {
524  static const enum AVPixelFormat pix_fmts[] = {
525  AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUVJ440P,
526  AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUV420P,
527  AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ411P,
528  AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
529  AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10, AV_PIX_FMT_YUV440P10,
530  AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA444P10,
531  AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV440P12,
532  AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
533  AV_PIX_FMT_YUVA420P16, AV_PIX_FMT_YUVA422P16, AV_PIX_FMT_YUVA444P16,
534  AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP10, AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP16,
535  AV_PIX_FMT_GBRAP, AV_PIX_FMT_GBRAP10, AV_PIX_FMT_GBRAP12, AV_PIX_FMT_GBRAP16,
536  AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY10, AV_PIX_FMT_GRAY12, AV_PIX_FMT_GRAY16,
537  AV_PIX_FMT_NONE
538  };
539 
540  AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
541  if (!fmts_list)
542  return AVERROR(ENOMEM);
543  return ff_set_common_formats(ctx, fmts_list);
544 }
545 
546 static av_cold void uninit(AVFilterContext *ctx)
547 {
548  BlendContext *s = ctx->priv;
549  int i;
550 
551  ff_framesync_uninit(&s->fs);
552  av_frame_free(&s->prev_frame);
553 
554  for (i = 0; i < FF_ARRAY_ELEMS(s->params); i++)
555  av_expr_free(s->params[i].e);
556 }
557 
558 #define DEFINE_INIT_BLEND_FUNC(depth, nbits) \
559 static av_cold void init_blend_func_##depth##_##nbits##bit(FilterParams *param) \
560 { \
561  switch (param->mode) { \
562  case BLEND_ADDITION: param->blend = blend_addition_##depth##bit; break; \
563  case BLEND_GRAINMERGE: param->blend = blend_grainmerge_##depth##bit; break; \
564  case BLEND_AND: param->blend = blend_and_##depth##bit; break; \
565  case BLEND_AVERAGE: param->blend = blend_average_##depth##bit; break; \
566  case BLEND_BURN: param->blend = blend_burn_##depth##bit; break; \
567  case BLEND_DARKEN: param->blend = blend_darken_##depth##bit; break; \
568  case BLEND_DIFFERENCE: param->blend = blend_difference_##depth##bit; break; \
569  case BLEND_GRAINEXTRACT: param->blend = blend_grainextract_##depth##bit; break; \
570  case BLEND_DIVIDE: param->blend = blend_divide_##depth##bit; break; \
571  case BLEND_DODGE: param->blend = blend_dodge_##depth##bit; break; \
572  case BLEND_EXCLUSION: param->blend = blend_exclusion_##depth##bit; break; \
573  case BLEND_EXTREMITY: param->blend = blend_extremity_##depth##bit; break; \
574  case BLEND_FREEZE: param->blend = blend_freeze_##depth##bit; break; \
575  case BLEND_GLOW: param->blend = blend_glow_##depth##bit; break; \
576  case BLEND_HARDLIGHT: param->blend = blend_hardlight_##depth##bit; break; \
577  case BLEND_HARDMIX: param->blend = blend_hardmix_##depth##bit; break; \
578  case BLEND_HEAT: param->blend = blend_heat_##depth##bit; break; \
579  case BLEND_LIGHTEN: param->blend = blend_lighten_##depth##bit; break; \
580  case BLEND_LINEARLIGHT:param->blend = blend_linearlight_##depth##bit;break; \
581  case BLEND_MULTIPLY: param->blend = blend_multiply_##depth##bit; break; \
582  case BLEND_MULTIPLY128:param->blend = blend_multiply128_##depth##bit;break; \
583  case BLEND_NEGATION: param->blend = blend_negation_##depth##bit; break; \
584  case BLEND_NORMAL: param->blend = blend_normal_##nbits##bit; break; \
585  case BLEND_OR: param->blend = blend_or_##depth##bit; break; \
586  case BLEND_OVERLAY: param->blend = blend_overlay_##depth##bit; break; \
587  case BLEND_PHOENIX: param->blend = blend_phoenix_##depth##bit; break; \
588  case BLEND_PINLIGHT: param->blend = blend_pinlight_##depth##bit; break; \
589  case BLEND_REFLECT: param->blend = blend_reflect_##depth##bit; break; \
590  case BLEND_SCREEN: param->blend = blend_screen_##depth##bit; break; \
591  case BLEND_SOFTLIGHT: param->blend = blend_softlight_##depth##bit; break; \
592  case BLEND_SUBTRACT: param->blend = blend_subtract_##depth##bit; break; \
593  case BLEND_VIVIDLIGHT: param->blend = blend_vividlight_##depth##bit; break; \
594  case BLEND_XOR: param->blend = blend_xor_##depth##bit; break; \
595  } \
596 }
597 DEFINE_INIT_BLEND_FUNC(8, 8);
598 DEFINE_INIT_BLEND_FUNC(10, 16);
599 DEFINE_INIT_BLEND_FUNC(12, 16);
600 DEFINE_INIT_BLEND_FUNC(16, 16);
601 
602 void ff_blend_init(FilterParams *param, int depth)
603 {
604  switch (depth) {
605  case 8:
606  init_blend_func_8_8bit(param);
607  break;
608  case 10:
609  init_blend_func_10_16bit(param);
610  break;
611  case 12:
612  init_blend_func_12_16bit(param);
613  break;
614  case 16:
615  init_blend_func_16_16bit(param);
616  break;
617  }
618 
619  if (param->opacity == 0 && param->mode != BLEND_NORMAL) {
620  param->blend = depth > 8 ? blend_copytop_16 : blend_copytop_8;
621  } else if (param->mode == BLEND_NORMAL) {
622  if (param->opacity == 1)
623  param->blend = depth > 8 ? blend_copytop_16 : blend_copytop_8;
624  else if (param->opacity == 0)
625  param->blend = depth > 8 ? blend_copybottom_16 : blend_copybottom_8;
626  }
627 
628  if (ARCH_X86)
629  ff_blend_init_x86(param, depth);
630 }
631 
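631 

ff_blend_init() above resolves the per-component function pointer, and its fast paths ("normal" mode with opacity 1 or 0, or opacity 0 with any other mode) fall back to the blend_copytop/copybottom helpers generated by COPY(), which are plain av_image_copy_plane() calls. A minimal standalone sketch of that plane copy (illustrative only, not part of vf_blend.c) follows.

#include <stdio.h>
#include <stdint.h>
#include "libavutil/imgutils.h"

int main(void)
{
    uint8_t src[2][8] = { { 1, 2, 3, 4 }, { 5, 6, 7, 8 } }; /* 4x2 plane, linesize 8 */
    uint8_t dst[2][8] = { { 0 } };

    /* copy 4 bytes per row, 2 rows, honoring each buffer's linesize */
    av_image_copy_plane(dst[0], 8, src[0], 8, 4, 2);

    printf("%d %d\n", dst[1][0], dst[1][3]); /* prints: 5 8 */
    return 0;
}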
632 static int config_output(AVFilterLink *outlink)
633 {
634  AVFilterContext *ctx = outlink->src;
635  AVFilterLink *toplink = ctx->inputs[TOP];
636  BlendContext *s = ctx->priv;
637  const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(toplink->format);
638  int ret, plane;
639 
640  if (!s->tblend) {
641  AVFilterLink *bottomlink = ctx->inputs[BOTTOM];
642 
643  if (toplink->format != bottomlink->format) {
644  av_log(ctx, AV_LOG_ERROR, "inputs must be of same pixel format\n");
645  return AVERROR(EINVAL);
646  }
647  if (toplink->w != bottomlink->w || toplink->h != bottomlink->h) {
648  av_log(ctx, AV_LOG_ERROR, "First input link %s parameters "
649  "(size %dx%d) do not match the corresponding "
650  "second input link %s parameters (size %dx%d)\n",
651  ctx->input_pads[TOP].name, toplink->w, toplink->h,
652  ctx->input_pads[BOTTOM].name, bottomlink->w, bottomlink->h);
653  return AVERROR(EINVAL);
654  }
655  }
656 
657  outlink->w = toplink->w;
658  outlink->h = toplink->h;
659  outlink->time_base = toplink->time_base;
660  outlink->sample_aspect_ratio = toplink->sample_aspect_ratio;
661  outlink->frame_rate = toplink->frame_rate;
662 
663  s->hsub = pix_desc->log2_chroma_w;
664  s->vsub = pix_desc->log2_chroma_h;
665 
666  s->depth = pix_desc->comp[0].depth;
667  s->nb_planes = av_pix_fmt_count_planes(toplink->format);
668 
669  if (!s->tblend)
670  if ((ret = ff_framesync_init_dualinput(&s->fs, ctx)) < 0)
671  return ret;
672 
673  for (plane = 0; plane < FF_ARRAY_ELEMS(s->params); plane++) {
674  FilterParams *param = &s->params[plane];
675 
676  if (s->all_mode >= 0)
677  param->mode = s->all_mode;
678  if (s->all_opacity < 1)
679  param->opacity = s->all_opacity;
680 
681  ff_blend_init(param, s->depth);
682 
683  if (s->all_expr && !param->expr_str) {
684  param->expr_str = av_strdup(s->all_expr);
685  if (!param->expr_str)
686  return AVERROR(ENOMEM);
687  }
688  if (param->expr_str) {
689  ret = av_expr_parse(&param->e, param->expr_str, var_names,
690  NULL, NULL, NULL, NULL, 0, ctx);
691  if (ret < 0)
692  return ret;
693  param->blend = s->depth > 8 ? blend_expr_16bit : blend_expr_8bit;
694  }
695  }
696 
697  return s->tblend ? 0 : ff_framesync_configure(&s->fs);
698 }
699 
700 #if CONFIG_BLEND_FILTER
701 
702 static int activate(AVFilterContext *ctx)
703 {
704  BlendContext *s = ctx->priv;
705  return ff_framesync_activate(&s->fs);
706 }
707 
708 static const AVFilterPad blend_inputs[] = {
709  {
710  .name = "top",
711  .type = AVMEDIA_TYPE_VIDEO,
712  },{
713  .name = "bottom",
714  .type = AVMEDIA_TYPE_VIDEO,
715  },
716  { NULL }
717 };
718 
719 static const AVFilterPad blend_outputs[] = {
720  {
721  .name = "default",
722  .type = AVMEDIA_TYPE_VIDEO,
723  .config_props = config_output,
724  },
725  { NULL }
726 };
727 
728 AVFilter ff_vf_blend = {
729  .name = "blend",
730  .description = NULL_IF_CONFIG_SMALL("Blend two video frames into each other."),
731  .preinit = blend_framesync_preinit,
732  .init = init,
733  .uninit = uninit,
734  .priv_size = sizeof(BlendContext),
735  .query_formats = query_formats,
736  .activate = activate,
737  .inputs = blend_inputs,
738  .outputs = blend_outputs,
739  .priv_class = &blend_class,
740  .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS,
741 };
742 
743 #endif
744 
745 #if CONFIG_TBLEND_FILTER
746 
747 static int tblend_filter_frame(AVFilterLink *inlink, AVFrame *frame)
748 {
749  AVFilterContext *ctx = inlink->dst;
750  BlendContext *s = ctx->priv;
751  AVFilterLink *outlink = ctx->outputs[0];
752 
753  if (s->prev_frame) {
754  AVFrame *out;
755 
756  if (ctx->is_disabled)
757  out = av_frame_clone(frame);
758  else
759  out = blend_frame(ctx, frame, s->prev_frame);
760  av_frame_free(&s->prev_frame);
761  s->prev_frame = frame;
762  return ff_filter_frame(outlink, out);
763  }
764  s->prev_frame = frame;
765  return 0;
766 }
767 
768 static const AVOption tblend_options[] = {
769  COMMON_OPTIONS,
770  { NULL }
771 };
772 
773 AVFILTER_DEFINE_CLASS(tblend);
774 
775 static const AVFilterPad tblend_inputs[] = {
776  {
777  .name = "default",
778  .type = AVMEDIA_TYPE_VIDEO,
779  .filter_frame = tblend_filter_frame,
780  },
781  { NULL }
782 };
783 
784 static const AVFilterPad tblend_outputs[] = {
785  {
786  .name = "default",
787  .type = AVMEDIA_TYPE_VIDEO,
788  .config_props = config_output,
789  },
790  { NULL }
791 };
792 
793 AVFilter ff_vf_tblend = {
794  .name = "tblend",
795  .description = NULL_IF_CONFIG_SMALL("Blend successive frames."),
796  .priv_size = sizeof(BlendContext),
797  .priv_class = &tblend_class,
798  .query_formats = query_formats,
799  .init = init,
800  .uninit = uninit,
801  .inputs = tblend_inputs,
802  .outputs = tblend_outputs,
803  .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS,
804 };
805 
806 #endif