FFmpeg
vf_blend.c
1 /*
2  * Copyright (c) 2013 Paul B Mahol
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include "libavutil/imgutils.h"
22 #include "libavutil/eval.h"
23 #include "libavutil/opt.h"
24 #include "libavutil/pixfmt.h"
25 #include "avfilter.h"
26 #include "formats.h"
27 #include "framesync.h"
28 #include "internal.h"
29 #include "video.h"
30 #include "blend.h"
31 
32 #define TOP 0
33 #define BOTTOM 1
34 
35 typedef struct BlendContext {
36  const AVClass *class;
37  FFFrameSync fs;
38  int hsub, vsub; ///< chroma subsampling values
39  int nb_planes;
40  char *all_expr;
41  enum BlendMode all_mode;
42  double all_opacity;
43 
44  int depth;
45  FilterParams params[4];
46  int tblend;
47  AVFrame *prev_frame; /* only used with tblend */
48 } BlendContext;
49 
50 static const char *const var_names[] = { "X", "Y", "W", "H", "SW", "SH", "T", "N", "A", "B", "TOP", "BOTTOM", NULL };
51 enum { VAR_X, VAR_Y, VAR_W, VAR_H, VAR_SW, VAR_SH, VAR_T, VAR_N, VAR_A, VAR_B, VAR_TOP, VAR_BOTTOM, VAR_VARS_NB };
52 
53 typedef struct ThreadData {
54  const AVFrame *top, *bottom;
55  AVFrame *dst;
56  AVFilterLink *inlink;
57  int plane;
58  int w, h;
59  FilterParams *param;
60 } ThreadData;
61 
62 #define COMMON_OPTIONS \
63  { "c0_mode", "set component #0 blend mode", OFFSET(params[0].mode), AV_OPT_TYPE_INT, {.i64=0}, 0, BLEND_NB-1, FLAGS, "mode"},\
64  { "c1_mode", "set component #1 blend mode", OFFSET(params[1].mode), AV_OPT_TYPE_INT, {.i64=0}, 0, BLEND_NB-1, FLAGS, "mode"},\
65  { "c2_mode", "set component #2 blend mode", OFFSET(params[2].mode), AV_OPT_TYPE_INT, {.i64=0}, 0, BLEND_NB-1, FLAGS, "mode"},\
66  { "c3_mode", "set component #3 blend mode", OFFSET(params[3].mode), AV_OPT_TYPE_INT, {.i64=0}, 0, BLEND_NB-1, FLAGS, "mode"},\
67  { "all_mode", "set blend mode for all components", OFFSET(all_mode), AV_OPT_TYPE_INT, {.i64=-1},-1, BLEND_NB-1, FLAGS, "mode"},\
68  { "addition", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_ADDITION}, 0, 0, FLAGS, "mode" },\
69  { "addition128","", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_GRAINMERGE}, 0, 0, FLAGS, "mode" },\
70  { "grainmerge", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_GRAINMERGE}, 0, 0, FLAGS, "mode" },\
71  { "and", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_AND}, 0, 0, FLAGS, "mode" },\
72  { "average", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_AVERAGE}, 0, 0, FLAGS, "mode" },\
73  { "burn", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_BURN}, 0, 0, FLAGS, "mode" },\
74  { "darken", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DARKEN}, 0, 0, FLAGS, "mode" },\
75  { "difference", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DIFFERENCE}, 0, 0, FLAGS, "mode" },\
76  { "difference128", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_GRAINEXTRACT}, 0, 0, FLAGS, "mode" },\
77  { "grainextract", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_GRAINEXTRACT}, 0, 0, FLAGS, "mode" },\
78  { "divide", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DIVIDE}, 0, 0, FLAGS, "mode" },\
79  { "dodge", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DODGE}, 0, 0, FLAGS, "mode" },\
80  { "exclusion", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_EXCLUSION}, 0, 0, FLAGS, "mode" },\
81  { "extremity", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_EXTREMITY}, 0, 0, FLAGS, "mode" },\
82  { "freeze", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_FREEZE}, 0, 0, FLAGS, "mode" },\
83  { "glow", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_GLOW}, 0, 0, FLAGS, "mode" },\
84  { "hardlight", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_HARDLIGHT}, 0, 0, FLAGS, "mode" },\
85  { "hardmix", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_HARDMIX}, 0, 0, FLAGS, "mode" },\
86  { "heat", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_HEAT}, 0, 0, FLAGS, "mode" },\
87  { "lighten", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_LIGHTEN}, 0, 0, FLAGS, "mode" },\
88  { "linearlight","", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_LINEARLIGHT},0, 0, FLAGS, "mode" },\
89  { "multiply", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_MULTIPLY}, 0, 0, FLAGS, "mode" },\
90  { "multiply128","", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_MULTIPLY128},0, 0, FLAGS, "mode" },\
91  { "negation", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_NEGATION}, 0, 0, FLAGS, "mode" },\
92  { "normal", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_NORMAL}, 0, 0, FLAGS, "mode" },\
93  { "or", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_OR}, 0, 0, FLAGS, "mode" },\
94  { "overlay", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_OVERLAY}, 0, 0, FLAGS, "mode" },\
95  { "phoenix", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_PHOENIX}, 0, 0, FLAGS, "mode" },\
96  { "pinlight", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_PINLIGHT}, 0, 0, FLAGS, "mode" },\
97  { "reflect", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_REFLECT}, 0, 0, FLAGS, "mode" },\
98  { "screen", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_SCREEN}, 0, 0, FLAGS, "mode" },\
99  { "softlight", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_SOFTLIGHT}, 0, 0, FLAGS, "mode" },\
100  { "subtract", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_SUBTRACT}, 0, 0, FLAGS, "mode" },\
101  { "vividlight", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_VIVIDLIGHT}, 0, 0, FLAGS, "mode" },\
102  { "xor", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_XOR}, 0, 0, FLAGS, "mode" },\
103  { "c0_expr", "set color component #0 expression", OFFSET(params[0].expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },\
104  { "c1_expr", "set color component #1 expression", OFFSET(params[1].expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },\
105  { "c2_expr", "set color component #2 expression", OFFSET(params[2].expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },\
106  { "c3_expr", "set color component #3 expression", OFFSET(params[3].expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },\
107  { "all_expr", "set expression for all color components", OFFSET(all_expr), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },\
108  { "c0_opacity", "set color component #0 opacity", OFFSET(params[0].opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS },\
109  { "c1_opacity", "set color component #1 opacity", OFFSET(params[1].opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS },\
110  { "c2_opacity", "set color component #2 opacity", OFFSET(params[2].opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS },\
111  { "c3_opacity", "set color component #3 opacity", OFFSET(params[3].opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS },\
112  { "all_opacity", "set opacity for all color components", OFFSET(all_opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS}
113 
114 #define OFFSET(x) offsetof(BlendContext, x)
115 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
116 
117 static const AVOption blend_options[] = {
118  COMMON_OPTIONS,
119  { NULL }
120 };
121 
122 FRAMESYNC_DEFINE_CLASS(blend, BlendContext, fs);
123 
124 #define COPY(src, depth) \
125 static void blend_copy ## src##_##depth(const uint8_t *top, ptrdiff_t top_linesize, \
126  const uint8_t *bottom, ptrdiff_t bottom_linesize,\
127  uint8_t *dst, ptrdiff_t dst_linesize, \
128  ptrdiff_t width, ptrdiff_t height, \
129  FilterParams *param, double *values, int starty) \
130 { \
131  av_image_copy_plane(dst, dst_linesize, src, src ## _linesize, \
132  width * depth / 8, height); \
133 }
134 
135 COPY(top, 8)
136 COPY(bottom, 8)
137 
138 COPY(top, 16)
139 COPY(bottom, 16)
140 
141 #undef COPY
142 
143 static void blend_normal_8bit(const uint8_t *top, ptrdiff_t top_linesize,
144  const uint8_t *bottom, ptrdiff_t bottom_linesize,
145  uint8_t *dst, ptrdiff_t dst_linesize,
146  ptrdiff_t width, ptrdiff_t height,
147  FilterParams *param, double *values, int starty)
148 {
149  const double opacity = param->opacity;
150  int i, j;
151 
152  for (i = 0; i < height; i++) {
153  for (j = 0; j < width; j++) {
154  dst[j] = top[j] * opacity + bottom[j] * (1. - opacity);
155  }
156  dst += dst_linesize;
157  top += top_linesize;
158  bottom += bottom_linesize;
159  }
160 }
161 
162 static void blend_normal_16bit(const uint8_t *_top, ptrdiff_t top_linesize,
163  const uint8_t *_bottom, ptrdiff_t bottom_linesize,
164  uint8_t *_dst, ptrdiff_t dst_linesize,
165  ptrdiff_t width, ptrdiff_t height,
166  FilterParams *param, double *values, int starty)
167 {
168  const uint16_t *top = (uint16_t*)_top;
169  const uint16_t *bottom = (uint16_t*)_bottom;
170  uint16_t *dst = (uint16_t*)_dst;
171  const double opacity = param->opacity;
172  int i, j;
173  dst_linesize /= 2;
174  top_linesize /= 2;
175  bottom_linesize /= 2;
176 
177  for (i = 0; i < height; i++) {
178  for (j = 0; j < width; j++) {
179  dst[j] = top[j] * opacity + bottom[j] * (1. - opacity);
180  }
181  dst += dst_linesize;
182  top += top_linesize;
183  bottom += bottom_linesize;
184  }
185 }
186 
187 #define DEFINE_BLEND8(name, expr) \
188 static void blend_## name##_8bit(const uint8_t *top, ptrdiff_t top_linesize, \
189  const uint8_t *bottom, ptrdiff_t bottom_linesize, \
190  uint8_t *dst, ptrdiff_t dst_linesize, \
191  ptrdiff_t width, ptrdiff_t height, \
192  FilterParams *param, double *values, int starty) \
193 { \
194  double opacity = param->opacity; \
195  int i, j; \
196  \
197  for (i = 0; i < height; i++) { \
198  for (j = 0; j < width; j++) { \
199  dst[j] = top[j] + ((expr) - top[j]) * opacity; \
200  } \
201  dst += dst_linesize; \
202  top += top_linesize; \
203  bottom += bottom_linesize; \
204  } \
205 }
206 
207 #define DEFINE_BLEND16(name, expr, depth) \
208 static void blend_## name##_##depth##bit(const uint8_t *_top, ptrdiff_t top_linesize,\
209  const uint8_t *_bottom, ptrdiff_t bottom_linesize, \
210  uint8_t *_dst, ptrdiff_t dst_linesize, \
211  ptrdiff_t width, ptrdiff_t height, \
212  FilterParams *param, double *values, int starty) \
213 { \
214  const uint16_t *top = (const uint16_t*)_top; \
215  const uint16_t *bottom = (const uint16_t*)_bottom; \
216  uint16_t *dst = (uint16_t*)_dst; \
217  double opacity = param->opacity; \
218  int i, j; \
219  dst_linesize /= 2; \
220  top_linesize /= 2; \
221  bottom_linesize /= 2; \
222  \
223  for (i = 0; i < height; i++) { \
224  for (j = 0; j < width; j++) { \
225  dst[j] = top[j] + ((expr) - top[j]) * opacity; \
226  } \
227  dst += dst_linesize; \
228  top += top_linesize; \
229  bottom += bottom_linesize; \
230  } \
231 }
232 
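/*
 * Editorial sketch (not in the original file): what one use of the
 * DEFINE_BLEND8() template above expands to. With A = top[j] and
 * B = bottom[j], DEFINE_BLEND8(addition, FFMIN(255, A + B)) becomes roughly:
 *
 *   static void blend_addition_8bit(const uint8_t *top, ptrdiff_t top_linesize,
 *                                   const uint8_t *bottom, ptrdiff_t bottom_linesize,
 *                                   uint8_t *dst, ptrdiff_t dst_linesize,
 *                                   ptrdiff_t width, ptrdiff_t height,
 *                                   FilterParams *param, double *values, int starty)
 *   {
 *       double opacity = param->opacity;
 *       int i, j;
 *
 *       for (i = 0; i < height; i++) {
 *           for (j = 0; j < width; j++)
 *               dst[j] = top[j] + (FFMIN(255, top[j] + bottom[j]) - top[j]) * opacity;
 *           dst    += dst_linesize;
 *           top    += top_linesize;
 *           bottom += bottom_linesize;
 *       }
 *   }
 *
 * Every mode therefore computes expr(A, B) per pixel and cross-fades that
 * result with the untouched top sample using the per-plane opacity.
 */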
233 #define A top[j]
234 #define B bottom[j]
235 
236 #define MULTIPLY(x, a, b) ((x) * (((a) * (b)) / 255))
237 #define SCREEN(x, a, b) (255 - (x) * ((255 - (a)) * (255 - (b)) / 255))
238 #define BURN(a, b) (((a) == 0) ? (a) : FFMAX(0, 255 - ((255 - (b)) << 8) / (a)))
239 #define DODGE(a, b) (((a) == 255) ? (a) : FFMIN(255, (((b) << 8) / (255 - (a)))))
240 
241 DEFINE_BLEND8(addition, FFMIN(255, A + B))
242 DEFINE_BLEND8(grainmerge, av_clip_uint8(A + B - 128))
243 DEFINE_BLEND8(average, (A + B) / 2)
244 DEFINE_BLEND8(subtract, FFMAX(0, A - B))
245 DEFINE_BLEND8(multiply, MULTIPLY(1, A, B))
246 DEFINE_BLEND8(multiply128,av_clip_uint8((A - 128) * B / 32. + 128))
247 DEFINE_BLEND8(negation, 255 - FFABS(255 - A - B))
248 DEFINE_BLEND8(extremity, FFABS(255 - A - B))
249 DEFINE_BLEND8(difference, FFABS(A - B))
250 DEFINE_BLEND8(grainextract, av_clip_uint8(128 + A - B))
251 DEFINE_BLEND8(screen, SCREEN(1, A, B))
252 DEFINE_BLEND8(overlay, (A < 128) ? MULTIPLY(2, A, B) : SCREEN(2, A, B))
253 DEFINE_BLEND8(hardlight, (B < 128) ? MULTIPLY(2, B, A) : SCREEN(2, B, A))
254 DEFINE_BLEND8(hardmix, (A < (255 - B)) ? 0: 255)
255 DEFINE_BLEND8(heat, (A == 0) ? 0 : 255 - FFMIN(((255 - B) * (255 - B)) / A, 255))
256 DEFINE_BLEND8(freeze, (B == 0) ? 0 : 255 - FFMIN(((255 - A) * (255 - A)) / B, 255))
257 DEFINE_BLEND8(darken, FFMIN(A, B))
258 DEFINE_BLEND8(lighten, FFMAX(A, B))
259 DEFINE_BLEND8(divide, av_clip_uint8(B == 0 ? 255 : 255 * A / B))
260 DEFINE_BLEND8(dodge, DODGE(A, B))
261 DEFINE_BLEND8(burn, BURN(A, B))
262 DEFINE_BLEND8(softlight, (A > 127) ? B + (255 - B) * (A - 127.5) / 127.5 * (0.5 - fabs(B - 127.5) / 255): B - B * ((127.5 - A) / 127.5) * (0.5 - fabs(B - 127.5)/255))
263 DEFINE_BLEND8(exclusion, A + B - 2 * A * B / 255)
264 DEFINE_BLEND8(pinlight, (B < 128) ? FFMIN(A, 2 * B) : FFMAX(A, 2 * (B - 128)))
265 DEFINE_BLEND8(phoenix, FFMIN(A, B) - FFMAX(A, B) + 255)
266 DEFINE_BLEND8(reflect, (B == 255) ? B : FFMIN(255, (A * A / (255 - B))))
267 DEFINE_BLEND8(glow, (A == 255) ? A : FFMIN(255, (B * B / (255 - A))))
268 DEFINE_BLEND8(and, A & B)
269 DEFINE_BLEND8(or, A | B)
270 DEFINE_BLEND8(xor, A ^ B)
271 DEFINE_BLEND8(vividlight, (A < 128) ? BURN(2 * A, B) : DODGE(2 * (A - 128), B))
272 DEFINE_BLEND8(linearlight,av_clip_uint8((B < 128) ? B + 2 * A - 255 : B + 2 * (A - 128)))
273 
274 #undef MULTIPLY
275 #undef SCREEN
276 #undef BURN
277 #undef DODGE
278 
279 #define MULTIPLY(x, a, b) ((x) * (((a) * (b)) / 65535))
280 #define SCREEN(x, a, b) (65535 - (x) * ((65535 - (a)) * (65535 - (b)) / 65535))
281 #define BURN(a, b) (((a) == 0) ? (a) : FFMAX(0, 65535 - ((65535 - (b)) << 16) / (a)))
282 #define DODGE(a, b) (((a) == 65535) ? (a) : FFMIN(65535, (((b) << 16) / (65535 - (a)))))
283 
284 DEFINE_BLEND16(addition, FFMIN(65535, A + B), 16)
285 DEFINE_BLEND16(grainmerge, av_clip_uint16(A + B - 32768), 16)
286 DEFINE_BLEND16(average, (A + B) / 2, 16)
287 DEFINE_BLEND16(subtract, FFMAX(0, A - B), 16)
288 DEFINE_BLEND16(multiply, MULTIPLY(1, A, B), 16)
289 DEFINE_BLEND16(multiply128, av_clip_uint16((A - 32768) * B / 8192. + 32768), 16)
290 DEFINE_BLEND16(negation, 65535 - FFABS(65535 - A - B), 16)
291 DEFINE_BLEND16(extremity, FFABS(65535 - A - B), 16)
292 DEFINE_BLEND16(difference, FFABS(A - B), 16)
293 DEFINE_BLEND16(grainextract, av_clip_uint16(32768 + A - B), 16)
294 DEFINE_BLEND16(screen, SCREEN(1, A, B), 16)
295 DEFINE_BLEND16(overlay, (A < 32768) ? MULTIPLY(2, A, B) : SCREEN(2, A, B), 16)
296 DEFINE_BLEND16(hardlight, (B < 32768) ? MULTIPLY(2, B, A) : SCREEN(2, B, A), 16)
297 DEFINE_BLEND16(hardmix, (A < (65535 - B)) ? 0: 65535, 16)
298 DEFINE_BLEND16(heat, (A == 0) ? 0 : 65535 - FFMIN(((65535 - B) * (65535 - B)) / A, 65535), 16)
299 DEFINE_BLEND16(freeze, (B == 0) ? 0 : 65535 - FFMIN(((65535 - A) * (65535 - A)) / B, 65535), 16)
300 DEFINE_BLEND16(darken, FFMIN(A, B), 16)
301 DEFINE_BLEND16(lighten, FFMAX(A, B), 16)
302 DEFINE_BLEND16(divide, av_clip_uint16(B == 0 ? 65535 : 65535 * A / B), 16)
303 DEFINE_BLEND16(dodge, DODGE(A, B), 16)
304 DEFINE_BLEND16(burn, BURN(A, B), 16)
305 DEFINE_BLEND16(softlight, (A > 32767) ? B + (65535 - B) * (A - 32767.5) / 32767.5 * (0.5 - fabs(B - 32767.5) / 65535): B - B * ((32767.5 - A) / 32767.5) * (0.5 - fabs(B - 32767.5)/65535), 16)
306 DEFINE_BLEND16(exclusion, A + B - 2 * A * B / 65535, 16)
307 DEFINE_BLEND16(pinlight, (B < 32768) ? FFMIN(A, 2 * B) : FFMAX(A, 2 * (B - 32768)), 16)
308 DEFINE_BLEND16(phoenix, FFMIN(A, B) - FFMAX(A, B) + 65535, 16)
309 DEFINE_BLEND16(reflect, (B == 65535) ? B : FFMIN(65535, (A * A / (65535 - B))), 16)
310 DEFINE_BLEND16(glow, (A == 65535) ? A : FFMIN(65535, (B * B / (65535 - A))), 16)
311 DEFINE_BLEND16(and, A & B, 16)
312 DEFINE_BLEND16(or, A | B, 16)
313 DEFINE_BLEND16(xor, A ^ B, 16)
314 DEFINE_BLEND16(vividlight, (A < 32768) ? BURN(2 * A, B) : DODGE(2 * (A - 32768), B), 16)
315 DEFINE_BLEND16(linearlight,av_clip_uint16((B < 32768) ? B + 2 * A - 65535 : B + 2 * (A - 32768)), 16)
316 
317 #undef MULTIPLY
318 #undef SCREEN
319 #undef BURN
320 #undef DODGE
321 
322 #define MULTIPLY(x, a, b) ((x) * (((a) * (b)) / 1023))
323 #define SCREEN(x, a, b) (1023 - (x) * ((1023 - (a)) * (1023 - (b)) / 1023))
324 #define BURN(a, b) (((a) == 0) ? (a) : FFMAX(0, 1023 - ((1023 - (b)) << 10) / (a)))
325 #define DODGE(a, b) (((a) == 1023) ? (a) : FFMIN(1023, (((b) << 10) / (1023 - (a)))))
326 
327 DEFINE_BLEND16(addition, FFMIN(1023, A + B), 10)
328 DEFINE_BLEND16(grainmerge, (int)av_clip_uintp2(A + B - 512, 10), 10)
329 DEFINE_BLEND16(average, (A + B) / 2, 10)
330 DEFINE_BLEND16(subtract, FFMAX(0, A - B), 10)
331 DEFINE_BLEND16(multiply, MULTIPLY(1, A, B), 10)
332 DEFINE_BLEND16(multiply128, (int)av_clip_uintp2((A - 512) * B / 128. + 512, 10), 10)
333 DEFINE_BLEND16(negation, 1023 - FFABS(1023 - A - B), 10)
334 DEFINE_BLEND16(extremity, FFABS(1023 - A - B), 10)
335 DEFINE_BLEND16(difference, FFABS(A - B), 10)
336 DEFINE_BLEND16(grainextract, (int)av_clip_uintp2(512 + A - B, 10), 10)
337 DEFINE_BLEND16(screen, SCREEN(1, A, B), 10)
338 DEFINE_BLEND16(overlay, (A < 512) ? MULTIPLY(2, A, B) : SCREEN(2, A, B), 10)
339 DEFINE_BLEND16(hardlight, (B < 512) ? MULTIPLY(2, B, A) : SCREEN(2, B, A), 10)
340 DEFINE_BLEND16(hardmix, (A < (1023 - B)) ? 0: 1023, 10)
341 DEFINE_BLEND16(heat, (A == 0) ? 0 : 1023 - FFMIN(((1023 - B) * (1023 - B)) / A, 1023), 10)
342 DEFINE_BLEND16(freeze, (B == 0) ? 0 : 1023 - FFMIN(((1023 - A) * (1023 - A)) / B, 1023), 10)
343 DEFINE_BLEND16(darken, FFMIN(A, B), 10)
344 DEFINE_BLEND16(lighten, FFMAX(A, B), 10)
345 DEFINE_BLEND16(divide, (int)av_clip_uintp2(B == 0 ? 1023 : 1023 * A / B, 10), 10)
346 DEFINE_BLEND16(dodge, DODGE(A, B), 10)
347 DEFINE_BLEND16(burn, BURN(A, B), 10)
348 DEFINE_BLEND16(softlight, (A > 511) ? B + (1023 - B) * (A - 511.5) / 511.5 * (0.5 - fabs(B - 511.5) / 1023): B - B * ((511.5 - A) / 511.5) * (0.5 - fabs(B - 511.5)/1023), 10)
349 DEFINE_BLEND16(exclusion, A + B - 2 * A * B / 1023, 10)
350 DEFINE_BLEND16(pinlight, (B < 512) ? FFMIN(A, 2 * B) : FFMAX(A, 2 * (B - 512)), 10)
351 DEFINE_BLEND16(phoenix, FFMIN(A, B) - FFMAX(A, B) + 1023, 10)
352 DEFINE_BLEND16(reflect, (B == 1023) ? B : FFMIN(1023, (A * A / (1023 - B))), 10)
353 DEFINE_BLEND16(glow, (A == 1023) ? A : FFMIN(1023, (B * B / (1023 - A))), 10)
354 DEFINE_BLEND16(and, A & B, 10)
355 DEFINE_BLEND16(or, A | B, 10)
356 DEFINE_BLEND16(xor, A ^ B, 10)
357 DEFINE_BLEND16(vividlight, (A < 512) ? BURN(2 * A, B) : DODGE(2 * (A - 512), B), 10)
358 DEFINE_BLEND16(linearlight,(int)av_clip_uintp2((B < 512) ? B + 2 * A - 1023 : B + 2 * (A - 512), 10), 10)
359 
360 #undef MULTIPLY
361 #undef SCREEN
362 #undef BURN
363 #undef DODGE
364 
365 #define MULTIPLY(x, a, b) ((x) * (((a) * (b)) / 4095))
366 #define SCREEN(x, a, b) (4095 - (x) * ((4095 - (a)) * (4095 - (b)) / 4095))
367 #define BURN(a, b) (((a) == 0) ? (a) : FFMAX(0, 4095 - ((4095 - (b)) << 12) / (a)))
368 #define DODGE(a, b) (((a) == 4095) ? (a) : FFMIN(4095, (((b) << 12) / (4095 - (a)))))
369 
370 DEFINE_BLEND16(addition, FFMIN(4095, A + B), 12)
371 DEFINE_BLEND16(grainmerge, (int)av_clip_uintp2(A + B - 2048, 12), 12)
372 DEFINE_BLEND16(average, (A + B) / 2, 12)
373 DEFINE_BLEND16(subtract, FFMAX(0, A - B), 12)
374 DEFINE_BLEND16(multiply, MULTIPLY(1, A, B), 12)
375 DEFINE_BLEND16(multiply128, (int)av_clip_uintp2((A - 2048) * B / 512. + 2048, 12), 12)
376 DEFINE_BLEND16(negation, 4095 - FFABS(4095 - A - B), 12)
377 DEFINE_BLEND16(extremity, FFABS(4095 - A - B), 12)
378 DEFINE_BLEND16(difference, FFABS(A - B), 12)
379 DEFINE_BLEND16(grainextract, (int)av_clip_uintp2(2048 + A - B, 12), 12)
380 DEFINE_BLEND16(screen, SCREEN(1, A, B), 12)
381 DEFINE_BLEND16(overlay, (A < 2048) ? MULTIPLY(2, A, B) : SCREEN(2, A, B), 12)
382 DEFINE_BLEND16(hardlight, (B < 2048) ? MULTIPLY(2, B, A) : SCREEN(2, B, A), 12)
383 DEFINE_BLEND16(hardmix, (A < (4095 - B)) ? 0: 4095, 12)
384 DEFINE_BLEND16(heat, (A == 0) ? 0 : 4095 - FFMIN(((4095 - B) * (4095 - B)) / A, 4095), 12)
385 DEFINE_BLEND16(freeze, (B == 0) ? 0 : 4095 - FFMIN(((4095 - A) * (4095 - A)) / B, 4095), 12)
386 DEFINE_BLEND16(darken, FFMIN(A, B), 12)
387 DEFINE_BLEND16(lighten, FFMAX(A, B), 12)
388 DEFINE_BLEND16(divide, (int)av_clip_uintp2(B == 0 ? 4095 : 4095 * A / B, 12), 12)
389 DEFINE_BLEND16(dodge, DODGE(A, B), 12)
390 DEFINE_BLEND16(burn, BURN(A, B), 12)
391 DEFINE_BLEND16(softlight, (A > 2047) ? B + (4095 - B) * (A - 2047.5) / 2047.5 * (0.5 - fabs(B - 2047.5) / 4095): B - B * ((2047.5 - A) / 2047.5) * (0.5 - fabs(B - 2047.5)/4095), 12)
392 DEFINE_BLEND16(exclusion, A + B - 2 * A * B / 4095, 12)
393 DEFINE_BLEND16(pinlight, (B < 2048) ? FFMIN(A, 2 * B) : FFMAX(A, 2 * (B - 2048)), 12)
394 DEFINE_BLEND16(phoenix, FFMIN(A, B) - FFMAX(A, B) + 4095, 12)
395 DEFINE_BLEND16(reflect, (B == 4095) ? B : FFMIN(4095, (A * A / (4095 - B))), 12)
396 DEFINE_BLEND16(glow, (A == 4095) ? A : FFMIN(4095, (B * B / (4095 - A))), 12)
397 DEFINE_BLEND16(and, A & B, 12)
398 DEFINE_BLEND16(or, A | B, 12)
399 DEFINE_BLEND16(xor, A ^ B, 12)
400 DEFINE_BLEND16(vividlight, (A < 2048) ? BURN(2 * A, B) : DODGE(2 * (A - 2048), B), 12)
401 DEFINE_BLEND16(linearlight,(int)av_clip_uintp2((B < 2048) ? B + 2 * A - 4095 : B + 2 * (A - 2048), 12), 12)
402 
403 #undef MULTIPLY
404 #undef SCREEN
405 #undef BURN
406 #undef DODGE
407 
408 #define MULTIPLY(x, a, b) ((x) * (((a) * (b)) / 511))
409 #define SCREEN(x, a, b) (511 - (x) * ((511 - (a)) * (511 - (b)) / 511))
410 #define BURN(a, b) (((a) == 0) ? (a) : FFMAX(0, 511 - ((511 - (b)) << 9) / (a)))
411 #define DODGE(a, b) (((a) == 511) ? (a) : FFMIN(511, (((b) << 9) / (511 - (a)))))
412 
413 DEFINE_BLEND16(addition, FFMIN(511, A + B), 9)
414 DEFINE_BLEND16(grainmerge, (int)av_clip_uintp2(A + B - 256, 9), 9)
415 DEFINE_BLEND16(average, (A + B) / 2, 9)
416 DEFINE_BLEND16(subtract, FFMAX(0, A - B), 9)
417 DEFINE_BLEND16(multiply, MULTIPLY(1, A, B), 9)
418 DEFINE_BLEND16(multiply128, (int)av_clip_uintp2((A - 256) * B / 64. + 256, 9), 9)
419 DEFINE_BLEND16(negation, 511 - FFABS(511 - A - B), 9)
420 DEFINE_BLEND16(extremity, FFABS(511 - A - B), 9)
421 DEFINE_BLEND16(difference, FFABS(A - B), 9)
422 DEFINE_BLEND16(grainextract, (int)av_clip_uintp2(256 + A - B, 9), 9)
423 DEFINE_BLEND16(screen, SCREEN(1, A, B), 9)
424 DEFINE_BLEND16(overlay, (A < 256) ? MULTIPLY(2, A, B) : SCREEN(2, A, B), 9)
425 DEFINE_BLEND16(hardlight, (B < 256) ? MULTIPLY(2, B, A) : SCREEN(2, B, A), 9)
426 DEFINE_BLEND16(hardmix, (A < (511 - B)) ? 0: 511, 9)
427 DEFINE_BLEND16(heat, (A == 0) ? 0 : 511 - FFMIN(((511 - B) * (511 - B)) / A, 511), 9)
428 DEFINE_BLEND16(freeze, (B == 0) ? 0 : 511 - FFMIN(((511 - A) * (511 - A)) / B, 511), 9)
429 DEFINE_BLEND16(darken, FFMIN(A, B), 9)
430 DEFINE_BLEND16(lighten, FFMAX(A, B), 9)
431 DEFINE_BLEND16(divide, (int)av_clip_uintp2(B == 0 ? 511 : 511 * A / B, 9), 9)
432 DEFINE_BLEND16(dodge, DODGE(A, B), 9)
433 DEFINE_BLEND16(burn, BURN(A, B), 9)
434 DEFINE_BLEND16(softlight, (A > 511) ? B + (511 - B) * (A - 511.5) / 511.5 * (0.5 - fabs(B - 511.5) / 511): B - B * ((511.5 - A) / 511.5) * (0.5 - fabs(B - 511.5)/511), 9)
435 DEFINE_BLEND16(exclusion, A + B - 2 * A * B / 511, 9)
436 DEFINE_BLEND16(pinlight, (B < 256) ? FFMIN(A, 2 * B) : FFMAX(A, 2 * (B - 256)), 9)
437 DEFINE_BLEND16(phoenix, FFMIN(A, B) - FFMAX(A, B) + 511, 9)
438 DEFINE_BLEND16(reflect, (B == 511) ? B : FFMIN(511, (A * A / (511 - B))), 9)
439 DEFINE_BLEND16(glow, (A == 511) ? A : FFMIN(511, (B * B / (511 - A))), 9)
440 DEFINE_BLEND16(and, A & B, 9)
441 DEFINE_BLEND16(or, A | B, 9)
442 DEFINE_BLEND16(xor, A ^ B, 9)
443 DEFINE_BLEND16(vividlight, (A < 256) ? BURN(2 * A, B) : DODGE(2 * (A - 256), B), 9)
444 DEFINE_BLEND16(linearlight,(int)av_clip_uintp2((B < 256) ? B + 2 * A - 511 : B + 2 * (A - 256), 9), 9)
445 
446 #define DEFINE_BLEND_EXPR(type, name, div) \
447 static void blend_expr_## name(const uint8_t *_top, ptrdiff_t top_linesize, \
448  const uint8_t *_bottom, ptrdiff_t bottom_linesize, \
449  uint8_t *_dst, ptrdiff_t dst_linesize, \
450  ptrdiff_t width, ptrdiff_t height, \
451  FilterParams *param, double *values, int starty) \
452 { \
453  const type *top = (type*)_top; \
454  const type *bottom = (type*)_bottom; \
455  type *dst = (type*)_dst; \
456  AVExpr *e = param->e; \
457  int y, x; \
458  dst_linesize /= div; \
459  top_linesize /= div; \
460  bottom_linesize /= div; \
461  \
462  for (y = 0; y < height; y++) { \
463  values[VAR_Y] = y + starty; \
464  for (x = 0; x < width; x++) { \
465  values[VAR_X] = x; \
466  values[VAR_TOP] = values[VAR_A] = top[x]; \
467  values[VAR_BOTTOM] = values[VAR_B] = bottom[x]; \
468  dst[x] = av_expr_eval(e, values, NULL); \
469  } \
470  dst += dst_linesize; \
471  top += top_linesize; \
472  bottom += bottom_linesize; \
473  } \
474 }
475 
476 DEFINE_BLEND_EXPR(uint8_t, 8bit, 1)
477 DEFINE_BLEND_EXPR(uint16_t, 16bit, 2)
478 
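/*
 * Editorial sketch (not in the original file): the blend_expr_8bit() /
 * blend_expr_16bit() kernels generated above evaluate a user-supplied
 * expression per pixel against the var_names[] table declared earlier.
 * Stand-alone, the same machinery looks like this (error handling trimmed):
 *
 *   AVExpr *e = NULL;
 *   double values[VAR_VARS_NB] = { 0 };
 *
 *   av_expr_parse(&e, "if(gt(A,B),A,B)", var_names,
 *                 NULL, NULL, NULL, NULL, 0, NULL);
 *   values[VAR_A] = values[VAR_TOP]    = 200;
 *   values[VAR_B] = values[VAR_BOTTOM] =  50;
 *   av_expr_eval(e, values, NULL);   // yields 200, i.e. a hand-written "lighten"
 *   av_expr_free(e);
 */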
479 static int filter_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
480 {
481  ThreadData *td = arg;
482  int slice_start = (td->h * jobnr ) / nb_jobs;
483  int slice_end = (td->h * (jobnr+1)) / nb_jobs;
484  int height = slice_end - slice_start;
485  const uint8_t *top = td->top->data[td->plane];
486  const uint8_t *bottom = td->bottom->data[td->plane];
487  uint8_t *dst = td->dst->data[td->plane];
488  double values[VAR_VARS_NB];
489 
490  values[VAR_N] = td->inlink->frame_count_out;
491  values[VAR_T] = td->dst->pts == AV_NOPTS_VALUE ? NAN : td->dst->pts * av_q2d(td->inlink->time_base);
492  values[VAR_W] = td->w;
493  values[VAR_H] = td->h;
494  values[VAR_SW] = td->w / (double)td->dst->width;
495  values[VAR_SH] = td->h / (double)td->dst->height;
496 
497  td->param->blend(top + slice_start * td->top->linesize[td->plane],
498  td->top->linesize[td->plane],
499  bottom + slice_start * td->bottom->linesize[td->plane],
500  td->bottom->linesize[td->plane],
501  dst + slice_start * td->dst->linesize[td->plane],
502  td->dst->linesize[td->plane],
503  td->w, height, td->param, &values[0], slice_start);
504  return 0;
505 }
506 
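/*
 * Editorial note (not in the original file): filter_slice() splits each plane
 * by rows, so with h = 100 and nb_jobs = 3 the slices cover rows [0,33),
 * [33,66) and [66,100); starty is passed through so the expression kernels
 * keep VAR_Y consistent across slices.
 */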
507 static AVFrame *blend_frame(AVFilterContext *ctx, AVFrame *top_buf,
508  const AVFrame *bottom_buf)
509 {
510  BlendContext *s = ctx->priv;
511  AVFilterLink *inlink = ctx->inputs[0];
512  AVFilterLink *outlink = ctx->outputs[0];
513  AVFrame *dst_buf;
514  int plane;
515 
516  dst_buf = ff_get_video_buffer(outlink, outlink->w, outlink->h);
517  if (!dst_buf)
518  return top_buf;
519  av_frame_copy_props(dst_buf, top_buf);
520 
521  for (plane = 0; plane < s->nb_planes; plane++) {
522  int hsub = plane == 1 || plane == 2 ? s->hsub : 0;
523  int vsub = plane == 1 || plane == 2 ? s->vsub : 0;
524  int outw = AV_CEIL_RSHIFT(dst_buf->width, hsub);
525  int outh = AV_CEIL_RSHIFT(dst_buf->height, vsub);
526  FilterParams *param = &s->params[plane];
527  ThreadData td = { .top = top_buf, .bottom = bottom_buf, .dst = dst_buf,
528  .w = outw, .h = outh, .param = param, .plane = plane,
529  .inlink = inlink };
530 
531  ctx->internal->execute(ctx, filter_slice, &td, NULL, FFMIN(outh, ff_filter_get_nb_threads(ctx)));
532  }
533 
534  if (!s->tblend)
535  av_frame_free(&top_buf);
536 
537  return dst_buf;
538 }
539 
540 static int blend_frame_for_dualinput(FFFrameSync *fs)
541 {
542  AVFilterContext *ctx = fs->parent;
543  AVFrame *top_buf, *bottom_buf, *dst_buf;
544  int ret;
545 
546  ret = ff_framesync_dualinput_get(fs, &top_buf, &bottom_buf);
547  if (ret < 0)
548  return ret;
549  if (!bottom_buf)
550  return ff_filter_frame(ctx->outputs[0], top_buf);
551  dst_buf = blend_frame(ctx, top_buf, bottom_buf);
552  return ff_filter_frame(ctx->outputs[0], dst_buf);
553 }
554 
555 static av_cold int init(AVFilterContext *ctx)
556 {
557  BlendContext *s = ctx->priv;
558 
559  s->tblend = !strcmp(ctx->filter->name, "tblend");
560 
561  s->fs.on_event = blend_frame_for_dualinput;
562  return 0;
563 }
564 
565 static int query_formats(AVFilterContext *ctx)
566 {
567  static const enum AVPixelFormat pix_fmts[] = {
568  AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUVJ440P,
569  AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
570  AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ411P,
571  AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP, AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY16,
572  AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
573  AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10, AV_PIX_FMT_YUV440P10,
574  AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV440P12,
575  AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
576  AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P,
577  AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA444P9,
578  AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA444P10,
579  AV_PIX_FMT_YUVA420P16, AV_PIX_FMT_YUVA422P16, AV_PIX_FMT_YUVA444P16,
580  AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10, AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP16,
581  AV_PIX_FMT_GBRAP10, AV_PIX_FMT_GBRAP12, AV_PIX_FMT_GBRAP16,
582  AV_PIX_FMT_GRAY10, AV_PIX_FMT_GRAY12, AV_PIX_FMT_NONE
583  };
584 
585  AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
586  if (!fmts_list)
587  return AVERROR(ENOMEM);
588  return ff_set_common_formats(ctx, fmts_list);
589 }
590 
591 static av_cold void uninit(AVFilterContext *ctx)
592 {
593  BlendContext *s = ctx->priv;
594  int i;
595 
596  ff_framesync_uninit(&s->fs);
597  av_frame_free(&s->prev_frame);
598 
599  for (i = 0; i < FF_ARRAY_ELEMS(s->params); i++)
600  av_expr_free(s->params[i].e);
601 }
602 
603 #define DEFINE_INIT_BLEND_FUNC(depth, nbits) \
604 static av_cold void init_blend_func_##depth##_##nbits##bit(FilterParams *param) \
605 { \
606  switch (param->mode) { \
607  case BLEND_ADDITION: param->blend = blend_addition_##depth##bit; break; \
608  case BLEND_GRAINMERGE: param->blend = blend_grainmerge_##depth##bit; break; \
609  case BLEND_AND: param->blend = blend_and_##depth##bit; break; \
610  case BLEND_AVERAGE: param->blend = blend_average_##depth##bit; break; \
611  case BLEND_BURN: param->blend = blend_burn_##depth##bit; break; \
612  case BLEND_DARKEN: param->blend = blend_darken_##depth##bit; break; \
613  case BLEND_DIFFERENCE: param->blend = blend_difference_##depth##bit; break; \
614  case BLEND_GRAINEXTRACT: param->blend = blend_grainextract_##depth##bit; break; \
615  case BLEND_DIVIDE: param->blend = blend_divide_##depth##bit; break; \
616  case BLEND_DODGE: param->blend = blend_dodge_##depth##bit; break; \
617  case BLEND_EXCLUSION: param->blend = blend_exclusion_##depth##bit; break; \
618  case BLEND_EXTREMITY: param->blend = blend_extremity_##depth##bit; break; \
619  case BLEND_FREEZE: param->blend = blend_freeze_##depth##bit; break; \
620  case BLEND_GLOW: param->blend = blend_glow_##depth##bit; break; \
621  case BLEND_HARDLIGHT: param->blend = blend_hardlight_##depth##bit; break; \
622  case BLEND_HARDMIX: param->blend = blend_hardmix_##depth##bit; break; \
623  case BLEND_HEAT: param->blend = blend_heat_##depth##bit; break; \
624  case BLEND_LIGHTEN: param->blend = blend_lighten_##depth##bit; break; \
625  case BLEND_LINEARLIGHT:param->blend = blend_linearlight_##depth##bit;break; \
626  case BLEND_MULTIPLY: param->blend = blend_multiply_##depth##bit; break; \
627  case BLEND_MULTIPLY128:param->blend = blend_multiply128_##depth##bit;break; \
628  case BLEND_NEGATION: param->blend = blend_negation_##depth##bit; break; \
629  case BLEND_NORMAL: param->blend = blend_normal_##nbits##bit; break; \
630  case BLEND_OR: param->blend = blend_or_##depth##bit; break; \
631  case BLEND_OVERLAY: param->blend = blend_overlay_##depth##bit; break; \
632  case BLEND_PHOENIX: param->blend = blend_phoenix_##depth##bit; break; \
633  case BLEND_PINLIGHT: param->blend = blend_pinlight_##depth##bit; break; \
634  case BLEND_REFLECT: param->blend = blend_reflect_##depth##bit; break; \
635  case BLEND_SCREEN: param->blend = blend_screen_##depth##bit; break; \
636  case BLEND_SOFTLIGHT: param->blend = blend_softlight_##depth##bit; break; \
637  case BLEND_SUBTRACT: param->blend = blend_subtract_##depth##bit; break; \
638  case BLEND_VIVIDLIGHT: param->blend = blend_vividlight_##depth##bit; break; \
639  case BLEND_XOR: param->blend = blend_xor_##depth##bit; break; \
640  } \
641 }
642 DEFINE_INIT_BLEND_FUNC(8, 8);
643 DEFINE_INIT_BLEND_FUNC(9, 16);
644 DEFINE_INIT_BLEND_FUNC(10, 16);
645 DEFINE_INIT_BLEND_FUNC(12, 16);
646 DEFINE_INIT_BLEND_FUNC(16, 16);
647 
648 void ff_blend_init(FilterParams *param, int depth)
649 {
650  switch (depth) {
651  case 8:
652  init_blend_func_8_8bit(param);
653  break;
654  case 9:
655  init_blend_func_9_16bit(param);
656  break;
657  case 10:
658  init_blend_func_10_16bit(param);
659  break;
660  case 12:
661  init_blend_func_12_16bit(param);
662  break;
663  case 16:
664  init_blend_func_16_16bit(param);
665  break;
666  }
667 
668  if (param->opacity == 0 && param->mode != BLEND_NORMAL) {
669  param->blend = depth > 8 ? blend_copytop_16 : blend_copytop_8;
670  } else if (param->mode == BLEND_NORMAL) {
671  if (param->opacity == 1)
672  param->blend = depth > 8 ? blend_copytop_16 : blend_copytop_8;
673  else if (param->opacity == 0)
674  param->blend = depth > 8 ? blend_copybottom_16 : blend_copybottom_8;
675  }
676 
677  if (ARCH_X86)
678  ff_blend_init_x86(param, depth);
679 }
680 
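/*
 * Editorial sketch (not in the original file, internal API): ff_blend_init()
 * only fills the FilterParams dispatch entry, and each kernel is a plain
 * per-plane function, so a single tightly packed 8-bit plane could be blended
 * like this:
 *
 *   FilterParams param = { .mode = BLEND_MULTIPLY, .opacity = 1.0 };
 *   double values[16] = { 0 };      // only read by the expression kernels
 *
 *   ff_blend_init(&param, 8);       // selects the 8-bit multiply kernel
 *   param.blend(top, width, bottom, width, dst, width,
 *               width, height, &param, values, 0);
 */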
681 static int config_output(AVFilterLink *outlink)
682 {
683  AVFilterContext *ctx = outlink->src;
684  AVFilterLink *toplink = ctx->inputs[TOP];
685  BlendContext *s = ctx->priv;
686  const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(toplink->format);
687  int ret, plane;
688 
689  if (!s->tblend) {
690  AVFilterLink *bottomlink = ctx->inputs[BOTTOM];
691 
692  if (toplink->format != bottomlink->format) {
693  av_log(ctx, AV_LOG_ERROR, "inputs must be of same pixel format\n");
694  return AVERROR(EINVAL);
695  }
696  if (toplink->w != bottomlink->w || toplink->h != bottomlink->h) {
697  av_log(ctx, AV_LOG_ERROR, "First input link %s parameters "
698  "(size %dx%d) do not match the corresponding "
699  "second input link %s parameters (size %dx%d)\n",
700  ctx->input_pads[TOP].name, toplink->w, toplink->h,
701  ctx->input_pads[BOTTOM].name, bottomlink->w, bottomlink->h);
702  return AVERROR(EINVAL);
703  }
704  }
705 
706  outlink->w = toplink->w;
707  outlink->h = toplink->h;
708  outlink->time_base = toplink->time_base;
709  outlink->sample_aspect_ratio = toplink->sample_aspect_ratio;
710  outlink->frame_rate = toplink->frame_rate;
711 
712  s->hsub = pix_desc->log2_chroma_w;
713  s->vsub = pix_desc->log2_chroma_h;
714 
715  s->depth = pix_desc->comp[0].depth;
716  s->nb_planes = av_pix_fmt_count_planes(toplink->format);
717 
718  if (!s->tblend)
719  if ((ret = ff_framesync_init_dualinput(&s->fs, ctx)) < 0)
720  return ret;
721 
722  for (plane = 0; plane < FF_ARRAY_ELEMS(s->params); plane++) {
723  FilterParams *param = &s->params[plane];
724 
725  if (s->all_mode >= 0)
726  param->mode = s->all_mode;
727  if (s->all_opacity < 1)
728  param->opacity = s->all_opacity;
729 
730  ff_blend_init(param, s->depth);
731 
732  if (s->all_expr && !param->expr_str) {
733  param->expr_str = av_strdup(s->all_expr);
734  if (!param->expr_str)
735  return AVERROR(ENOMEM);
736  }
737  if (param->expr_str) {
738  ret = av_expr_parse(&param->e, param->expr_str, var_names,
739  NULL, NULL, NULL, NULL, 0, ctx);
740  if (ret < 0)
741  return ret;
742  param->blend = s->depth > 8 ? blend_expr_16bit : blend_expr_8bit;
743  }
744  }
745 
746  if (s->tblend)
747  return 0;
748 
749  ret = ff_framesync_configure(&s->fs);
750  outlink->time_base = s->fs.time_base;
751 
752  return ret;
753 }
754 
755 #if CONFIG_BLEND_FILTER
756 
757 static int activate(AVFilterContext *ctx)
758 {
759  BlendContext *s = ctx->priv;
760  return ff_framesync_activate(&s->fs);
761 }
762 
763 static const AVFilterPad blend_inputs[] = {
764  {
765  .name = "top",
766  .type = AVMEDIA_TYPE_VIDEO,
767  },{
768  .name = "bottom",
769  .type = AVMEDIA_TYPE_VIDEO,
770  },
771  { NULL }
772 };
773 
774 static const AVFilterPad blend_outputs[] = {
775  {
776  .name = "default",
777  .type = AVMEDIA_TYPE_VIDEO,
778  .config_props = config_output,
779  },
780  { NULL }
781 };
782 
783 AVFilter ff_vf_blend = {
784  .name = "blend",
785  .description = NULL_IF_CONFIG_SMALL("Blend two video frames into each other."),
786  .preinit = blend_framesync_preinit,
787  .init = init,
788  .uninit = uninit,
789  .priv_size = sizeof(BlendContext),
790  .query_formats = query_formats,
791  .activate = activate,
792  .inputs = blend_inputs,
793  .outputs = blend_outputs,
794  .priv_class = &blend_class,
795  .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS,
796 };
797 
798 #endif
799 
800 #if CONFIG_TBLEND_FILTER
801 
802 static int tblend_filter_frame(AVFilterLink *inlink, AVFrame *frame)
803 {
804  AVFilterContext *ctx = inlink->dst;
805  BlendContext *s = ctx->priv;
806  AVFilterLink *outlink = ctx->outputs[0];
807 
808  if (s->prev_frame) {
809  AVFrame *out;
810 
811  if (ctx->is_disabled)
812  out = av_frame_clone(frame);
813  else
814  out = blend_frame(ctx, frame, s->prev_frame);
815  av_frame_free(&s->prev_frame);
816  s->prev_frame = frame;
817  return ff_filter_frame(outlink, out);
818  }
819  s->prev_frame = frame;
820  return 0;
821 }
822 
823 static const AVOption tblend_options[] = {
824  COMMON_OPTIONS,
825  { NULL }
826 };
827 
828 AVFILTER_DEFINE_CLASS(tblend);
829 
830 static const AVFilterPad tblend_inputs[] = {
831  {
832  .name = "default",
833  .type = AVMEDIA_TYPE_VIDEO,
834  .filter_frame = tblend_filter_frame,
835  },
836  { NULL }
837 };
838 
839 static const AVFilterPad tblend_outputs[] = {
840  {
841  .name = "default",
842  .type = AVMEDIA_TYPE_VIDEO,
843  .config_props = config_output,
844  },
845  { NULL }
846 };
847 
848 AVFilter ff_vf_tblend = {
849  .name = "tblend",
850  .description = NULL_IF_CONFIG_SMALL("Blend successive frames."),
851  .priv_size = sizeof(BlendContext),
852  .priv_class = &tblend_class,
853  .query_formats = query_formats,
854  .init = init,
855  .uninit = uninit,
856  .inputs = tblend_inputs,
857  .outputs = tblend_outputs,
858  .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS,
859 };
860 
861 #endif
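/*
 * Editorial usage note (not in the original file): both filters are configured
 * through the options declared in COMMON_OPTIONS above, e.g.
 *
 *   ffmpeg -i top.mp4 -i bottom.mp4 \
 *          -filter_complex "blend=all_mode=multiply:all_opacity=0.7" out.mp4
 *   ffmpeg -i input.mp4 -vf "tblend=all_mode=grainextract" out.mp4
 *
 * all_mode / all_opacity apply one setting to every plane, the cN_* options
 * override it per component, and all_expr / cN_expr switch a plane to the
 * expression path (variables X, Y, W, H, SW, SH, T, N, A, B, TOP, BOTTOM).
 */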