FFmpeg
vf_geq.c
Go to the documentation of this file.
1 /*
2  * Copyright (C) 2006 Michael Niedermayer <michaelni@gmx.at>
3  * Copyright (C) 2012 Clément Bœsch <u pkh me>
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * Generic equation change filter
25  * Originally written by Michael Niedermayer for the MPlayer project, and
26  * ported by Clément Bœsch for FFmpeg.
27  */
28 
29 #include "libavutil/avassert.h"
30 #include "libavutil/avstring.h"
31 #include "libavutil/eval.h"
32 #include "libavutil/opt.h"
33 #include "libavutil/pixdesc.h"
34 #include "internal.h"
35 
36 #define NB_PLANES 4
37 
42 };
43 
/* Constant names visible to user expressions; order matches the VAR_*
 * indices used with GEQContext.values (VAR_X .. VAR_T, presumably — the
 * VAR_* enum itself is not visible in this extract; confirm ordering). */
44 static const char *const var_names[] = { "X", "Y", "W", "H", "N", "SW", "SH", "T", NULL };
46 
/* Filter private context.
 * NOTE(review): this extract is missing several members that the rest of
 * the listing uses — int interpolation (vf_geq.c:57), double
 * *pixel_sums[NB_PLANES] (vf_geq.c:61) and int needs_sum[NB_PLANES]
 * (vf_geq.c:62), per the cross-reference index — confirm against the
 * full source before editing. */
47 typedef struct GEQContext {
48  const AVClass *class;
49  AVExpr *e[NB_PLANES]; ///< parsed expression, one per plane
50  char *expr_str[4+3]; ///< expression strings: Y,U,V,A then G,B,R slots
51  AVFrame *picref; ///< current input buffer
52  uint8_t *dst; ///< reference pointer to the 8bits output
53  uint16_t *dst16; ///< reference pointer to the 16bits output
54  double values[VAR_VARS_NB]; ///< expression values
55  int hsub, vsub; ///< chroma subsampling
56  int planes; ///< number of planes
58  int is_rgb; ///< set when RGB (g/b/r) expressions were given instead of YCbCr
59  int bps; ///< bits per sample of the input (set in config_props)
60 
63 } GEQContext;
64 
/* Indexes into GEQContext.expr_str: the YCbCr(+alpha) slots come first,
 * then the RGB aliases that are remapped onto planes 0..2 when is_rgb
 * is set (see the plane<3 remap in the expression-parsing loop). */
65 enum { Y = 0, U, V, A, G, B, R };
66 
67 #define OFFSET(x) offsetof(GEQContext, x)
68 #define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
69 
/* Option table: each expression option has a long name and a short alias
 * writing to the same GEQContext field.  YCbCr (lum/cb/cr) and RGB
 * (r/g/b) families are mutually exclusive (enforced in init).
 * "interpolation"/"i" selects nearest vs bilinear sampling in getpix(). */
70 static const AVOption geq_options[] = {
71  { "lum_expr", "set luminance expression", OFFSET(expr_str[Y]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
72  { "lum", "set luminance expression", OFFSET(expr_str[Y]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
73  { "cb_expr", "set chroma blue expression", OFFSET(expr_str[U]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
74  { "cb", "set chroma blue expression", OFFSET(expr_str[U]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
75  { "cr_expr", "set chroma red expression", OFFSET(expr_str[V]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
76  { "cr", "set chroma red expression", OFFSET(expr_str[V]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
77  { "alpha_expr", "set alpha expression", OFFSET(expr_str[A]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
78  { "a", "set alpha expression", OFFSET(expr_str[A]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
79  { "red_expr", "set red expression", OFFSET(expr_str[R]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
80  { "r", "set red expression", OFFSET(expr_str[R]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
81  { "green_expr", "set green expression", OFFSET(expr_str[G]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
82  { "g", "set green expression", OFFSET(expr_str[G]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
83  { "blue_expr", "set blue expression", OFFSET(expr_str[B]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
84  { "b", "set blue expression", OFFSET(expr_str[B]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
85  { "interpolation","set interpolation method", OFFSET(interpolation), AV_OPT_TYPE_INT, {.i64=INTERP_BILINEAR}, 0, NB_INTERP-1, FLAGS, "interp" },
86  { "i", "set interpolation method", OFFSET(interpolation), AV_OPT_TYPE_INT, {.i64=INTERP_BILINEAR}, 0, NB_INTERP-1, FLAGS, "interp" },
87  { "nearest", "nearest interpolation", 0, AV_OPT_TYPE_CONST, {.i64=INTERP_NEAREST}, 0, 0, FLAGS, "interp" },
88  { "n", "nearest interpolation", 0, AV_OPT_TYPE_CONST, {.i64=INTERP_NEAREST}, 0, 0, FLAGS, "interp" },
89  { "bilinear", "bilinear interpolation", 0, AV_OPT_TYPE_CONST, {.i64=INTERP_BILINEAR}, 0, 0, FLAGS, "interp" },
90  { "b", "bilinear interpolation", 0, AV_OPT_TYPE_CONST, {.i64=INTERP_BILINEAR}, 0, 0, FLAGS, "interp" },
91  {NULL},
92 };
93 
95 
96 static inline double getpix(void *priv, double x, double y, int plane)
97 {
98  int xi, yi;
99  GEQContext *geq = priv;
100  AVFrame *picref = geq->picref;
101  const uint8_t *src = picref->data[plane];
102  int linesize = picref->linesize[plane];
103  const int w = (plane == 1 || plane == 2) ? AV_CEIL_RSHIFT(picref->width, geq->hsub) : picref->width;
104  const int h = (plane == 1 || plane == 2) ? AV_CEIL_RSHIFT(picref->height, geq->vsub) : picref->height;
105 
106  if (!src)
107  return 0;
108 
109  if (geq->interpolation == INTERP_BILINEAR) {
110  xi = x = av_clipd(x, 0, w - 2);
111  yi = y = av_clipd(y, 0, h - 2);
112 
113  x -= xi;
114  y -= yi;
115 
116  if (geq->bps > 8) {
117  const uint16_t *src16 = (const uint16_t*)src;
118  linesize /= 2;
119 
120  return (1-y)*((1-x)*src16[xi + yi * linesize] + x*src16[xi + 1 + yi * linesize])
121  + y *((1-x)*src16[xi + (yi+1) * linesize] + x*src16[xi + 1 + (yi+1) * linesize]);
122  } else {
123  return (1-y)*((1-x)*src[xi + yi * linesize] + x*src[xi + 1 + yi * linesize])
124  + y *((1-x)*src[xi + (yi+1) * linesize] + x*src[xi + 1 + (yi+1) * linesize]);
125  }
126  } else {
127  xi = av_clipd(x, 0, w - 1);
128  yi = av_clipd(y, 0, h - 1);
129 
130  if (geq->bps > 8) {
131  const uint16_t *src16 = (const uint16_t*)src;
132  linesize /= 2;
133 
134  return src16[xi + yi * linesize];
135  } else {
136  return src[xi + yi * linesize];
137  }
138  }
139 }
140 
141 static int calculate_sums(GEQContext *geq, int plane, int w, int h)
142 {
143  int xi, yi;
144  AVFrame *picref = geq->picref;
145  const uint8_t *src = picref->data[plane];
146  int linesize = picref->linesize[plane];
147 
148  if (!geq->pixel_sums[plane])
149  geq->pixel_sums[plane] = av_malloc_array(w, h * sizeof (*geq->pixel_sums[plane]));
150  if (!geq->pixel_sums[plane])
151  return AVERROR(ENOMEM);
152  if (geq->bps > 8)
153  linesize /= 2;
154  for (yi = 0; yi < h; yi ++) {
155  if (geq->bps > 8) {
156  const uint16_t *src16 = (const uint16_t*)src;
157  double linesum = 0;
158 
159  for (xi = 0; xi < w; xi ++) {
160  linesum += src16[xi + yi * linesize];
161  geq->pixel_sums[plane][xi + yi * w] = linesum;
162  }
163  } else {
164  double linesum = 0;
165 
166  for (xi = 0; xi < w; xi ++) {
167  linesum += src[xi + yi * linesize];
168  geq->pixel_sums[plane][xi + yi * w] = linesum;
169  }
170  }
171  if (yi)
172  for (xi = 0; xi < w; xi ++) {
173  geq->pixel_sums[plane][xi + yi * w] += geq->pixel_sums[plane][xi + yi * w - w];
174  }
175  }
176  return 0;
177 }
178 
179 static inline double getpix_integrate_internal(GEQContext *geq, int x, int y, int plane, int w, int h)
180 {
181  if (x > w - 1) {
182  double boundary = getpix_integrate_internal(geq, w - 1, y, plane, w, h);
183  return 2*boundary - getpix_integrate_internal(geq, 2*(w - 1) - x, y, plane, w, h);
184  } else if (y > h - 1) {
185  double boundary = getpix_integrate_internal(geq, x, h - 1, plane, w, h);
186  return 2*boundary - getpix_integrate_internal(geq, x, 2*(h - 1) - y, plane, w, h);
187  } else if (x < 0) {
188  if (x == -1) return 0;
189  return - getpix_integrate_internal(geq, -x-2, y, plane, w, h);
190  } else if (y < 0) {
191  if (y == -1) return 0;
192  return - getpix_integrate_internal(geq, x, -y-2, plane, w, h);
193  }
194 
195  return geq->pixel_sums[plane][x + y * w];
196 }
197 
198 static inline double getpix_integrate(void *priv, double x, double y, int plane) {
199  GEQContext *geq = priv;
200  AVFrame *picref = geq->picref;
201  const uint8_t *src = picref->data[plane];
202  const int w = (plane == 1 || plane == 2) ? AV_CEIL_RSHIFT(picref->width, geq->hsub) : picref->width;
203  const int h = (plane == 1 || plane == 2) ? AV_CEIL_RSHIFT(picref->height, geq->vsub) : picref->height;
204 
205  if (!src)
206  return 0;
207 
208  return getpix_integrate_internal(geq, lrint(av_clipd(x, -w, 2*w)), lrint(av_clipd(y, -h, 2*h)), plane, w, h);
209 }
210 
211 //TODO: cubic interpolate
212 //TODO: keep the last few frames
/* Per-plane pixel-fetch callbacks exposed to the expression evaluator:
 * plane 0, 1, 2 and 3 respectively (see the func2 tables in init). */
static double lum(void *priv, double x, double y)
{
    return getpix(priv, x, y, 0);
}

static double cb(void *priv, double x, double y)
{
    return getpix(priv, x, y, 1);
}

static double cr(void *priv, double x, double y)
{
    return getpix(priv, x, y, 2);
}

static double alpha(void *priv, double x, double y)
{
    return getpix(priv, x, y, 3);
}
217 
/* Summed-area-table callbacks for the same four planes.
 * NOTE(review): "crsub" looks like a typo for "crsum", but the
 * identifier is referenced by the func2 table in init, so it is kept. */
static double lumsum(void *priv, double x, double y)
{
    return getpix_integrate(priv, x, y, 0);
}

static double cbsum(void *priv, double x, double y)
{
    return getpix_integrate(priv, x, y, 1);
}

static double crsub(void *priv, double x, double y)
{
    return getpix_integrate(priv, x, y, 2);
}

static double alphasum(void *priv, double x, double y)
{
    return getpix_integrate(priv, x, y, 3);
}
222 
224 {
225  GEQContext *geq = ctx->priv;
226  int plane, ret = 0;
227 
228  if (!geq->expr_str[Y] && !geq->expr_str[G] && !geq->expr_str[B] && !geq->expr_str[R]) {
229  av_log(ctx, AV_LOG_ERROR, "A luminance or RGB expression is mandatory\n");
230  ret = AVERROR(EINVAL);
231  goto end;
232  }
233  geq->is_rgb = !geq->expr_str[Y];
234 
235  if ((geq->expr_str[Y] || geq->expr_str[U] || geq->expr_str[V]) && (geq->expr_str[G] || geq->expr_str[B] || geq->expr_str[R])) {
236  av_log(ctx, AV_LOG_ERROR, "Either YCbCr or RGB but not both must be specified\n");
237  ret = AVERROR(EINVAL);
238  goto end;
239  }
240 
241  if (!geq->expr_str[U] && !geq->expr_str[V]) {
242  /* No chroma at all: fallback on luma */
243  geq->expr_str[U] = av_strdup(geq->expr_str[Y]);
244  geq->expr_str[V] = av_strdup(geq->expr_str[Y]);
245  } else {
246  /* One chroma unspecified, fallback on the other */
247  if (!geq->expr_str[U]) geq->expr_str[U] = av_strdup(geq->expr_str[V]);
248  if (!geq->expr_str[V]) geq->expr_str[V] = av_strdup(geq->expr_str[U]);
249  }
250 
251  if (!geq->expr_str[A]) {
252  char bps_string[8];
253  snprintf(bps_string, sizeof(bps_string), "%d", (1<<geq->bps) - 1);
254  geq->expr_str[A] = av_strdup(bps_string);
255  }
256  if (!geq->expr_str[G])
257  geq->expr_str[G] = av_strdup("g(X,Y)");
258  if (!geq->expr_str[B])
259  geq->expr_str[B] = av_strdup("b(X,Y)");
260  if (!geq->expr_str[R])
261  geq->expr_str[R] = av_strdup("r(X,Y)");
262 
263  if (geq->is_rgb ?
264  (!geq->expr_str[G] || !geq->expr_str[B] || !geq->expr_str[R])
265  :
266  (!geq->expr_str[U] || !geq->expr_str[V] || !geq->expr_str[A])) {
267  ret = AVERROR(ENOMEM);
268  goto end;
269  }
270 
271  for (plane = 0; plane < NB_PLANES; plane++) {
272  static double (*p[])(void *, double, double) = {
273  lum , cb , cr , alpha ,
275  };
276  static const char *const func2_yuv_names[] = {
277  "lum" , "cb" , "cr" , "alpha" , "p",
278  "lumsum", "cbsum", "crsum", "alphasum", "psum",
279  NULL };
280  static const char *const func2_rgb_names[] = {
281  "g" , "b" , "r" , "alpha" , "p",
282  "gsum", "bsum", "rsum", "alphasum", "psum",
283  NULL };
284  const char *const *func2_names = geq->is_rgb ? func2_rgb_names : func2_yuv_names;
285  double (*func2[])(void *, double, double) = {
286  lum , cb , cr , alpha , p[plane],
287  lumsum, cbsum, crsub, alphasum, p[plane + 4],
288  NULL };
289  int counter[10] = {0};
290 
291  ret = av_expr_parse(&geq->e[plane], geq->expr_str[plane < 3 && geq->is_rgb ? plane+4 : plane], var_names,
292  NULL, NULL, func2_names, func2, 0, ctx);
293  if (ret < 0)
294  break;
295 
296  av_expr_count_func(geq->e[plane], counter, FF_ARRAY_ELEMS(counter), 2);
297  geq->needs_sum[plane] = counter[5] + counter[6] + counter[7] + counter[8] + counter[9];
298  }
299 
300 end:
301  return ret;
302 }
303 
/* Negotiate pixel formats: advertise an RGB-family list when RGB
 * expressions were given, otherwise a YCbCr-family list.
 * NOTE(review): this extract is missing the function signature
 * ("static int geq_query_formats(AVFilterContext *ctx)" per the
 * cross-reference index) and the bodies of both pixel-format arrays
 * (doxygen lines 308-324 and 327-332) — do not edit without the full
 * source. */
305 {
306  GEQContext *geq = ctx->priv;
307  static const enum AVPixelFormat yuv_pix_fmts[] = {
325  };
326  static const enum AVPixelFormat rgb_pix_fmts[] = {
333  AV_PIX_FMT_NONE
334  };
335  AVFilterFormats *fmts_list;
336 
337  if (geq->is_rgb) {
338  fmts_list = ff_make_format_list(rgb_pix_fmts);
339  } else
340  fmts_list = ff_make_format_list(yuv_pix_fmts);
341  if (!fmts_list)
342  return AVERROR(ENOMEM);
343  return ff_set_common_formats(ctx, fmts_list);
344 }
345 
347 {
348  GEQContext *geq = inlink->dst->priv;
350 
351  av_assert0(desc);
352 
353  geq->hsub = desc->log2_chroma_w;
354  geq->vsub = desc->log2_chroma_h;
355  geq->bps = desc->comp[0].depth;
356  geq->planes = desc->nb_components;
357  return 0;
358 }
359 
/** Per-plane job description handed to the slice worker threads. */
typedef struct ThreadData {
    int height;   ///< plane height in pixels
    int width;    ///< plane width in pixels
    int plane;    ///< index of the plane being filled
    int linesize; ///< output stride in bytes
} ThreadData;
366 
367 static int slice_geq_filter(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
368 {
369  GEQContext *geq = ctx->priv;
370  ThreadData *td = arg;
371  const int height = td->height;
372  const int width = td->width;
373  const int plane = td->plane;
374  const int linesize = td->linesize;
375  const int slice_start = (height * jobnr) / nb_jobs;
376  const int slice_end = (height * (jobnr+1)) / nb_jobs;
377  int x, y;
378  uint8_t *ptr;
379  uint16_t *ptr16;
380 
381  double values[VAR_VARS_NB];
382  values[VAR_W] = geq->values[VAR_W];
383  values[VAR_H] = geq->values[VAR_H];
384  values[VAR_N] = geq->values[VAR_N];
385  values[VAR_SW] = geq->values[VAR_SW];
386  values[VAR_SH] = geq->values[VAR_SH];
387  values[VAR_T] = geq->values[VAR_T];
388 
389  if (geq->bps == 8) {
390  for (y = slice_start; y < slice_end; y++) {
391  ptr = geq->dst + linesize * y;
392  values[VAR_Y] = y;
393 
394  for (x = 0; x < width; x++) {
395  values[VAR_X] = x;
396  ptr[x] = av_expr_eval(geq->e[plane], values, geq);
397  }
398  ptr += linesize;
399  }
400  }
401  else {
402  for (y = slice_start; y < slice_end; y++) {
403  ptr16 = geq->dst16 + (linesize/2) * y;
404  values[VAR_Y] = y;
405  for (x = 0; x < width; x++) {
406  values[VAR_X] = x;
407  ptr16[x] = av_expr_eval(geq->e[plane], values, geq);
408  }
409  }
410  }
411 
412  return 0;
413 }
414 
416 {
417  int plane;
418  AVFilterContext *ctx = inlink->dst;
419  const int nb_threads = ff_filter_get_nb_threads(ctx);
420  GEQContext *geq = ctx->priv;
421  AVFilterLink *outlink = inlink->dst->outputs[0];
422  AVFrame *out;
423 
424  geq->values[VAR_N] = inlink->frame_count_out,
425  geq->values[VAR_T] = in->pts == AV_NOPTS_VALUE ? NAN : in->pts * av_q2d(inlink->time_base),
426 
427  geq->picref = in;
428  out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
429  if (!out) {
430  av_frame_free(&in);
431  return AVERROR(ENOMEM);
432  }
433  av_frame_copy_props(out, in);
434 
435  for (plane = 0; plane < geq->planes && out->data[plane]; plane++) {
436  const int width = (plane == 1 || plane == 2) ? AV_CEIL_RSHIFT(inlink->w, geq->hsub) : inlink->w;
437  const int height = (plane == 1 || plane == 2) ? AV_CEIL_RSHIFT(inlink->h, geq->vsub) : inlink->h;
438  const int linesize = out->linesize[plane];
439  ThreadData td;
440 
441  geq->dst = out->data[plane];
442  geq->dst16 = (uint16_t*)out->data[plane];
443 
444  geq->values[VAR_W] = width;
445  geq->values[VAR_H] = height;
446  geq->values[VAR_SW] = width / (double)inlink->w;
447  geq->values[VAR_SH] = height / (double)inlink->h;
448 
449  td.width = width;
450  td.height = height;
451  td.plane = plane;
452  td.linesize = linesize;
453 
454  if (geq->needs_sum[plane])
455  calculate_sums(geq, plane, width, height);
456 
457  ctx->internal->execute(ctx, slice_geq_filter, &td, NULL, FFMIN(height, nb_threads));
458  }
459 
460  av_frame_free(&geq->picref);
461  return ff_filter_frame(outlink, out);
462 }
463 
465 {
466  int i;
467  GEQContext *geq = ctx->priv;
468 
469  for (i = 0; i < FF_ARRAY_ELEMS(geq->e); i++)
470  av_expr_free(geq->e[i]);
471  for (i = 0; i < NB_PLANES; i++)
472  av_freep(&geq->pixel_sums);
473 }
474 
/* Single video input: geq_config_props caches format parameters,
 * geq_filter_frame does the per-frame work. */
475 static const AVFilterPad geq_inputs[] = {
476  {
477  .name = "default",
478  .type = AVMEDIA_TYPE_VIDEO,
479  .config_props = geq_config_props,
480  .filter_frame = geq_filter_frame,
481  },
482  { NULL }
483 };
484 
/* Single video output; no output-side callbacks needed. */
485 static const AVFilterPad geq_outputs[] = {
486  {
487  .name = "default",
488  .type = AVMEDIA_TYPE_VIDEO,
489  },
490  { NULL }
491 };
492 
/* Filter registration.
 * NOTE(review): this extract is missing the opening
 * "AVFilter ff_vf_geq = {" line (vf_geq.c:493) as well as the
 * .query_formats (vf_geq.c:499) and .flags (vf_geq.c:503) initializers
 * implied by the cross-reference index (geq_query_formats,
 * AVFILTER_FLAG_SLICE_THREADS / AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC)
 * — confirm against the full source. */
494  .name = "geq",
495  .description = NULL_IF_CONFIG_SMALL("Apply generic equation to each pixel."),
496  .priv_size = sizeof(GEQContext),
497  .init = geq_init,
498  .uninit = geq_uninit,
500  .inputs = geq_inputs,
501  .outputs = geq_outputs,
502  .priv_class = &geq_class,
504 };
int plane
Definition: avisynth_c.h:384
#define NULL
Definition: coverity.c:32
static const AVFilterPad geq_inputs[]
Definition: vf_geq.c:475
#define AV_PIX_FMT_YUVA422P16
Definition: pixfmt.h:430
#define AV_PIX_FMT_YUV440P10
Definition: pixfmt.h:389
#define AV_PIX_FMT_YUVA422P9
Definition: pixfmt.h:422
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2522
Definition: vf_geq.c:45
This structure describes decoded (raw) audio or video data.
Definition: frame.h:295
AVOption.
Definition: opt.h:246
#define AV_PIX_FMT_YUVA420P10
Definition: pixfmt.h:424
#define AV_PIX_FMT_YUV444P14
Definition: pixfmt.h:397
#define AV_PIX_FMT_GBRAP10
Definition: pixfmt.h:407
#define AV_PIX_FMT_YUVA422P10
Definition: pixfmt.h:425
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
Definition: vf_geq.c:45
const char * desc
Definition: nvenc.c:68
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
int av_expr_count_func(AVExpr *e, unsigned *counter, int size, int arg)
Track the presence of user provided functions and their number of occurrences in a parsed expression...
Definition: eval.c:761
static av_cold void geq_uninit(AVFilterContext *ctx)
Definition: vf_geq.c:464
static double getpix(void *priv, double x, double y, int plane)
Definition: vf_geq.c:96
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:168
double * pixel_sums[NB_PLANES]
Definition: vf_geq.c:61
static const char *const var_names[]
Definition: vf_geq.c:44
#define AV_PIX_FMT_GBRP10
Definition: pixfmt.h:403
static const AVFilterPad geq_outputs[]
Definition: vf_geq.c:485
#define AV_PIX_FMT_GRAY9
Definition: pixfmt.h:367
#define AV_PIX_FMT_YUV420P12
Definition: pixfmt.h:391
static double lumsum(void *priv, double x, double y)
Definition: vf_geq.c:218
int av_expr_parse(AVExpr **expr, const char *s, const char *const *const_names, const char *const *func1_names, double(*const *funcs1)(void *, double), const char *const *func2_names, double(*const *funcs2)(void *, double, double), int log_offset, void *log_ctx)
Parse an expression.
Definition: eval.c:685
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
Definition: video.c:99
static double crsub(void *priv, double x, double y)
Definition: vf_geq.c:220
#define src
Definition: vp8dsp.c:254
uint8_t log2_chroma_w
Amount to shift the luma width right to find the chroma width.
Definition: pixdesc.h:92
static enum AVPixelFormat rgb_pix_fmts[]
Definition: jpeg2000dec.c:247
AVFrame * picref
current input buffer
Definition: vf_geq.c:51
AVFilterFormats * ff_make_format_list(const int *fmts)
Create a list of supported formats.
Definition: formats.c:283
#define AV_PIX_FMT_GRAY10
Definition: pixfmt.h:368
#define NB_PLANES
Definition: vf_geq.c:36
Definition: vf_geq.c:45
#define AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC
Some filters support a generic "enable" expression option that can be used to enable or disable a fil...
Definition: avfilter.h:125
const char * name
Pad name.
Definition: internal.h:60
#define AV_PIX_FMT_GRAY12
Definition: pixfmt.h:369
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
static int geq_query_formats(AVFilterContext *ctx)
Definition: vf_geq.c:304
Definition: vf_geq.c:65
static double cb(void *priv, double x, double y)
Definition: vf_geq.c:214
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1075
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:101
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:117
uint8_t
#define av_cold
Definition: attributes.h:82
Definition: vf_geq.c:65
static av_cold int uninit(AVCodecContext *avctx)
Definition: crystalhd.c:279
AVOptions.
AVFilter ff_vf_geq
Definition: vf_geq.c:493
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:90
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:388
Definition: eval.c:157
#define AV_PIX_FMT_YUVA420P9
Definition: pixfmt.h:421
#define AV_PIX_FMT_GBRP9
Definition: pixfmt.h:402
int height
Definition: vf_avgblur.c:61
#define height
int plane
Definition: vf_blend.c:57
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
int linesize
Definition: vf_avgblur.c:64
static double getpix_integrate(void *priv, double x, double y, int plane)
Definition: vf_geq.c:198
Definition: vf_geq.c:65
#define AV_PIX_FMT_YUV444P16
Definition: pixfmt.h:400
#define AV_PIX_FMT_YUV422P12
Definition: pixfmt.h:392
#define AV_PIX_FMT_YUVA420P16
Definition: pixfmt.h:429
#define av_log(a,...)
static int calculate_sums(GEQContext *geq, int plane, int w, int h)
Definition: vf_geq.c:141
A filter pad used for either input or output.
Definition: internal.h:54
planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
Definition: pixfmt.h:176
static double alpha(void *priv, double x, double y)
Definition: vf_geq.c:216
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:259
int width
Definition: frame.h:353
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
int ff_set_common_formats(AVFilterContext *ctx, AVFilterFormats *formats)
A helper for query_formats() which sets all links to the same list of formats.
Definition: formats.c:569
int planes
number of planes
Definition: vf_geq.c:56
#define td
Definition: regdef.h:70
static int geq_config_props(AVFilterLink *inlink)
Definition: vf_geq.c:346
uint8_t log2_chroma_h
Amount to shift the luma height right to find the chroma height.
Definition: pixdesc.h:101
#define OFFSET(x)
Definition: vf_geq.c:67
double values[VAR_VARS_NB]
expression values
Definition: vf_geq.c:54
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:202
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:186
InterpolationMethods
Definition: vf_geq.c:38
Definition: vf_geq.c:65
void * priv
private data for use by the filter
Definition: avfilter.h:353
#define AVFILTER_FLAG_SLICE_THREADS
The filter supports multithreading by splitting frames into multiple parts and processing them concur...
Definition: avfilter.h:116
#define AV_PIX_FMT_YUVA444P16
Definition: pixfmt.h:431
const char * arg
Definition: jacosubdec.c:66
Definition: vf_geq.c:45
#define AV_PIX_FMT_GBRAP12
Definition: pixfmt.h:408
simple assert() macros that are a bit more flexible than ISO C assert().
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:390
static const AVOption geq_options[]
Definition: vf_geq.c:70
#define AV_PIX_FMT_GBRAP16
Definition: pixfmt.h:409
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
#define AV_PIX_FMT_YUV422P9
Definition: pixfmt.h:385
uint8_t nb_components
The number of components each pixel has, (1-4)
Definition: pixdesc.h:83
Definition: vf_geq.c:65
#define AV_PIX_FMT_GBRP16
Definition: pixfmt.h:406
int ff_filter_get_nb_threads(AVFilterContext *ctx)
Get number of threads for current filter instance.
Definition: avfilter.c:784
#define AV_PIX_FMT_GRAY16
Definition: pixfmt.h:371
#define NAN
Definition: mathematics.h:64
#define FFMIN(a, b)
Definition: common.h:96
#define xi(width, name, var, range_min, range_max, subs,...)
Definition: cbs_h2645.c:386
#define width
uint8_t w
Definition: llviddspenc.c:38
AVFormatContext * ctx
Definition: movenc.c:48
static double cbsum(void *priv, double x, double y)
Definition: vf_geq.c:219
int needs_sum[NB_PLANES]
Definition: vf_geq.c:62
#define AV_PIX_FMT_YUVA444P10
Definition: pixfmt.h:426
#define AV_PIX_FMT_YUV444P9
Definition: pixfmt.h:386
#define AV_PIX_FMT_GBRP14
Definition: pixfmt.h:405
int bps
Definition: vf_geq.c:59
static const AVFilterPad outputs[]
Definition: af_acontrast.c:203
#define FF_ARRAY_ELEMS(a)
int interpolation
Definition: vf_geq.c:57
if(ret)
#define AV_PIX_FMT_YUV420P16
Definition: pixfmt.h:398
#define AV_PIX_FMT_YUV420P14
Definition: pixfmt.h:395
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:251
static enum AVPixelFormat yuv_pix_fmts[]
Definition: jpeg2000dec.c:249
void av_expr_free(AVExpr *e)
Free a parsed expression previously created with av_expr_parse().
Definition: eval.c:336
Used for passing data between threads.
Definition: dsddec.c:64
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:326
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
Definition: pixfmt.h:177
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
#define AV_PIX_FMT_GRAY14
Definition: pixfmt.h:370
char * expr_str[4+3]
expression strings for each plane
Definition: vf_geq.c:50
int hsub
Definition: vf_geq.c:55
double(* func2[])(void *, double, double)
Definition: af_afftfilt.c:120
#define FLAGS
Definition: vf_geq.c:68
AVExpr * e[NB_PLANES]
expressions for each plane
Definition: vf_geq.c:49
static av_cold int geq_init(AVFilterContext *ctx)
Definition: vf_geq.c:223
Definition: vf_geq.c:45
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(const int16_t *) pi >> 8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(const int32_t *) pi >> 24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31))))#define SET_CONV_FUNC_GROUP(ofmt, ifmt) static void set_generic_function(AudioConvert *ac){}void ff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, int sample_rate, 
int apply_map){AudioConvert *ac;int in_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) return NULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt) > 2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);return NULL;}return ac;}in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}else if(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;else ac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);return ac;}int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){int use_generic=1;int len=in->nb_samples;int p;if(ac->dc){av_log(ac->avr, AV_LOG_TRACE,"%d samples - audio_convert: %s to %s (dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));return ff_convert_dither(ac-> in
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:387
planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
Definition: pixfmt.h:72
Describe the class of an AVClass context structure.
Definition: log.h:67
Filter definition.
Definition: avfilter.h:144
uint8_t * dst
reference pointer to the 8bits output
Definition: vf_geq.c:52
static double alphasum(void *priv, double x, double y)
Definition: vf_geq.c:221
Definition: vf_geq.c:45
const char * name
Filter name.
Definition: avfilter.h:148
#define AV_PIX_FMT_YUV420P9
Definition: pixfmt.h:384
#define snprintf
Definition: snprintf.h:34
AVFilterLink ** outputs
array of pointers to output links
Definition: avfilter.h:350
#define AV_PIX_FMT_YUV422P14
Definition: pixfmt.h:396
#define AV_PIX_FMT_GBRP12
Definition: pixfmt.h:404
#define flags(name, subs,...)
Definition: cbs_av1.c:564
AVFilterInternal * internal
An opaque struct for libavfilter internal use.
Definition: avfilter.h:378
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:388
uint16_t * dst16
reference pointer to the 16bits output
Definition: vf_geq.c:53
#define AV_PIX_FMT_YUV444P12
Definition: pixfmt.h:394
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:309
static double lum(void *priv, double x, double y)
Definition: vf_geq.c:213
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
Y , 8bpp.
Definition: pixfmt.h:74
static int query_formats(AVFilterContext *ctx)
Definition: aeval.c:244
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:215
#define AV_PIX_FMT_YUVA444P9
Definition: pixfmt.h:423
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
Definition: pixfmt.h:73
avfilter_execute_func * execute
Definition: internal.h:144
static const char *const func2_names[]
Definition: af_afftfilt.c:119
static int slice_end(AVCodecContext *avctx, AVFrame *pict)
Handle slice ends.
Definition: mpeg12dec.c:2035
Definition: vf_geq.c:45
double av_expr_eval(AVExpr *e, const double *const_values, void *opaque)
Evaluate a previously parsed expression.
Definition: eval.c:766
int is_rgb
Definition: vf_geq.c:58
static int geq_filter_frame(AVFilterLink *inlink, AVFrame *in)
Definition: vf_geq.c:415
A list of supported formats for one end of a filter link.
Definition: formats.h:64
#define lrint
Definition: tablegen.h:53
An instance of a filter.
Definition: avfilter.h:338
static double getpix_integrate_internal(GEQContext *geq, int x, int y, int plane, int w, int h)
Definition: vf_geq.c:179
Definition: vf_geq.c:65
int height
Definition: frame.h:353
FILE * out
Definition: movenc.c:54
#define av_freep(p)
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:99
static int slice_geq_filter(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_geq.c:367
#define av_malloc_array(a, b)
Definition: vf_geq.c:45
internal API functions
Definition: vf_geq.c:65
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Frame references ownership and permissions
static double cr(void *priv, double x, double y)
Definition: vf_geq.c:215
int depth
Number of bits in the component.
Definition: pixdesc.h:58
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
int vsub
chroma subsampling
Definition: vf_geq.c:55
#define AV_PIX_FMT_YUV422P16
Definition: pixfmt.h:399
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:654
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
simple arithmetic expression evaluator
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:58
AVFILTER_DEFINE_CLASS(geq)