FFmpeg
vf_geq.c
Go to the documentation of this file.
1 /*
2  * Copyright (C) 2006 Michael Niedermayer <michaelni@gmx.at>
3  * Copyright (C) 2012 Clément Bœsch <u pkh me>
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation; either version 2 of the License, or
10  * (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15  * GNU General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License along
18  * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
19  * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
20  */
21 
22 /**
23  * @file
24  * Generic equation change filter
25  * Originally written by Michael Niedermayer for the MPlayer project, and
26  * ported by Clément Bœsch for FFmpeg.
27  */
28 
29 #include "libavutil/avassert.h"
30 #include "libavutil/avstring.h"
31 #include "libavutil/eval.h"
32 #include "libavutil/opt.h"
33 #include "libavutil/pixdesc.h"
34 #include "internal.h"
35 
40 };
41 
42 static const char *const var_names[] = { "X", "Y", "W", "H", "N", "SW", "SH", "T", NULL };
44 
45 typedef struct GEQContext {
46  const AVClass *class;
47  AVExpr *e[4]; ///< expressions for each plane
48  char *expr_str[4+3]; ///< expression strings for each plane
49  AVFrame *picref; ///< current input buffer
50  uint8_t *dst; ///< reference pointer to the 8bits output
51  uint16_t *dst16; ///< reference pointer to the 16bits output
52  double values[VAR_VARS_NB]; ///< expression values
53  int hsub, vsub; ///< chroma subsampling
54  int planes; ///< number of planes
56  int is_rgb;
57  int bps;
58 } GEQContext;
59 
/* Indices into expr_str[]: Y/U/V/A (0..3) for YCbCr mode, G/B/R (4..6)
 * for RGB mode — see the OFFSET() uses in geq_options below. */
enum { Y = 0, U, V, A, G, B, R };
61 
/* Shorthand for AVOption offsets into GEQContext. */
#define OFFSET(x) offsetof(GEQContext, x)
/* Every option applies to video and is a filtering parameter. */
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
64 
65 static const AVOption geq_options[] = {
66  { "lum_expr", "set luminance expression", OFFSET(expr_str[Y]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
67  { "lum", "set luminance expression", OFFSET(expr_str[Y]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
68  { "cb_expr", "set chroma blue expression", OFFSET(expr_str[U]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
69  { "cb", "set chroma blue expression", OFFSET(expr_str[U]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
70  { "cr_expr", "set chroma red expression", OFFSET(expr_str[V]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
71  { "cr", "set chroma red expression", OFFSET(expr_str[V]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
72  { "alpha_expr", "set alpha expression", OFFSET(expr_str[A]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
73  { "a", "set alpha expression", OFFSET(expr_str[A]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
74  { "red_expr", "set red expression", OFFSET(expr_str[R]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
75  { "r", "set red expression", OFFSET(expr_str[R]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
76  { "green_expr", "set green expression", OFFSET(expr_str[G]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
77  { "g", "set green expression", OFFSET(expr_str[G]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
78  { "blue_expr", "set blue expression", OFFSET(expr_str[B]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
79  { "b", "set blue expression", OFFSET(expr_str[B]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
80  { "interpolation","set interpolation method", OFFSET(interpolation), AV_OPT_TYPE_INT, {.i64=INTERP_BILINEAR}, 0, NB_INTERP-1, FLAGS, "interp" },
81  { "i", "set interpolation method", OFFSET(interpolation), AV_OPT_TYPE_INT, {.i64=INTERP_BILINEAR}, 0, NB_INTERP-1, FLAGS, "interp" },
82  { "nearest", "nearest interpolation", 0, AV_OPT_TYPE_CONST, {.i64=INTERP_NEAREST}, 0, 0, FLAGS, "interp" },
83  { "n", "nearest interpolation", 0, AV_OPT_TYPE_CONST, {.i64=INTERP_NEAREST}, 0, 0, FLAGS, "interp" },
84  { "bilinear", "bilinear interpolation", 0, AV_OPT_TYPE_CONST, {.i64=INTERP_BILINEAR}, 0, 0, FLAGS, "interp" },
85  { "b", "bilinear interpolation", 0, AV_OPT_TYPE_CONST, {.i64=INTERP_BILINEAR}, 0, 0, FLAGS, "interp" },
86  {NULL},
87 };
88 
90 
91 static inline double getpix(void *priv, double x, double y, int plane)
92 {
93  int xi, yi;
94  GEQContext *geq = priv;
95  AVFrame *picref = geq->picref;
96  const uint8_t *src = picref->data[plane];
97  int linesize = picref->linesize[plane];
98  const int w = (plane == 1 || plane == 2) ? AV_CEIL_RSHIFT(picref->width, geq->hsub) : picref->width;
99  const int h = (plane == 1 || plane == 2) ? AV_CEIL_RSHIFT(picref->height, geq->vsub) : picref->height;
100 
101  if (!src)
102  return 0;
103 
104  if (geq->interpolation == INTERP_BILINEAR) {
105  xi = x = av_clipd(x, 0, w - 2);
106  yi = y = av_clipd(y, 0, h - 2);
107 
108  x -= xi;
109  y -= yi;
110 
111  if (geq->bps > 8) {
112  const uint16_t *src16 = (const uint16_t*)src;
113  linesize /= 2;
114 
115  return (1-y)*((1-x)*src16[xi + yi * linesize] + x*src16[xi + 1 + yi * linesize])
116  + y *((1-x)*src16[xi + (yi+1) * linesize] + x*src16[xi + 1 + (yi+1) * linesize]);
117  } else {
118  return (1-y)*((1-x)*src[xi + yi * linesize] + x*src[xi + 1 + yi * linesize])
119  + y *((1-x)*src[xi + (yi+1) * linesize] + x*src[xi + 1 + (yi+1) * linesize]);
120  }
121  } else {
122  xi = av_clipd(x, 0, w - 1);
123  yi = av_clipd(y, 0, h - 1);
124 
125  if (geq->bps > 8) {
126  const uint16_t *src16 = (const uint16_t*)src;
127  linesize /= 2;
128 
129  return src16[xi + yi * linesize];
130  } else {
131  return src[xi + yi * linesize];
132  }
133  }
134 }
135 
//TODO: cubic interpolate
//TODO: keep the last few frames
/* Per-plane accessors exposed to the expression evaluator as two-argument
 * functions (e.g. lum(X,Y)); each samples the corresponding plane. */
static double lum(void *priv, double x, double y) { return getpix(priv, x, y, 0); }
static double cb(void *priv, double x, double y) { return getpix(priv, x, y, 1); }
static double cr(void *priv, double x, double y) { return getpix(priv, x, y, 2); }
static double alpha(void *priv, double x, double y) { return getpix(priv, x, y, 3); }
142 
144 {
145  GEQContext *geq = ctx->priv;
146  int plane, ret = 0;
147 
148  if (!geq->expr_str[Y] && !geq->expr_str[G] && !geq->expr_str[B] && !geq->expr_str[R]) {
149  av_log(ctx, AV_LOG_ERROR, "A luminance or RGB expression is mandatory\n");
150  ret = AVERROR(EINVAL);
151  goto end;
152  }
153  geq->is_rgb = !geq->expr_str[Y];
154 
155  if ((geq->expr_str[Y] || geq->expr_str[U] || geq->expr_str[V]) && (geq->expr_str[G] || geq->expr_str[B] || geq->expr_str[R])) {
156  av_log(ctx, AV_LOG_ERROR, "Either YCbCr or RGB but not both must be specified\n");
157  ret = AVERROR(EINVAL);
158  goto end;
159  }
160 
161  if (!geq->expr_str[U] && !geq->expr_str[V]) {
162  /* No chroma at all: fallback on luma */
163  geq->expr_str[U] = av_strdup(geq->expr_str[Y]);
164  geq->expr_str[V] = av_strdup(geq->expr_str[Y]);
165  } else {
166  /* One chroma unspecified, fallback on the other */
167  if (!geq->expr_str[U]) geq->expr_str[U] = av_strdup(geq->expr_str[V]);
168  if (!geq->expr_str[V]) geq->expr_str[V] = av_strdup(geq->expr_str[U]);
169  }
170 
171  if (!geq->expr_str[A]) {
172  char bps_string[8];
173  snprintf(bps_string, sizeof(bps_string), "%d", (1<<geq->bps) - 1);
174  geq->expr_str[A] = av_strdup(bps_string);
175  }
176  if (!geq->expr_str[G])
177  geq->expr_str[G] = av_strdup("g(X,Y)");
178  if (!geq->expr_str[B])
179  geq->expr_str[B] = av_strdup("b(X,Y)");
180  if (!geq->expr_str[R])
181  geq->expr_str[R] = av_strdup("r(X,Y)");
182 
183  if (geq->is_rgb ?
184  (!geq->expr_str[G] || !geq->expr_str[B] || !geq->expr_str[R])
185  :
186  (!geq->expr_str[U] || !geq->expr_str[V] || !geq->expr_str[A])) {
187  ret = AVERROR(ENOMEM);
188  goto end;
189  }
190 
191  for (plane = 0; plane < 4; plane++) {
192  static double (*p[])(void *, double, double) = { lum, cb, cr, alpha };
193  static const char *const func2_yuv_names[] = { "lum", "cb", "cr", "alpha", "p", NULL };
194  static const char *const func2_rgb_names[] = { "g", "b", "r", "alpha", "p", NULL };
195  const char *const *func2_names = geq->is_rgb ? func2_rgb_names : func2_yuv_names;
196  double (*func2[])(void *, double, double) = { lum, cb, cr, alpha, p[plane], NULL };
197 
198  ret = av_expr_parse(&geq->e[plane], geq->expr_str[plane < 3 && geq->is_rgb ? plane+4 : plane], var_names,
199  NULL, NULL, func2_names, func2, 0, ctx);
200  if (ret < 0)
201  break;
202  }
203 
204 end:
205  return ret;
206 }
207 
209 {
210  GEQContext *geq = ctx->priv;
211  static const enum AVPixelFormat yuv_pix_fmts[] = {
229  };
230  static const enum AVPixelFormat rgb_pix_fmts[] = {
237  AV_PIX_FMT_NONE
238  };
239  AVFilterFormats *fmts_list;
240 
241  if (geq->is_rgb) {
242  fmts_list = ff_make_format_list(rgb_pix_fmts);
243  } else
244  fmts_list = ff_make_format_list(yuv_pix_fmts);
245  if (!fmts_list)
246  return AVERROR(ENOMEM);
247  return ff_set_common_formats(ctx, fmts_list);
248 }
249 
251 {
252  GEQContext *geq = inlink->dst->priv;
254 
255  av_assert0(desc);
256 
257  geq->hsub = desc->log2_chroma_w;
258  geq->vsub = desc->log2_chroma_h;
259  geq->bps = desc->comp[0].depth;
260  geq->planes = desc->nb_components;
261  return 0;
262 }
263 
/* Job description handed to the slice_geq_filter() workers for one plane. */
typedef struct ThreadData {
    int height;   ///< plane height in pixels
    int width;    ///< plane width in pixels
    int plane;    ///< index of the plane being processed
    int linesize; ///< plane stride in bytes
} ThreadData;
270 
271 static int slice_geq_filter(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
272 {
273  GEQContext *geq = ctx->priv;
274  ThreadData *td = arg;
275  const int height = td->height;
276  const int width = td->width;
277  const int plane = td->plane;
278  const int linesize = td->linesize;
279  const int slice_start = (height * jobnr) / nb_jobs;
280  const int slice_end = (height * (jobnr+1)) / nb_jobs;
281  int x, y;
282  uint8_t *ptr;
283  uint16_t *ptr16;
284 
285  double values[VAR_VARS_NB];
286  values[VAR_W] = geq->values[VAR_W];
287  values[VAR_H] = geq->values[VAR_H];
288  values[VAR_N] = geq->values[VAR_N];
289  values[VAR_SW] = geq->values[VAR_SW];
290  values[VAR_SH] = geq->values[VAR_SH];
291  values[VAR_T] = geq->values[VAR_T];
292 
293  if (geq->bps == 8) {
294  for (y = slice_start; y < slice_end; y++) {
295  ptr = geq->dst + linesize * y;
296  values[VAR_Y] = y;
297 
298  for (x = 0; x < width; x++) {
299  values[VAR_X] = x;
300  ptr[x] = av_expr_eval(geq->e[plane], values, geq);
301  }
302  ptr += linesize;
303  }
304  }
305  else {
306  for (y = slice_start; y < slice_end; y++) {
307  ptr16 = geq->dst16 + (linesize/2) * y;
308  values[VAR_Y] = y;
309  for (x = 0; x < width; x++) {
310  values[VAR_X] = x;
311  ptr16[x] = av_expr_eval(geq->e[plane], values, geq);
312  }
313  }
314  }
315 
316  return 0;
317 }
318 
320 {
321  int plane;
322  AVFilterContext *ctx = inlink->dst;
323  const int nb_threads = ff_filter_get_nb_threads(ctx);
324  GEQContext *geq = ctx->priv;
325  AVFilterLink *outlink = inlink->dst->outputs[0];
326  AVFrame *out;
327 
328  geq->values[VAR_N] = inlink->frame_count_out,
329  geq->values[VAR_T] = in->pts == AV_NOPTS_VALUE ? NAN : in->pts * av_q2d(inlink->time_base),
330 
331  geq->picref = in;
332  out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
333  if (!out) {
334  av_frame_free(&in);
335  return AVERROR(ENOMEM);
336  }
337  av_frame_copy_props(out, in);
338 
339  for (plane = 0; plane < geq->planes && out->data[plane]; plane++) {
340  const int width = (plane == 1 || plane == 2) ? AV_CEIL_RSHIFT(inlink->w, geq->hsub) : inlink->w;
341  const int height = (plane == 1 || plane == 2) ? AV_CEIL_RSHIFT(inlink->h, geq->vsub) : inlink->h;
342  const int linesize = out->linesize[plane];
343  ThreadData td;
344 
345  geq->dst = out->data[plane];
346  geq->dst16 = (uint16_t*)out->data[plane];
347 
348  geq->values[VAR_W] = width;
349  geq->values[VAR_H] = height;
350  geq->values[VAR_SW] = width / (double)inlink->w;
351  geq->values[VAR_SH] = height / (double)inlink->h;
352 
353  td.width = width;
354  td.height = height;
355  td.plane = plane;
356  td.linesize = linesize;
357 
358  ctx->internal->execute(ctx, slice_geq_filter, &td, NULL, FFMIN(height, nb_threads));
359  }
360 
361  av_frame_free(&geq->picref);
362  return ff_filter_frame(outlink, out);
363 }
364 
366 {
367  int i;
368  GEQContext *geq = ctx->priv;
369 
370  for (i = 0; i < FF_ARRAY_ELEMS(geq->e); i++)
371  av_expr_free(geq->e[i]);
372 }
373 
/* Single video input; config_props caches the pixel-format properties and
 * filter_frame performs the per-frame evaluation. */
static const AVFilterPad geq_inputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
        .config_props = geq_config_props,
        .filter_frame = geq_filter_frame,
    },
    { NULL }
};
383 
/* Single video output; no output-side callbacks are needed. */
static const AVFilterPad geq_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};
391 
393  .name = "geq",
394  .description = NULL_IF_CONFIG_SMALL("Apply generic equation to each pixel."),
395  .priv_size = sizeof(GEQContext),
396  .init = geq_init,
397  .uninit = geq_uninit,
399  .inputs = geq_inputs,
400  .outputs = geq_outputs,
401  .priv_class = &geq_class,
403 };
int plane
Definition: avisynth_c.h:384
#define NULL
Definition: coverity.c:32
static const AVFilterPad geq_inputs[]
Definition: vf_geq.c:374
#define AV_PIX_FMT_YUVA422P16
Definition: pixfmt.h:430
#define AV_PIX_FMT_YUV440P10
Definition: pixfmt.h:389
#define AV_PIX_FMT_YUVA422P9
Definition: pixfmt.h:422
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2522
This structure describes decoded (raw) audio or video data.
Definition: frame.h:295
Definition: vf_geq.c:43
AVOption.
Definition: opt.h:246
#define AV_PIX_FMT_YUVA420P10
Definition: pixfmt.h:424
#define AV_PIX_FMT_YUV444P14
Definition: pixfmt.h:397
#define AV_PIX_FMT_GBRAP10
Definition: pixfmt.h:407
#define AV_PIX_FMT_YUVA422P10
Definition: pixfmt.h:425
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
const char * desc
Definition: nvenc.c:68
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
static av_cold void geq_uninit(AVFilterContext *ctx)
Definition: vf_geq.c:365
static double getpix(void *priv, double x, double y, int plane)
Definition: vf_geq.c:91
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:168
static const char *const var_names[]
Definition: vf_geq.c:42
#define AV_PIX_FMT_GBRP10
Definition: pixfmt.h:403
static const AVFilterPad geq_outputs[]
Definition: vf_geq.c:384
#define AV_PIX_FMT_GRAY9
Definition: pixfmt.h:367
#define AV_PIX_FMT_YUV420P12
Definition: pixfmt.h:391
int av_expr_parse(AVExpr **expr, const char *s, const char *const *const_names, const char *const *func1_names, double(*const *funcs1)(void *, double), const char *const *func2_names, double(*const *funcs2)(void *, double, double), int log_offset, void *log_ctx)
Parse an expression.
Definition: eval.c:683
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
Definition: video.c:99
#define src
Definition: vp8dsp.c:254
uint8_t log2_chroma_w
Amount to shift the luma width right to find the chroma width.
Definition: pixdesc.h:92
Definition: vf_geq.c:60
static enum AVPixelFormat rgb_pix_fmts[]
Definition: jpeg2000dec.c:247
AVFrame * picref
current input buffer
Definition: vf_geq.c:49
AVFilterFormats * ff_make_format_list(const int *fmts)
Create a list of supported formats.
Definition: formats.c:283
#define AV_PIX_FMT_GRAY10
Definition: pixfmt.h:368
#define AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC
Some filters support a generic "enable" expression option that can be used to enable or disable a fil...
Definition: avfilter.h:125
const char * name
Pad name.
Definition: internal.h:60
#define AV_PIX_FMT_GRAY12
Definition: pixfmt.h:369
Definition: vf_geq.c:43
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
static int geq_query_formats(AVFilterContext *ctx)
Definition: vf_geq.c:208
static double cb(void *priv, double x, double y)
Definition: vf_geq.c:139
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1093
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:101
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:117
uint8_t
#define av_cold
Definition: attributes.h:82
static av_cold int uninit(AVCodecContext *avctx)
Definition: crystalhd.c:279
AVOptions.
AVFilter ff_vf_geq
Definition: vf_geq.c:392
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:90
Definition: vf_geq.c:60
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:388
Definition: eval.c:157
#define AV_PIX_FMT_YUVA420P9
Definition: pixfmt.h:421
#define AV_PIX_FMT_GBRP9
Definition: pixfmt.h:402
int height
Definition: vf_avgblur.c:61
#define height
int plane
Definition: vf_blend.c:57
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
int linesize
Definition: vf_avgblur.c:64
#define AV_PIX_FMT_YUV444P16
Definition: pixfmt.h:400
Definition: vf_geq.c:43
#define AV_PIX_FMT_YUV422P12
Definition: pixfmt.h:392
#define AV_PIX_FMT_YUVA420P16
Definition: pixfmt.h:429
#define av_log(a,...)
A filter pad used for either input or output.
Definition: internal.h:54
planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
Definition: pixfmt.h:176
static double alpha(void *priv, double x, double y)
Definition: vf_geq.c:141
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:259
int width
Definition: frame.h:353
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
int ff_set_common_formats(AVFilterContext *ctx, AVFilterFormats *formats)
A helper for query_formats() which sets all links to the same list of formats.
Definition: formats.c:569
int planes
number of planes
Definition: vf_geq.c:54
#define td
Definition: regdef.h:70
AVExpr * e[4]
expressions for each plane
Definition: vf_geq.c:47
static int geq_config_props(AVFilterLink *inlink)
Definition: vf_geq.c:250
uint8_t log2_chroma_h
Amount to shift the luma height right to find the chroma height.
Definition: pixdesc.h:101
#define OFFSET(x)
Definition: vf_geq.c:62
double values[VAR_VARS_NB]
expression values
Definition: vf_geq.c:52
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:202
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:186
InterpolationMethods
Definition: vf_geq.c:36
void * priv
private data for use by the filter
Definition: avfilter.h:353
#define AVFILTER_FLAG_SLICE_THREADS
The filter supports multithreading by splitting frames into multiple parts and processing them concur...
Definition: avfilter.h:116
#define AV_PIX_FMT_YUVA444P16
Definition: pixfmt.h:431
const char * arg
Definition: jacosubdec.c:66
#define AV_PIX_FMT_GBRAP12
Definition: pixfmt.h:408
simple assert() macros that are a bit more flexible than ISO C assert().
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:390
static const AVOption geq_options[]
Definition: vf_geq.c:65
#define AV_PIX_FMT_GBRAP16
Definition: pixfmt.h:409
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
#define AV_PIX_FMT_YUV422P9
Definition: pixfmt.h:385
uint8_t nb_components
The number of components each pixel has, (1-4)
Definition: pixdesc.h:83
#define AV_PIX_FMT_GBRP16
Definition: pixfmt.h:406
int ff_filter_get_nb_threads(AVFilterContext *ctx)
Get number of threads for current filter instance.
Definition: avfilter.c:802
#define AV_PIX_FMT_GRAY16
Definition: pixfmt.h:371
#define NAN
Definition: mathematics.h:64
#define FFMIN(a, b)
Definition: common.h:96
#define xi(width, name, var, range_min, range_max, subs,...)
Definition: cbs_h2645.c:386
#define width
uint8_t w
Definition: llviddspenc.c:38
AVFormatContext * ctx
Definition: movenc.c:48
Definition: vf_geq.c:43
#define AV_PIX_FMT_YUVA444P10
Definition: pixfmt.h:426
#define AV_PIX_FMT_YUV444P9
Definition: pixfmt.h:386
#define AV_PIX_FMT_GBRP14
Definition: pixfmt.h:405
int bps
Definition: vf_geq.c:57
static const AVFilterPad outputs[]
Definition: af_acontrast.c:203
#define FF_ARRAY_ELEMS(a)
int interpolation
Definition: vf_geq.c:55
#define AV_PIX_FMT_YUV420P16
Definition: pixfmt.h:398
#define AV_PIX_FMT_YUV420P14
Definition: pixfmt.h:395
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:251
static enum AVPixelFormat yuv_pix_fmts[]
Definition: jpeg2000dec.c:249
void av_expr_free(AVExpr *e)
Free a parsed expression previously created with av_expr_parse().
Definition: eval.c:336
Used for passing data between threads.
Definition: dsddec.c:64
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:326
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
Definition: pixfmt.h:177
Definition: vf_geq.c:43
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
#define AV_PIX_FMT_GRAY14
Definition: pixfmt.h:370
char * expr_str[4+3]
expression strings for each plane
Definition: vf_geq.c:48
int hsub
Definition: vf_geq.c:53
double(* func2[])(void *, double, double)
Definition: af_afftfilt.c:120
#define FLAGS
Definition: vf_geq.c:63
static av_cold int geq_init(AVFilterContext *ctx)
Definition: vf_geq.c:143
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
Definition: vf_geq.c:60
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(const int16_t *) pi >> 8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(const int32_t *) pi >> 24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31))))#define SET_CONV_FUNC_GROUP(ofmt, ifmt) static void set_generic_function(AudioConvert *ac){}void ff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, int sample_rate, 
int apply_map){AudioConvert *ac;int in_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) return NULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt) > 2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);return NULL;}return ac;}in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}else if(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;else ac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);return ac;}int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){int use_generic=1;int len=in->nb_samples;int p;if(ac->dc){av_log(ac->avr, AV_LOG_TRACE,"%d samples - audio_convert: %s to %s (dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));return ff_convert_dither(ac-> in
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:387
planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
Definition: pixfmt.h:72
Describe the class of an AVClass context structure.
Definition: log.h:67
Filter definition.
Definition: avfilter.h:144
uint8_t * dst
reference pointer to the 8bits output
Definition: vf_geq.c:50
Definition: vf_geq.c:60
Definition: vf_geq.c:43
Definition: vf_geq.c:60
Definition: vf_geq.c:43
const char * name
Filter name.
Definition: avfilter.h:148
#define AV_PIX_FMT_YUV420P9
Definition: pixfmt.h:384
#define snprintf
Definition: snprintf.h:34
AVFilterLink ** outputs
array of pointers to output links
Definition: avfilter.h:350
#define AV_PIX_FMT_YUV422P14
Definition: pixfmt.h:396
#define AV_PIX_FMT_GBRP12
Definition: pixfmt.h:404
#define flags(name, subs,...)
Definition: cbs_av1.c:561
AVFilterInternal * internal
An opaque struct for libavfilter internal use.
Definition: avfilter.h:378
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:388
uint16_t * dst16
reference pointer to the 16bits output
Definition: vf_geq.c:51
#define AV_PIX_FMT_YUV444P12
Definition: pixfmt.h:394
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:309
static double lum(void *priv, double x, double y)
Definition: vf_geq.c:138
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
Y , 8bpp.
Definition: pixfmt.h:74
static int query_formats(AVFilterContext *ctx)
Definition: aeval.c:244
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:215
Definition: vf_geq.c:60
#define AV_PIX_FMT_YUVA444P9
Definition: pixfmt.h:423
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
Definition: pixfmt.h:73
avfilter_execute_func * execute
Definition: internal.h:155
static const char *const func2_names[]
Definition: af_afftfilt.c:119
static int slice_end(AVCodecContext *avctx, AVFrame *pict)
Handle slice ends.
Definition: mpeg12dec.c:2036
Definition: vf_geq.c:43
double av_expr_eval(AVExpr *e, const double *const_values, void *opaque)
Evaluate a previously parsed expression.
Definition: eval.c:738
int is_rgb
Definition: vf_geq.c:56
static int geq_filter_frame(AVFilterLink *inlink, AVFrame *in)
Definition: vf_geq.c:319
A list of supported formats for one end of a filter link.
Definition: formats.h:64
An instance of a filter.
Definition: avfilter.h:338
Definition: vf_geq.c:60
int height
Definition: frame.h:353
FILE * out
Definition: movenc.c:54
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:99
static int slice_geq_filter(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_geq.c:271
internal API functions
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Frame references ownership and permissions
static double cr(void *priv, double x, double y)
Definition: vf_geq.c:140
int depth
Number of bits in the component.
Definition: pixdesc.h:58
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
int vsub
chroma subsampling
Definition: vf_geq.c:53
#define AV_PIX_FMT_YUV422P16
Definition: pixfmt.h:399
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:654
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
simple arithmetic expression evaluator
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:58
AVFILTER_DEFINE_CLASS(geq)