FFmpeg
vf_geq.c
Go to the documentation of this file.
1 /*
2  * Copyright (C) 2006 Michael Niedermayer <michaelni@gmx.at>
3  * Copyright (C) 2012 Clément Bœsch <u pkh me>
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation; either version 2 of the License, or
10  * (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15  * GNU General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License along
18  * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
19  * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
20  */
21 
22 /**
23  * @file
24  * Generic equation change filter
25  * Originally written by Michael Niedermayer for the MPlayer project, and
26  * ported by Clément Bœsch for FFmpeg.
27  */
28 
29 #include "libavutil/avassert.h"
30 #include "libavutil/avstring.h"
31 #include "libavutil/eval.h"
32 #include "libavutil/opt.h"
33 #include "libavutil/pixdesc.h"
34 #include "internal.h"
35 
/* Names of the constants available in user expressions. The order must match
 * the VAR_* enum (its declaration is elided in this extract of the file). */
static const char *const var_names[] = { "X", "Y", "W", "H", "N", "SW", "SH", "T", NULL };
38 
/* Private context of the geq filter instance. */
typedef struct GEQContext {
    const AVClass *class;
    AVExpr *e[4];               ///< parsed expressions, one per plane
    char *expr_str[4+3];        ///< expression strings: slots Y/U/V/A plus G/B/R
    AVFrame *picref;            ///< current input buffer (read by getpix())
    uint8_t *dst;               ///< reference pointer to the 8bits output
    uint16_t *dst16;            ///< reference pointer to the 16bits output
    double values[VAR_VARS_NB]; ///< expression values
    int hsub, vsub;             ///< chroma subsampling (log2, from the pix fmt descriptor)
    int planes;                 ///< number of planes
    int is_rgb;                 ///< non-zero when RGB expressions are in use (no luma expr given)
    int bps;                    ///< bits per sample of component 0
} GEQContext;
52 
/* Indices into expr_str[]: Y/U/V/A for YCbCr mode, G/B/R for RGB mode. */
enum { Y = 0, U, V, A, G, B, R };
54 
/* Option table helpers: member offset inside GEQContext and common flags. */
#define OFFSET(x) offsetof(GEQContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
57 
/* User options. Each expression has a long name and a short alias pointing at
 * the same expr_str[] slot, so either spelling sets the same string. */
static const AVOption geq_options[] = {
    { "lum_expr",   "set luminance expression",   OFFSET(expr_str[Y]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "lum",        "set luminance expression",   OFFSET(expr_str[Y]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "cb_expr",    "set chroma blue expression", OFFSET(expr_str[U]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "cb",         "set chroma blue expression", OFFSET(expr_str[U]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "cr_expr",    "set chroma red expression",  OFFSET(expr_str[V]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "cr",         "set chroma red expression",  OFFSET(expr_str[V]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "alpha_expr", "set alpha expression",       OFFSET(expr_str[A]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "a",          "set alpha expression",       OFFSET(expr_str[A]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "red_expr",   "set red expression",         OFFSET(expr_str[R]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "r",          "set red expression",         OFFSET(expr_str[R]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "green_expr", "set green expression",       OFFSET(expr_str[G]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "g",          "set green expression",       OFFSET(expr_str[G]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "blue_expr",  "set blue expression",        OFFSET(expr_str[B]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "b",          "set blue expression",        OFFSET(expr_str[B]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
    {NULL},
};
75 
77 
78 static inline double getpix(void *priv, double x, double y, int plane)
79 {
80  int xi, yi;
81  GEQContext *geq = priv;
82  AVFrame *picref = geq->picref;
83  const uint8_t *src = picref->data[plane];
84  int linesize = picref->linesize[plane];
85  const int w = (plane == 1 || plane == 2) ? AV_CEIL_RSHIFT(picref->width, geq->hsub) : picref->width;
86  const int h = (plane == 1 || plane == 2) ? AV_CEIL_RSHIFT(picref->height, geq->vsub) : picref->height;
87 
88  if (!src)
89  return 0;
90 
91  xi = x = av_clipd(x, 0, w - 2);
92  yi = y = av_clipd(y, 0, h - 2);
93 
94  x -= xi;
95  y -= yi;
96 
97  if (geq->bps > 8) {
98  const uint16_t *src16 = (const uint16_t*)src;
99  linesize /= 2;
100 
101  return (1-y)*((1-x)*src16[xi + yi * linesize] + x*src16[xi + 1 + yi * linesize])
102  + y *((1-x)*src16[xi + (yi+1) * linesize] + x*src16[xi + 1 + (yi+1) * linesize]);
103  } else {
104  return (1-y)*((1-x)*src[xi + yi * linesize] + x*src[xi + 1 + yi * linesize])
105  + y *((1-x)*src[xi + (yi+1) * linesize] + x*src[xi + 1 + (yi+1) * linesize]);
106  }
107 }
108 
//TODO: cubic interpolate
//TODO: keep the last few frames
/* Per-plane sampling accessors exposed to the expression evaluator as
 * two-argument functions (lum/cb/cr/alpha, or g/b/r in RGB mode). */
static double lum(void *priv, double x, double y) { return getpix(priv, x, y, 0); }
static double cb(void *priv, double x, double y) { return getpix(priv, x, y, 1); }
static double cr(void *priv, double x, double y) { return getpix(priv, x, y, 2); }
static double alpha(void *priv, double x, double y) { return getpix(priv, x, y, 3); }
115 
/* geq_init: validate the option strings, fill in defaults for unspecified
 * expressions and parse one expression per plane.
 * (The signature line is elided in this extract; the file's index gives:
 * static av_cold int geq_init(AVFilterContext *ctx), vf_geq.c:116.) */
{
    GEQContext *geq = ctx->priv;
    int plane, ret = 0;

    /* At least one of the luma or RGB expressions must be provided. */
    if (!geq->expr_str[Y] && !geq->expr_str[G] && !geq->expr_str[B] && !geq->expr_str[R]) {
        av_log(ctx, AV_LOG_ERROR, "A luminance or RGB expression is mandatory\n");
        ret = AVERROR(EINVAL);
        goto end;
    }
    /* RGB mode is selected by the absence of a luma expression. */
    geq->is_rgb = !geq->expr_str[Y];

    /* YCbCr and RGB expressions are mutually exclusive. */
    if ((geq->expr_str[Y] || geq->expr_str[U] || geq->expr_str[V]) && (geq->expr_str[G] || geq->expr_str[B] || geq->expr_str[R])) {
        av_log(ctx, AV_LOG_ERROR, "Either YCbCr or RGB but not both must be specified\n");
        ret = AVERROR(EINVAL);
        goto end;
    }

    if (!geq->expr_str[U] && !geq->expr_str[V]) {
        /* No chroma at all: fallback on luma */
        geq->expr_str[U] = av_strdup(geq->expr_str[Y]);
        geq->expr_str[V] = av_strdup(geq->expr_str[Y]);
    } else {
        /* One chroma unspecified, fallback on the other */
        if (!geq->expr_str[U]) geq->expr_str[U] = av_strdup(geq->expr_str[V]);
        if (!geq->expr_str[V]) geq->expr_str[V] = av_strdup(geq->expr_str[U]);
    }

    if (!geq->expr_str[A]) {
        /* Default alpha: the maximum value for the sample depth.
         * NOTE(review): geq->bps is only assigned in geq_config_props(),
         * which has not run yet at init time, so this evaluates with the
         * zero-initialized bps — verify the intended default against
         * upstream. */
        char bps_string[8];
        snprintf(bps_string, sizeof(bps_string), "%d", (1<<geq->bps) - 1);
        geq->expr_str[A] = av_strdup(bps_string);
    }
    /* RGB defaults: pass the corresponding input plane through. */
    if (!geq->expr_str[G])
        geq->expr_str[G] = av_strdup("g(X,Y)");
    if (!geq->expr_str[B])
        geq->expr_str[B] = av_strdup("b(X,Y)");
    if (!geq->expr_str[R])
        geq->expr_str[R] = av_strdup("r(X,Y)");

    /* Any NULL left in the active slots means an av_strdup() above failed. */
    if (geq->is_rgb ?
            (!geq->expr_str[G] || !geq->expr_str[B] || !geq->expr_str[R])
        :
            (!geq->expr_str[U] || !geq->expr_str[V] || !geq->expr_str[A])) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    for (plane = 0; plane < 4; plane++) {
        /* p(x,y) samples the plane currently being computed; the named
         * accessors sample a fixed plane. */
        static double (*p[])(void *, double, double) = { lum, cb, cr, alpha };
        static const char *const func2_yuv_names[] = { "lum", "cb", "cr", "alpha", "p", NULL };
        static const char *const func2_rgb_names[] = { "g", "b", "r", "alpha", "p", NULL };
        const char *const *func2_names = geq->is_rgb ? func2_rgb_names : func2_yuv_names;
        double (*func2[])(void *, double, double) = { lum, cb, cr, alpha, p[plane], NULL };

        /* In RGB mode planes 0..2 map to expr_str[G/B/R] (slots 4..6). */
        ret = av_expr_parse(&geq->e[plane], geq->expr_str[plane < 3 && geq->is_rgb ? plane+4 : plane], var_names,
                            NULL, NULL, func2_names, func2, 0, ctx);
        if (ret < 0)
            break;
    }

end:
    return ret;
}
180 
/* geq_query_formats: advertise the RGB or YCbCr pixel format list depending
 * on which kind of expressions the user supplied.
 * (The signature line is elided in this extract; the file's index gives:
 * static int geq_query_formats(AVFilterContext *ctx), vf_geq.c:181.) */
{
    GEQContext *geq = ctx->priv;
    /* NOTE(review): the entries of both format lists are elided in this
     * extract of the file (lines 185-210 of the original). */
    static const enum AVPixelFormat yuv_pix_fmts[] = {
    };
    static const enum AVPixelFormat rgb_pix_fmts[] = {
    };
    AVFilterFormats *fmts_list;

    if (geq->is_rgb) {
        fmts_list = ff_make_format_list(rgb_pix_fmts);
    } else
        fmts_list = ff_make_format_list(yuv_pix_fmts);
    if (!fmts_list)
        return AVERROR(ENOMEM);
    return ff_set_common_formats(ctx, fmts_list);
}
222 
/* geq_config_props: cache the negotiated format's properties (subsampling,
 * bit depth, plane count) in the filter context.
 * (The signature line is elided in this extract; the file's index gives:
 * static int geq_config_props(AVFilterLink *inlink), vf_geq.c:223.) */
{
    GEQContext *geq = inlink->dst->priv;
    /* NOTE(review): the declaration of 'desc' is elided in this extract —
     * presumably av_pix_fmt_desc_get(inlink->format); verify against the
     * original file. */

    av_assert0(desc);

    geq->hsub = desc->log2_chroma_w;
    geq->vsub = desc->log2_chroma_h;
    geq->bps = desc->comp[0].depth;
    geq->planes = desc->nb_components;
    return 0;
}
236 
/* Per-plane parameters handed to the slice worker through execute(). */
typedef struct ThreadData {
    int height;   ///< plane height in rows
    int width;    ///< plane width in samples
    int plane;    ///< plane index, selects the expression in geq->e[]
    int linesize; ///< plane stride in bytes
} ThreadData;
243 
244 static int slice_geq_filter(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
245 {
246  GEQContext *geq = ctx->priv;
247  ThreadData *td = arg;
248  const int height = td->height;
249  const int width = td->width;
250  const int plane = td->plane;
251  const int linesize = td->linesize;
252  const int slice_start = (height * jobnr) / nb_jobs;
253  const int slice_end = (height * (jobnr+1)) / nb_jobs;
254  int x, y;
255  uint8_t *ptr;
256  uint16_t *ptr16;
257 
258  double values[VAR_VARS_NB];
259  values[VAR_W] = geq->values[VAR_W];
260  values[VAR_H] = geq->values[VAR_H];
261  values[VAR_N] = geq->values[VAR_N];
262  values[VAR_SW] = geq->values[VAR_SW];
263  values[VAR_SH] = geq->values[VAR_SH];
264  values[VAR_T] = geq->values[VAR_T];
265 
266  if (geq->bps == 8) {
267  for (y = slice_start; y < slice_end; y++) {
268  ptr = geq->dst + linesize * y;
269  values[VAR_Y] = y;
270 
271  for (x = 0; x < width; x++) {
272  values[VAR_X] = x;
273  ptr[x] = av_expr_eval(geq->e[plane], values, geq);
274  }
275  ptr += linesize;
276  }
277  }
278  else {
279  for (y = slice_start; y < slice_end; y++) {
280  ptr16 = geq->dst16 + (linesize/2) * y;
281  values[VAR_Y] = y;
282  for (x = 0; x < width; x++) {
283  values[VAR_X] = x;
284  ptr16[x] = av_expr_eval(geq->e[plane], values, geq);
285  }
286  }
287  }
288 
289  return 0;
290 }
291 
/* geq_filter_frame: evaluate the expressions over every plane of the input
 * frame into a new output frame, sliced across the available threads.
 * (The signature line is elided in this extract; the file's index gives:
 * static int geq_filter_frame(AVFilterLink *inlink, AVFrame *in),
 * vf_geq.c:292.) */
{
    int plane;
    AVFilterContext *ctx = inlink->dst;
    const int nb_threads = ff_filter_get_nb_threads(ctx);
    GEQContext *geq = ctx->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    AVFrame *out;

    /* NOTE(review): the two statements below end with ',' (comma operator)
     * rather than ';' — legal C, but fragile; verify against upstream. */
    geq->values[VAR_N] = inlink->frame_count_out,
    geq->values[VAR_T] = in->pts == AV_NOPTS_VALUE ? NAN : in->pts * av_q2d(inlink->time_base),

    /* Keep the input frame reachable for getpix() while evaluating. */
    geq->picref = in;
    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }
    /* NOTE(review): a line is elided here in this extract (the file's index
     * references av_frame_copy_props; presumably av_frame_copy_props(out, in)
     * — verify against the original file). */

    for (plane = 0; plane < geq->planes && out->data[plane]; plane++) {
        /* Planes 1 and 2 are chroma and may be subsampled. */
        const int width = (plane == 1 || plane == 2) ? AV_CEIL_RSHIFT(inlink->w, geq->hsub) : inlink->w;
        const int height = (plane == 1 || plane == 2) ? AV_CEIL_RSHIFT(inlink->h, geq->vsub) : inlink->h;
        const int linesize = out->linesize[plane];
        ThreadData td;

        /* Both aliases point at the current output plane; the slice worker
         * picks one based on geq->bps. */
        geq->dst = out->data[plane];
        geq->dst16 = (uint16_t*)out->data[plane];

        geq->values[VAR_W] = width;
        geq->values[VAR_H] = height;
        geq->values[VAR_SW] = width / (double)inlink->w;
        geq->values[VAR_SH] = height / (double)inlink->h;

        td.width = width;
        td.height = height;
        td.plane = plane;
        td.linesize = linesize;

        /* At most one job per output row. */
        ctx->internal->execute(ctx, slice_geq_filter, &td, NULL, FFMIN(height, nb_threads));
    }

    /* picref aliases 'in'; this frees the input frame. */
    av_frame_free(&geq->picref);
    return ff_filter_frame(outlink, out);
}
337 
/* geq_uninit: free the per-plane parsed expressions.
 * (The signature line is elided in this extract; the file's index gives:
 * static av_cold void geq_uninit(AVFilterContext *ctx), vf_geq.c:338.) */
{
    int i;
    GEQContext *geq = ctx->priv;

    for (i = 0; i < FF_ARRAY_ELEMS(geq->e); i++)
        av_expr_free(geq->e[i]);
}
346 
/* Single video input: properties are read in geq_config_props and frames are
 * processed in geq_filter_frame. */
static const AVFilterPad geq_inputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
        .config_props = geq_config_props,
        .filter_frame = geq_filter_frame,
    },
    { NULL }
};
356 
/* Single video output. */
static const AVFilterPad geq_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};
364 
/* Filter definition.
 * NOTE(review): the "AVFilter ff_vf_geq = {" opener and some members (the
 * file's index suggests .query_formats and .flags on the elided lines 371 and
 * 375) are missing from this extract — verify against the original file. */
    .name = "geq",
    .description = NULL_IF_CONFIG_SMALL("Apply generic equation to each pixel."),
    .priv_size = sizeof(GEQContext),
    .init = geq_init,
    .uninit = geq_uninit,
    .inputs = geq_inputs,
    .outputs = geq_outputs,
    .priv_class = &geq_class,
};
ff_get_video_buffer
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
Definition: video.c:99
AV_PIX_FMT_YUVA422P16
#define AV_PIX_FMT_YUVA422P16
Definition: pixfmt.h:430
U
@ U
Definition: vf_geq.c:53
VAR_SH
@ VAR_SH
Definition: vf_geq.c:37
AV_PIX_FMT_GBRAP16
#define AV_PIX_FMT_GBRAP16
Definition: pixfmt.h:409
td
#define td
Definition: regdef.h:70
A
@ A
Definition: vf_geq.c:53
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
init
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
getpix
static double getpix(void *priv, double x, double y, int plane)
Definition: vf_geq.c:78
VAR_X
@ VAR_X
Definition: vf_geq.c:37
ff_make_format_list
AVFilterFormats * ff_make_format_list(const int *fmts)
Create a list of supported formats.
Definition: formats.c:283
var_names
static const char *const var_names[]
Definition: vf_geq.c:36
out
FILE * out
Definition: movenc.c:54
cb
static double cb(void *priv, double x, double y)
Definition: vf_geq.c:112
geq_query_formats
static int geq_query_formats(AVFilterContext *ctx)
Definition: vf_geq.c:181
ff_filter_frame
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1080
geq_uninit
static av_cold void geq_uninit(AVFilterContext *ctx)
Definition: vf_geq.c:338
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2522
geq_outputs
static const AVFilterPad geq_outputs[]
Definition: vf_geq.c:357
Y
@ Y
Definition: vf_geq.c:53
inlink
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
Definition: filter_design.txt:212
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:202
end
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:90
AV_PIX_FMT_YUVA422P9
#define AV_PIX_FMT_YUVA422P9
Definition: pixfmt.h:422
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:295
pixdesc.h
AVFrame::width
int width
Definition: frame.h:353
AV_PIX_FMT_YUVA420P16
#define AV_PIX_FMT_YUVA420P16
Definition: pixfmt.h:429
w
uint8_t w
Definition: llviddspenc.c:38
AV_PIX_FMT_YUVA420P10
#define AV_PIX_FMT_YUVA420P10
Definition: pixfmt.h:424
AVOption
AVOption.
Definition: opt.h:246
AV_PIX_FMT_YUV420P10
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:387
AV_PIX_FMT_YUV440P
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:99
AVFilter::name
const char * name
Filter name.
Definition: avfilter.h:148
GEQContext::planes
int planes
number of planes
Definition: vf_geq.c:48
B
@ B
Definition: vf_geq.c:53
AVFormatContext::internal
AVFormatInternal * internal
An opaque field for libavformat internal usage.
Definition: avformat.h:1795
AV_PIX_FMT_YUVA422P10
#define AV_PIX_FMT_YUVA422P10
Definition: pixfmt.h:425
func2_names
static const char *const func2_names[]
Definition: af_afftfilt.c:119
ThreadData::width
int width
Definition: vf_avgblur.c:62
AV_PIX_FMT_GRAY9
#define AV_PIX_FMT_GRAY9
Definition: pixfmt.h:367
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:309
AVFilterFormats
A list of supported formats for one end of a filter link.
Definition: formats.h:64
av_expr_parse
int av_expr_parse(AVExpr **expr, const char *s, const char *const *const_names, const char *const *func1_names, double(*const *funcs1)(void *, double), const char *const *func2_names, double(*const *funcs2)(void *, double, double), int log_offset, void *log_ctx)
Parse an expression.
Definition: eval.c:679
geq_config_props
static int geq_config_props(AVFilterLink *inlink)
Definition: vf_geq.c:223
AV_PIX_FMT_YUVA420P9
#define AV_PIX_FMT_YUVA420P9
Definition: pixfmt.h:421
AV_PIX_FMT_GBRP14
#define AV_PIX_FMT_GBRP14
Definition: pixfmt.h:405
AV_PIX_FMT_GBRAP
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:215
AV_PIX_FMT_GBRP10
#define AV_PIX_FMT_GBRP10
Definition: pixfmt.h:403
AV_PIX_FMT_YUVA444P16
#define AV_PIX_FMT_YUVA444P16
Definition: pixfmt.h:431
plane
int plane
Definition: avisynth_c.h:384
AV_PIX_FMT_YUV422P9
#define AV_PIX_FMT_YUV422P9
Definition: pixfmt.h:385
VAR_T
@ VAR_T
Definition: vf_geq.c:37
VAR_H
@ VAR_H
Definition: vf_geq.c:37
AV_PIX_FMT_GRAY16
#define AV_PIX_FMT_GRAY16
Definition: pixfmt.h:371
av_expr_free
void av_expr_free(AVExpr *e)
Free a parsed expression previously created with av_expr_parse().
Definition: eval.c:334
src
#define src
Definition: vp8dsp.c:254
AVFilterPad
A filter pad used for either input or output.
Definition: internal.h:54
R
@ R
Definition: vf_geq.c:53
AV_PIX_FMT_YUV444P10
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:390
avassert.h
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
av_cold
#define av_cold
Definition: attributes.h:84
AV_PIX_FMT_YUV422P16
#define AV_PIX_FMT_YUV422P16
Definition: pixfmt.h:399
ff_set_common_formats
int ff_set_common_formats(AVFilterContext *ctx, AVFilterFormats *formats)
A helper for query_formats() which sets all links to the same list of formats.
Definition: formats.c:568
OFFSET
#define OFFSET(x)
Definition: vf_geq.c:55
AV_PIX_FMT_GBRAP10
#define AV_PIX_FMT_GBRAP10
Definition: pixfmt.h:407
ThreadData::plane
int plane
Definition: vf_blend.c:57
width
#define width
AV_PIX_FMT_GBRAP12
#define AV_PIX_FMT_GBRAP12
Definition: pixfmt.h:408
AV_PIX_FMT_YUVA420P
@ AV_PIX_FMT_YUVA420P
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:101
AV_PIX_FMT_YUV444P16
#define AV_PIX_FMT_YUV444P16
Definition: pixfmt.h:400
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:58
ThreadData::height
int height
Definition: vf_avgblur.c:61
slice_end
static int slice_end(AVCodecContext *avctx, AVFrame *pict)
Handle slice ends.
Definition: mpeg12dec.c:2026
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
GEQContext::vsub
int vsub
chroma subsampling
Definition: vf_geq.c:47
ThreadData::linesize
int linesize
Definition: vf_avgblur.c:64
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
outputs
static const AVFilterPad outputs[]
Definition: af_acontrast.c:203
AV_PIX_FMT_YUV420P9
#define AV_PIX_FMT_YUV420P9
Definition: pixfmt.h:384
ff_vf_geq
AVFilter ff_vf_geq
Definition: vf_geq.c:365
AV_PIX_FMT_YUV420P16
#define AV_PIX_FMT_YUV420P16
Definition: pixfmt.h:398
ctx
AVFormatContext * ctx
Definition: movenc.c:48
xi
#define xi(width, name, var, range_min, range_max, subs,...)
Definition: cbs_h2645.c:386
av_expr_eval
double av_expr_eval(AVExpr *e, const double *const_values, void *opaque)
Evaluate a previously parsed expression.
Definition: eval.c:734
geq_options
static const AVOption geq_options[]
Definition: vf_geq.c:58
AV_PIX_FMT_GRAY14
#define AV_PIX_FMT_GRAY14
Definition: pixfmt.h:370
AVExpr
Definition: eval.c:157
VAR_Y
@ VAR_Y
Definition: vf_geq.c:37
GEQContext::e
AVExpr * e[4]
expressions for each plane
Definition: vf_geq.c:41
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
NAN
#define NAN
Definition: mathematics.h:64
alpha
static double alpha(void *priv, double x, double y)
Definition: vf_geq.c:114
arg
const char * arg
Definition: jacosubdec.c:66
AV_PIX_FMT_GRAY10
#define AV_PIX_FMT_GRAY10
Definition: pixfmt.h:368
AV_PIX_FMT_GBRP16
#define AV_PIX_FMT_GBRP16
Definition: pixfmt.h:406
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:67
NULL
#define NULL
Definition: coverity.c:32
av_frame_copy_props
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:654
GEQContext::picref
AVFrame * picref
current input buffer
Definition: vf_geq.c:43
AV_PIX_FMT_YUV440P10
#define AV_PIX_FMT_YUV440P10
Definition: pixfmt.h:389
AV_PIX_FMT_YUV422P10
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:388
inputs
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
Definition: filter_design.txt:243
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:74
AV_PIX_FMT_GBRP9
#define AV_PIX_FMT_GBRP9
Definition: pixfmt.h:402
GEQContext::bps
int bps
Definition: vf_geq.c:50
eval.h
desc
const char * desc
Definition: nvenc.c:68
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:188
AV_PIX_FMT_YUV422P12
#define AV_PIX_FMT_YUV422P12
Definition: pixfmt.h:392
GEQContext::values
double values[VAR_VARS_NB]
expression values
Definition: vf_geq.c:46
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
AV_PIX_FMT_YUV444P12
#define AV_PIX_FMT_YUV444P12
Definition: pixfmt.h:394
geq_init
static av_cold int geq_init(AVFilterContext *ctx)
Definition: vf_geq.c:116
height
#define height
FFMIN
#define FFMIN(a, b)
Definition: common.h:96
AV_PIX_FMT_YUVA444P
@ AV_PIX_FMT_YUVA444P
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
Definition: pixfmt.h:177
AV_PIX_FMT_YUVA444P10
#define AV_PIX_FMT_YUVA444P10
Definition: pixfmt.h:426
FLAGS
#define FLAGS
Definition: vf_geq.c:56
VAR_N
@ VAR_N
Definition: vf_geq.c:37
VAR_SW
@ VAR_SW
Definition: vf_geq.c:37
VAR_VARS_NB
@ VAR_VARS_NB
Definition: vf_geq.c:37
internal.h
AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC
#define AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC
Some filters support a generic "enable" expression option that can be used to enable or disable a fil...
Definition: avfilter.h:125
in
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi - 0x80) *(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi - 0x80) *(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(const int16_t *) pi >> 8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t, *(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t, *(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(const int32_t *) pi >> 24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t, *(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t, *(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31)))) #define SET_CONV_FUNC_GROUP(ofmt, ifmt) static void set_generic_function(AudioConvert *ac) { } void ff_audio_convert_free(AudioConvert **ac) { if(! 
*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);} AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, int sample_rate, int apply_map) { AudioConvert *ac;int in_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) return NULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method !=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt) > 2) { ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc) { av_free(ac);return NULL;} return ac;} in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar) { ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar ? ac->channels :1;} else if(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;else ac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);return ac;} int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in) { int use_generic=1;int len=in->nb_samples;int p;if(ac->dc) { av_log(ac->avr, AV_LOG_TRACE, "%d samples - audio_convert: %s to %s (dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));return ff_convert_dither(ac-> in
Definition: audio_convert.c:326
GEQContext
Definition: vf_geq.c:39
GEQContext::is_rgb
int is_rgb
Definition: vf_geq.c:49
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:259
lum
static double lum(void *priv, double x, double y)
Definition: vf_geq.c:111
AV_PIX_FMT_GBRP12
#define AV_PIX_FMT_GBRP12
Definition: pixfmt.h:404
ff_filter_get_nb_threads
int ff_filter_get_nb_threads(AVFilterContext *ctx)
Get number of threads for current filter instance.
Definition: avfilter.c:802
ThreadData
Used for passing data between threads.
Definition: af_adeclick.c:487
V
@ V
Definition: vf_geq.c:53
uint8_t
uint8_t
Definition: audio_convert.c:194
AVFilterPad::name
const char * name
Pad name.
Definition: internal.h:60
GEQContext::expr_str
char * expr_str[4+3]
expression strings for each plane
Definition: vf_geq.c:42
GEQContext::dst
uint8_t * dst
reference pointer to the 8bits output
Definition: vf_geq.c:44
GEQContext::dst16
uint16_t * dst16
reference pointer to the 16bits output
Definition: vf_geq.c:45
AV_PIX_FMT_YUV444P9
#define AV_PIX_FMT_YUV444P9
Definition: pixfmt.h:386
AVFilter
Filter definition.
Definition: avfilter.h:144
ret
ret
Definition: filter_design.txt:187
AV_PIX_FMT_YUVA444P9
#define AV_PIX_FMT_YUVA444P9
Definition: pixfmt.h:423
AV_PIX_FMT_YUV420P12
#define AV_PIX_FMT_YUV420P12
Definition: pixfmt.h:391
GEQContext::hsub
int hsub
Definition: vf_geq.c:47
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen_template.c:38
AV_PIX_FMT_YUV422P14
#define AV_PIX_FMT_YUV422P14
Definition: pixfmt.h:396
AVFrame::height
int height
Definition: frame.h:353
geq_filter_frame
static int geq_filter_frame(AVFilterLink *inlink, AVFrame *in)
Definition: vf_geq.c:292
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
values
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return values
Definition: filter_design.txt:263
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
AVFilterContext
An instance of a filter.
Definition: avfilter.h:338
AVFILTER_DEFINE_CLASS
AVFILTER_DEFINE_CLASS(geq)
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:168
AVFILTER_FLAG_SLICE_THREADS
#define AVFILTER_FLAG_SLICE_THREADS
The filter supports multithreading by splitting frames into multiple parts and processing them concur...
Definition: avfilter.h:116
av_strdup
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:251
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
yuv_pix_fmts
static enum AVPixelFormat yuv_pix_fmts[]
Definition: jpeg2000dec.c:249
cr
static double cr(void *priv, double x, double y)
Definition: vf_geq.c:113
AV_PIX_FMT_YUV411P
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
Definition: pixfmt.h:73
query_formats
static int query_formats(AVFilterContext *ctx)
Definition: aeval.c:244
func2
double(* func2[])(void *, double, double)
Definition: af_afftfilt.c:120
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:565
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:326
AV_PIX_FMT_YUV410P
@ AV_PIX_FMT_YUV410P
planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
Definition: pixfmt.h:72
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
uninit
static av_cold int uninit(AVCodecContext *avctx)
Definition: crystalhd.c:279
h
h
Definition: vp9dsp_template.c:2038
AV_PIX_FMT_YUV444P14
#define AV_PIX_FMT_YUV444P14
Definition: pixfmt.h:397
rgb_pix_fmts
static enum AVPixelFormat rgb_pix_fmts[]
Definition: jpeg2000dec.c:247
avstring.h
AV_OPT_TYPE_STRING
@ AV_OPT_TYPE_STRING
Definition: opt.h:227
AV_PIX_FMT_GRAY12
#define AV_PIX_FMT_GRAY12
Definition: pixfmt.h:369
G
@ G
Definition: vf_geq.c:53
snprintf
#define snprintf
Definition: snprintf.h:34
slice_geq_filter
static int slice_geq_filter(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_geq.c:244
geq_inputs
static const AVFilterPad geq_inputs[]
Definition: vf_geq.c:347
AV_PIX_FMT_YUVA422P
@ AV_PIX_FMT_YUVA422P
planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
Definition: pixfmt.h:176
AV_PIX_FMT_YUV420P14
#define AV_PIX_FMT_YUV420P14
Definition: pixfmt.h:395
VAR_W
@ VAR_W
Definition: vf_geq.c:37