FFmpeg
vf_eq.c
/*
 * Original MPlayer filters by Richard Felker, Hampa Hug, Daniel Moreno,
 * and Michael Niedermayer.
 *
 * Copyright (c) 2014 James Darnley <james.darnley@gmail.com>
 * Copyright (c) 2015 Arwa Arif <arwaarif1994@gmail.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

/**
 * @file
 * very simple video equalizer
 */

#include "libavfilter/internal.h"
#include "libavutil/common.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "vf_eq.h"

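/*
 * Build the 8-bit lookup table for one plane.  Each input value is first
 * scaled around mid-grey by the contrast and offset by the brightness,
 * then blended between the linear value and its gamma-corrected value
 * according to gamma_weight, and finally clipped to the 0..255 range.
 */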
static void create_lut(EQParameters *param)
{
    int i;
    double g = 1.0 / param->gamma;
    double lw = 1.0 - param->gamma_weight;

    for (i = 0; i < 256; i++) {
        double v = i / 255.0;
        v = param->contrast * (v - 0.5) + 0.5 + param->brightness;

        if (v <= 0.0) {
            param->lut[i] = 0;
        } else {
            v = v * lw + pow(v, g) * param->gamma_weight;

            if (v >= 1.0)
                param->lut[i] = 255;
            else
                param->lut[i] = 256.0 * v;
        }
    }

    param->lut_clean = 1;
}

static void apply_lut(EQParameters *param, uint8_t *dst, int dst_stride,
                      const uint8_t *src, int src_stride, int w, int h)
{
    int x, y;

    if (!param->lut_clean)
        create_lut(param);

    for (y = 0; y < h; y++) {
        for (x = 0; x < w; x++) {
            dst[y * dst_stride + x] = param->lut[src[y * src_stride + x]];
        }
    }
}

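/*
 * Plain C fallback for the contrast/brightness-only case.  Contrast is
 * converted to 4.12 fixed point (multiplied by 256 * 16 = 4096) and
 * brightness to an integer offset.  The result is clamped to 0..255
 * without a branch per bound: (-pel) >> 31 becomes all-ones (0xFF after
 * the 8-bit store) on overflow and 0 on underflow, relying on an
 * arithmetic right shift of negative values.
 */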
static void process_c(EQParameters *param, uint8_t *dst, int dst_stride,
                      const uint8_t *src, int src_stride, int w, int h)
{
    int x, y, pel;

    int contrast = (int) (param->contrast * 256 * 16);
    int brightness = ((int) (100.0 * param->brightness + 100.0) * 511) / 200 - 128 - contrast / 32;

    for (y = 0; y < h; y++) {
        for (x = 0; x < w; x++) {
            pel = ((src[y * src_stride + x] * contrast) >> 12) + brightness;

            if (pel & ~255)
                pel = (-pel) >> 31;

            dst[y * dst_stride + x] = pel;
        }
    }
}

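/*
 * Pick the processing routine for one plane: no adjustment at all (the
 * plane is copied untouched), the fast contrast/brightness path in
 * eq->process (possibly SIMD-accelerated), or the generic LUT path when
 * gamma is involved or the contrast is too large for the fixed-point code.
 */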
static void check_values(EQParameters *param, EQContext *eq)
{
    if (param->contrast == 1.0 && param->brightness == 0.0 && param->gamma == 1.0)
        param->adjust = NULL;
    else if (param->gamma == 1.0 && fabs(param->contrast) < 7.9)
        param->adjust = eq->process;
    else
        param->adjust = apply_lut;
}

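/*
 * The set_*() helpers re-evaluate the option expressions and update the
 * per-plane parameters: contrast and brightness act on the luma plane
 * (param[0]), saturation is applied as contrast on the two chroma planes
 * (param[1] and param[2]), gamma is split across all three planes, and
 * every change marks the affected LUT dirty (lut_clean = 0).
 */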
static void set_contrast(EQContext *eq)
{
    eq->contrast = av_clipf(av_expr_eval(eq->contrast_pexpr, eq->var_values, eq), -1000.0, 1000.0);
    eq->param[0].contrast = eq->contrast;
    eq->param[0].lut_clean = 0;
    check_values(&eq->param[0], eq);
}

static void set_brightness(EQContext *eq)
{
    eq->brightness = av_clipf(av_expr_eval(eq->brightness_pexpr, eq->var_values, eq), -1.0, 1.0);
    eq->param[0].brightness = eq->brightness;
    eq->param[0].lut_clean = 0;
    check_values(&eq->param[0], eq);
}

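/*
 * Combined gamma: the luma plane uses gamma * gamma_g, while the chroma
 * planes use sqrt(gamma_b / gamma_g) and sqrt(gamma_r / gamma_g), an
 * approximation of the per-channel RGB gammas in YUV space.
 */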
static void set_gamma(EQContext *eq)
{
    int i;

    eq->gamma        = av_clipf(av_expr_eval(eq->gamma_pexpr, eq->var_values, eq), 0.1, 10.0);
    eq->gamma_r      = av_clipf(av_expr_eval(eq->gamma_r_pexpr, eq->var_values, eq), 0.1, 10.0);
    eq->gamma_g      = av_clipf(av_expr_eval(eq->gamma_g_pexpr, eq->var_values, eq), 0.1, 10.0);
    eq->gamma_b      = av_clipf(av_expr_eval(eq->gamma_b_pexpr, eq->var_values, eq), 0.1, 10.0);
    eq->gamma_weight = av_clipf(av_expr_eval(eq->gamma_weight_pexpr, eq->var_values, eq), 0.0, 1.0);

    eq->param[0].gamma = eq->gamma * eq->gamma_g;
    eq->param[1].gamma = sqrt(eq->gamma_b / eq->gamma_g);
    eq->param[2].gamma = sqrt(eq->gamma_r / eq->gamma_g);

    for (i = 0; i < 3; i++) {
        eq->param[i].gamma_weight = eq->gamma_weight;
        eq->param[i].lut_clean = 0;
        check_values(&eq->param[i], eq);
    }
}

static void set_saturation(EQContext *eq)
{
    int i;

    eq->saturation = av_clipf(av_expr_eval(eq->saturation_pexpr, eq->var_values, eq), 0.0, 3.0);

    for (i = 1; i < 3; i++) {
        eq->param[i].contrast = eq->saturation;
        eq->param[i].lut_clean = 0;
        check_values(&eq->param[i], eq);
    }
}

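/*
 * (Re)parse an option expression.  On failure the previous expression,
 * if any, is restored so that a bad runtime command does not destroy
 * the current state.
 */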
static int set_expr(AVExpr **pexpr, const char *expr, const char *option, void *log_ctx)
{
    int ret;
    AVExpr *old = NULL;

    if (*pexpr)
        old = *pexpr;
    ret = av_expr_parse(pexpr, expr, var_names, NULL, NULL, NULL, NULL, 0, log_ctx);
    if (ret < 0) {
        av_log(log_ctx, AV_LOG_ERROR,
               "Error when parsing the expression '%s' for %s\n",
               expr, option);
        *pexpr = old;
        return ret;
    }

    av_expr_free(old);
    return 0;
}

void ff_eq_init(EQContext *eq)
{
    eq->process = process_c;
    if (ARCH_X86)
        ff_eq_init_x86(eq);
}

static int initialize(AVFilterContext *ctx)
{
    EQContext *eq = ctx->priv;
    int ret;
    ff_eq_init(eq);

    if ((ret = set_expr(&eq->contrast_pexpr,     eq->contrast_expr,     "contrast",     ctx)) < 0 ||
        (ret = set_expr(&eq->brightness_pexpr,   eq->brightness_expr,   "brightness",   ctx)) < 0 ||
        (ret = set_expr(&eq->saturation_pexpr,   eq->saturation_expr,   "saturation",   ctx)) < 0 ||
        (ret = set_expr(&eq->gamma_pexpr,        eq->gamma_expr,        "gamma",        ctx)) < 0 ||
        (ret = set_expr(&eq->gamma_r_pexpr,      eq->gamma_r_expr,      "gamma_r",      ctx)) < 0 ||
        (ret = set_expr(&eq->gamma_g_pexpr,      eq->gamma_g_expr,      "gamma_g",      ctx)) < 0 ||
        (ret = set_expr(&eq->gamma_b_pexpr,      eq->gamma_b_expr,      "gamma_b",      ctx)) < 0 ||
        (ret = set_expr(&eq->gamma_weight_pexpr, eq->gamma_weight_expr, "gamma_weight", ctx)) < 0 )
        return ret;

    if (eq->eval_mode == EVAL_MODE_INIT) {
        set_gamma(eq);
        set_contrast(eq);
        set_brightness(eq);
        set_saturation(eq);
    }

    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    EQContext *eq = ctx->priv;

    av_expr_free(eq->contrast_pexpr);
    av_expr_free(eq->brightness_pexpr);
    av_expr_free(eq->saturation_pexpr);
    av_expr_free(eq->gamma_pexpr);
    av_expr_free(eq->gamma_weight_pexpr);
    av_expr_free(eq->gamma_r_pexpr);
    av_expr_free(eq->gamma_g_pexpr);
    av_expr_free(eq->gamma_b_pexpr);
}

static int config_props(AVFilterLink *inlink)
{
    EQContext *eq = inlink->dst->priv;

    eq->var_values[VAR_N] = 0;
    eq->var_values[VAR_R] = inlink->frame_rate.num == 0 || inlink->frame_rate.den == 0 ?
                            NAN : av_q2d(inlink->frame_rate);

    return 0;
}

static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pixel_fmts_eq[] = {
        AV_PIX_FMT_GRAY8,
        AV_PIX_FMT_YUV410P,
        AV_PIX_FMT_YUV411P,
        AV_PIX_FMT_YUV420P,
        AV_PIX_FMT_YUV422P,
        AV_PIX_FMT_YUV444P,
        AV_PIX_FMT_NONE
    };
    AVFilterFormats *fmts_list = ff_make_format_list(pixel_fmts_eq);
    if (!fmts_list)
        return AVERROR(ENOMEM);
    return ff_set_common_formats(ctx, fmts_list);
}

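/*
 * Per-frame processing: update the frame-dependent expression variables
 * (N, POS, T), re-evaluate all options when eval=frame, then run the
 * selected adjustment on every plane, reducing the chroma plane
 * dimensions according to the pixel format's subsampling.
 */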
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    EQContext *eq = ctx->priv;
    AVFrame *out;
    int64_t pos = in->pkt_pos;
    const AVPixFmtDescriptor *desc;
    int i;

    out = ff_get_video_buffer(outlink, inlink->w, inlink->h);
    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }

    av_frame_copy_props(out, in);
    desc = av_pix_fmt_desc_get(inlink->format);

    eq->var_values[VAR_N]   = inlink->frame_count_out;
    eq->var_values[VAR_POS] = pos == -1 ? NAN : pos;
    eq->var_values[VAR_T]   = TS2T(in->pts, inlink->time_base);

    if (eq->eval_mode == EVAL_MODE_FRAME) {
        set_gamma(eq);
        set_contrast(eq);
        set_brightness(eq);
        set_saturation(eq);
    }

    for (i = 0; i < desc->nb_components; i++) {
        int w = inlink->w;
        int h = inlink->h;

        if (i == 1 || i == 2) {
            w = AV_CEIL_RSHIFT(w, desc->log2_chroma_w);
            h = AV_CEIL_RSHIFT(h, desc->log2_chroma_h);
        }

        if (eq->param[i].adjust)
            eq->param[i].adjust(&eq->param[i], out->data[i], out->linesize[i],
                                in->data[i], in->linesize[i], w, h);
        else
            av_image_copy_plane(out->data[i], out->linesize[i],
                                in->data[i], in->linesize[i], w, h);
    }

    av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}

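/*
 * Runtime commands ("contrast", "brightness", "gamma", ...) reuse the
 * option expressions: set_param() parses the new expression and, in
 * eval=init mode, applies it immediately; in eval=frame mode it takes
 * effect on the next frame.  As a usage sketch (the graph pointer and
 * filter instance name here are hypothetical), an application could
 * send such a command with:
 *
 *     avfilter_graph_send_command(graph, "eq", "brightness", "0.2",
 *                                 NULL, 0, 0);
 */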
static inline int set_param(AVExpr **pexpr, const char *args, const char *cmd,
                            void (*set_fn)(EQContext *eq), AVFilterContext *ctx)
{
    EQContext *eq = ctx->priv;
    int ret;
    if ((ret = set_expr(pexpr, args, cmd, ctx)) < 0)
        return ret;
    if (eq->eval_mode == EVAL_MODE_INIT)
        set_fn(eq);
    return 0;
}

static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
                           char *res, int res_len, int flags)
{
    EQContext *eq = ctx->priv;

#define SET_PARAM(param_name, set_fn_name)                              \
    if (!strcmp(cmd, #param_name)) return set_param(&eq->param_name##_pexpr, args, cmd, set_##set_fn_name, ctx);

         SET_PARAM(contrast, contrast)
    else SET_PARAM(brightness, brightness)
    else SET_PARAM(saturation, saturation)
    else SET_PARAM(gamma, gamma)
    else SET_PARAM(gamma_r, gamma)
    else SET_PARAM(gamma_g, gamma)
    else SET_PARAM(gamma_b, gamma)
    else SET_PARAM(gamma_weight, gamma)
    else return AVERROR(ENOSYS);
}

static const AVFilterPad eq_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
        .config_props = config_props,
    },
    { NULL }
};

static const AVFilterPad eq_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

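/*
 * The adjustment options are expression strings; they are parsed once at
 * initialization and re-evaluated either only then or once per frame,
 * depending on the "eval" option.
 */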
#define OFFSET(x) offsetof(EQContext, x)
#define FLAGS  AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
#define TFLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
static const AVOption eq_options[] = {
    { "contrast",     "set the contrast adjustment, negative values give a negative image",
        OFFSET(contrast_expr),     AV_OPT_TYPE_STRING, {.str = "1.0"}, 0, 0, TFLAGS },
    { "brightness",   "set the brightness adjustment",
        OFFSET(brightness_expr),   AV_OPT_TYPE_STRING, {.str = "0.0"}, 0, 0, TFLAGS },
    { "saturation",   "set the saturation adjustment",
        OFFSET(saturation_expr),   AV_OPT_TYPE_STRING, {.str = "1.0"}, 0, 0, TFLAGS },
    { "gamma",        "set the initial gamma value",
        OFFSET(gamma_expr),        AV_OPT_TYPE_STRING, {.str = "1.0"}, 0, 0, TFLAGS },
    { "gamma_r",      "gamma value for red",
        OFFSET(gamma_r_expr),      AV_OPT_TYPE_STRING, {.str = "1.0"}, 0, 0, TFLAGS },
    { "gamma_g",      "gamma value for green",
        OFFSET(gamma_g_expr),      AV_OPT_TYPE_STRING, {.str = "1.0"}, 0, 0, TFLAGS },
    { "gamma_b",      "gamma value for blue",
        OFFSET(gamma_b_expr),      AV_OPT_TYPE_STRING, {.str = "1.0"}, 0, 0, TFLAGS },
    { "gamma_weight", "set the gamma weight which reduces the effect of gamma on bright areas",
        OFFSET(gamma_weight_expr), AV_OPT_TYPE_STRING, {.str = "1.0"}, 0, 0, TFLAGS },
    { "eval", "specify when to evaluate expressions", OFFSET(eval_mode), AV_OPT_TYPE_INT, {.i64 = EVAL_MODE_INIT}, 0, EVAL_MODE_NB-1, FLAGS, "eval" },
         { "init",  "eval expressions once during initialization", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_INIT},  .flags = FLAGS, .unit = "eval" },
         { "frame", "eval expressions per-frame",                  0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_FRAME}, .flags = FLAGS, .unit = "eval" },
    { NULL }
};

AVFILTER_DEFINE_CLASS(eq);

AVFilter ff_vf_eq = {
    .name            = "eq",
    .description     = NULL_IF_CONFIG_SMALL("Adjust brightness, contrast, gamma, and saturation."),
    .priv_size       = sizeof(EQContext),
    .priv_class      = &eq_class,
    .inputs          = eq_inputs,
    .outputs         = eq_outputs,
    .process_command = process_command,
    .query_formats   = query_formats,
    .init            = initialize,
    .uninit          = uninit,
    .flags           = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};