FFmpeg
vf_geq.c
/*
 * Copyright (C) 2006 Michael Niedermayer <michaelni@gmx.at>
 * Copyright (C) 2012 Clément Bœsch <u pkh me>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

/**
 * @file
 * Generic equation change filter
 * Originally written by Michael Niedermayer for the MPlayer project, and
 * ported by Clément Bœsch for FFmpeg.
 */
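/*
 * Illustrative usage (hedged example, not part of the original file): the
 * filter is driven entirely by per-plane expressions built from the variables
 * and functions defined below (X, Y, W, H, p(), cb(), cr(), ...). For
 * instance, mirroring the luma plane horizontally while copying chroma
 * unchanged could look like:
 *
 *     geq=lum_expr='p(W-1-X,Y)':cb_expr='cb(X,Y)':cr_expr='cr(X,Y)'
 */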

#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/eval.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "internal.h"

typedef struct GEQContext {
    const AVClass *class;
    AVExpr *e[4];           ///< expressions for each plane
    char *expr_str[4+3];    ///< expression strings for each plane
    AVFrame *picref;        ///< current input buffer
    int hsub, vsub;         ///< chroma subsampling
    int planes;             ///< number of planes
    int is_rgb;
    int bps;
} GEQContext;

enum { Y = 0, U, V, A, G, B, R };

#define OFFSET(x) offsetof(GEQContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

static const AVOption geq_options[] = {
    { "lum_expr",   "set luminance expression",   OFFSET(expr_str[Y]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "lum",        "set luminance expression",   OFFSET(expr_str[Y]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "cb_expr",    "set chroma blue expression", OFFSET(expr_str[U]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "cb",         "set chroma blue expression", OFFSET(expr_str[U]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "cr_expr",    "set chroma red expression",  OFFSET(expr_str[V]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "cr",         "set chroma red expression",  OFFSET(expr_str[V]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "alpha_expr", "set alpha expression",       OFFSET(expr_str[A]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "a",          "set alpha expression",       OFFSET(expr_str[A]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "red_expr",   "set red expression",         OFFSET(expr_str[R]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "r",          "set red expression",         OFFSET(expr_str[R]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "green_expr", "set green expression",       OFFSET(expr_str[G]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "g",          "set green expression",       OFFSET(expr_str[G]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "blue_expr",  "set blue expression",        OFFSET(expr_str[B]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "b",          "set blue expression",        OFFSET(expr_str[B]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
    {NULL},
};

AVFILTER_DEFINE_CLASS(geq);

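/*
 * Sample the current input frame at fractional coordinates (x, y) in the given
 * plane: the coordinates are clipped to the plane and the four neighbouring
 * pixels are bilinearly interpolated, with separate 8-bit and >8-bit paths.
 */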
static inline double getpix(void *priv, double x, double y, int plane)
{
    int xi, yi;
    GEQContext *geq = priv;
    AVFrame *picref = geq->picref;
    const uint8_t *src = picref->data[plane];
    int linesize = picref->linesize[plane];
    const int w = (plane == 1 || plane == 2) ? AV_CEIL_RSHIFT(picref->width,  geq->hsub) : picref->width;
    const int h = (plane == 1 || plane == 2) ? AV_CEIL_RSHIFT(picref->height, geq->vsub) : picref->height;

    if (!src)
        return 0;

    xi = x = av_clipf(x, 0, w - 2);
    yi = y = av_clipf(y, 0, h - 2);

    x -= xi;
    y -= yi;

    if (geq->bps > 8) {
        const uint16_t *src16 = (const uint16_t*)src;
        linesize /= 2;

        return (1-y)*((1-x)*src16[xi +  yi    * linesize] + x*src16[xi + 1 +  yi    * linesize])
              + y  *((1-x)*src16[xi + (yi+1) * linesize] + x*src16[xi + 1 + (yi+1) * linesize]);
    } else {
        return (1-y)*((1-x)*src[xi +  yi    * linesize] + x*src[xi + 1 +  yi    * linesize])
              + y  *((1-x)*src[xi + (yi+1) * linesize] + x*src[xi + 1 + (yi+1) * linesize]);
    }
}

//TODO: cubic interpolate
//TODO: keep the last few frames
static double lum(void *priv, double x, double y)   { return getpix(priv, x, y, 0); }
static double cb(void *priv, double x, double y)    { return getpix(priv, x, y, 1); }
static double cr(void *priv, double x, double y)    { return getpix(priv, x, y, 2); }
static double alpha(void *priv, double x, double y) { return getpix(priv, x, y, 3); }

static const char *const var_names[] = { "X", "Y", "W", "H", "N", "SW", "SH", "T", NULL };
enum { VAR_X, VAR_Y, VAR_W, VAR_H, VAR_N, VAR_SW, VAR_SH, VAR_T, VAR_VARS_NB };

static av_cold int geq_init(AVFilterContext *ctx)
{
    GEQContext *geq = ctx->priv;
    int plane, ret = 0;

    if (!geq->expr_str[Y] && !geq->expr_str[G] && !geq->expr_str[B] && !geq->expr_str[R]) {
        av_log(ctx, AV_LOG_ERROR, "A luminance or RGB expression is mandatory\n");
        ret = AVERROR(EINVAL);
        goto end;
    }
    geq->is_rgb = !geq->expr_str[Y];

    if ((geq->expr_str[Y] || geq->expr_str[U] || geq->expr_str[V]) && (geq->expr_str[G] || geq->expr_str[B] || geq->expr_str[R])) {
        av_log(ctx, AV_LOG_ERROR, "Either YCbCr or RGB but not both must be specified\n");
        ret = AVERROR(EINVAL);
        goto end;
    }

    if (!geq->expr_str[U] && !geq->expr_str[V]) {
        /* No chroma at all: fallback on luma */
        geq->expr_str[U] = av_strdup(geq->expr_str[Y]);
        geq->expr_str[V] = av_strdup(geq->expr_str[Y]);
    } else {
        /* One chroma unspecified, fallback on the other */
        if (!geq->expr_str[U]) geq->expr_str[U] = av_strdup(geq->expr_str[V]);
        if (!geq->expr_str[V]) geq->expr_str[V] = av_strdup(geq->expr_str[U]);
    }

    if (!geq->expr_str[A]) {
        char bps_string[8];
        snprintf(bps_string, sizeof(bps_string), "%d", (1<<geq->bps) - 1);
        geq->expr_str[A] = av_strdup(bps_string);
    }
    if (!geq->expr_str[G])
        geq->expr_str[G] = av_strdup("g(X,Y)");
    if (!geq->expr_str[B])
        geq->expr_str[B] = av_strdup("b(X,Y)");
    if (!geq->expr_str[R])
        geq->expr_str[R] = av_strdup("r(X,Y)");

    if (geq->is_rgb ?
            (!geq->expr_str[G] || !geq->expr_str[B] || !geq->expr_str[R])
                    :
            (!geq->expr_str[U] || !geq->expr_str[V] || !geq->expr_str[A])) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    for (plane = 0; plane < 4; plane++) {
        static double (*p[])(void *, double, double) = { lum, cb, cr, alpha };
        static const char *const func2_yuv_names[] = { "lum", "cb", "cr", "alpha", "p", NULL };
        static const char *const func2_rgb_names[] = { "g", "b", "r", "alpha", "p", NULL };
        const char *const *func2_names = geq->is_rgb ? func2_rgb_names : func2_yuv_names;
        double (*func2[])(void *, double, double) = { lum, cb, cr, alpha, p[plane], NULL };

        ret = av_expr_parse(&geq->e[plane], geq->expr_str[plane < 3 && geq->is_rgb ? plane+4 : plane], var_names,
                            NULL, NULL, func2_names, func2, 0, ctx);
        if (ret < 0)
            break;
    }

end:
    return ret;
}

static int geq_query_formats(AVFilterContext *ctx)
{
    GEQContext *geq = ctx->priv;
    static const enum AVPixelFormat yuv_pix_fmts[] = {
        /* planar YUV, YUVA and grayscale formats at 8 to 16 bits per sample
         * (full list elided in this listing) */
        AV_PIX_FMT_NONE
    };
    static const enum AVPixelFormat rgb_pix_fmts[] = {
        /* planar GBR and GBRA formats at 8 to 16 bits per sample
         * (full list elided in this listing) */
        AV_PIX_FMT_NONE
    };
    AVFilterFormats *fmts_list;

    if (geq->is_rgb) {
        fmts_list = ff_make_format_list(rgb_pix_fmts);
    } else
        fmts_list = ff_make_format_list(yuv_pix_fmts);
    if (!fmts_list)
        return AVERROR(ENOMEM);
    return ff_set_common_formats(ctx, fmts_list);
}

static int geq_config_props(AVFilterLink *inlink)
{
    GEQContext *geq = inlink->dst->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);

    av_assert0(desc);

    geq->hsub = desc->log2_chroma_w;
    geq->vsub = desc->log2_chroma_h;
    geq->planes = desc->nb_components;
    geq->bps = desc->comp[0].depth;

    return 0;
}

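/*
 * Evaluate the parsed per-plane expression for every output pixel. The input
 * frame is kept in geq->picref so the expression callbacks (lum/cb/cr/alpha
 * and p) can sample it while the output buffer is being filled.
 */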
static int geq_filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    int plane;
    GEQContext *geq = inlink->dst->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    AVFrame *out;
    double values[VAR_VARS_NB] = {
        [VAR_N] = inlink->frame_count_out,
        [VAR_T] = in->pts == AV_NOPTS_VALUE ? NAN : in->pts * av_q2d(inlink->time_base),
    };

    geq->picref = in;
    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }
    av_frame_copy_props(out, in);

    for (plane = 0; plane < geq->planes && out->data[plane]; plane++) {
        int x, y;
        uint8_t *dst = out->data[plane];
        uint16_t *dst16 = (uint16_t*)out->data[plane];
        const int linesize = out->linesize[plane];
        const int w = (plane == 1 || plane == 2) ? AV_CEIL_RSHIFT(inlink->w, geq->hsub) : inlink->w;
        const int h = (plane == 1 || plane == 2) ? AV_CEIL_RSHIFT(inlink->h, geq->vsub) : inlink->h;

        values[VAR_W]  = w;
        values[VAR_H]  = h;
        values[VAR_SW] = w / (double)inlink->w;
        values[VAR_SH] = h / (double)inlink->h;

        for (y = 0; y < h; y++) {
            values[VAR_Y] = y;
            if (geq->bps > 8) {
                for (x = 0; x < w; x++) {
                    values[VAR_X] = x;
                    dst16[x] = av_expr_eval(geq->e[plane], values, geq);
                }
                dst16 += linesize / 2;
            } else {
                for (x = 0; x < w; x++) {
                    values[VAR_X] = x;
                    dst[x] = av_expr_eval(geq->e[plane], values, geq);
                }
                dst += linesize;
            }
        }
    }

    av_frame_free(&geq->picref);
    return ff_filter_frame(outlink, out);
}

static av_cold void geq_uninit(AVFilterContext *ctx)
{
    int i;
    GEQContext *geq = ctx->priv;

    for (i = 0; i < FF_ARRAY_ELEMS(geq->e); i++)
        av_expr_free(geq->e[i]);
}

static const AVFilterPad geq_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = geq_config_props,
        .filter_frame = geq_filter_frame,
    },
    { NULL }
};

static const AVFilterPad geq_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

AVFilter ff_vf_geq = {
    .name          = "geq",
    .description   = NULL_IF_CONFIG_SMALL("Apply generic equation to each pixel."),
    .priv_size     = sizeof(GEQContext),
    .init          = geq_init,
    .uninit        = geq_uninit,
    .query_formats = geq_query_formats,
    .inputs        = geq_inputs,
    .outputs       = geq_outputs,
    .priv_class    = &geq_class,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};