FFmpeg
vf_vignette.c
/*
 * Copyright (c) 2013 Clément Bœsch
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <float.h> /* DBL_MAX */

#include "libavutil/opt.h"
#include "libavutil/eval.h"
#include "libavutil/avassert.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"

static const char *const var_names[] = {
    "w",   // stream width
    "h",   // stream height
    "n",   // frame count
    "pts", // presentation timestamp expressed in AV_TIME_BASE units
    "r",   // frame rate
    "t",   // timestamp expressed in seconds
    "tb",  // timebase
    NULL
};
enum var_name {
    VAR_W,
    VAR_H,
    VAR_N,
    VAR_PTS,
    VAR_R,
    VAR_T,
    VAR_TB,
    VAR_NB
};

enum EvalMode {
    EVAL_MODE_INIT,
    EVAL_MODE_FRAME,
    EVAL_MODE_NB
};
typedef struct VignetteContext {
    const AVClass *class;
    const AVPixFmtDescriptor *desc;
    int backward;
    int eval_mode;                      ///< EvalMode
#define DEF_EXPR_FIELDS(name) AVExpr *name##_pexpr; char *name##_expr; double name
    DEF_EXPR_FIELDS(angle);
    DEF_EXPR_FIELDS(x0);
    DEF_EXPR_FIELDS(y0);
    double var_values[VAR_NB];
    float *fmap;
    int fmap_linesize;
    double dmax;
    float xscale, yscale;
    uint32_t dither;
    int do_dither;
    AVRational aspect;
    AVRational scale;
} VignetteContext;
#define OFFSET(x) offsetof(VignetteContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
static const AVOption vignette_options[] = {
    { "angle", "set lens angle", OFFSET(angle_expr), AV_OPT_TYPE_STRING, {.str="PI/5"}, .flags = FLAGS },
    { "a",     "set lens angle", OFFSET(angle_expr), AV_OPT_TYPE_STRING, {.str="PI/5"}, .flags = FLAGS },
    { "x0", "set circle center position on x-axis", OFFSET(x0_expr), AV_OPT_TYPE_STRING, {.str="w/2"}, .flags = FLAGS },
    { "y0", "set circle center position on y-axis", OFFSET(y0_expr), AV_OPT_TYPE_STRING, {.str="h/2"}, .flags = FLAGS },
    { "mode", "set forward/backward mode", OFFSET(backward), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, FLAGS, "mode" },
        { "forward",  NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 0}, INT_MIN, INT_MAX, FLAGS, "mode"},
        { "backward", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 1}, INT_MIN, INT_MAX, FLAGS, "mode"},
    { "eval", "specify when to evaluate expressions", OFFSET(eval_mode), AV_OPT_TYPE_INT, {.i64 = EVAL_MODE_INIT}, 0, EVAL_MODE_NB-1, FLAGS, "eval" },
         { "init",  "eval expressions once during initialization", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_INIT},  .flags = FLAGS, .unit = "eval" },
         { "frame", "eval expressions for each frame",             0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_FRAME}, .flags = FLAGS, .unit = "eval" },
    { "dither", "set dithering", OFFSET(do_dither), AV_OPT_TYPE_BOOL, {.i64 = 1}, 0, 1, FLAGS },
    { "aspect", "set aspect ratio", OFFSET(aspect), AV_OPT_TYPE_RATIONAL, {.dbl = 1}, 0, DBL_MAX, .flags = FLAGS },
    { NULL }
};
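
/* Illustrative only (not part of the original file): the option names above
 * are what the ffmpeg CLI exposes through -vf, e.g.
 *   ffmpeg -i in.mp4 -vf "vignette=angle=PI/4" out.mp4
 *   ffmpeg -i in.mp4 -vf "vignette=x0=w/4:y0=h/4:mode=backward:eval=frame" out.mp4
 */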

AVFILTER_DEFINE_CLASS(vignette);

static av_cold int init(AVFilterContext *ctx)
{
    VignetteContext *s = ctx->priv;

#define PARSE_EXPR(name) do {                                               \
    int ret = av_expr_parse(&s->name##_pexpr,  s->name##_expr, var_names,   \
                            NULL, NULL, NULL, NULL, 0, ctx);                \
    if (ret < 0) {                                                          \
        av_log(ctx, AV_LOG_ERROR, "Unable to parse expression for '"        \
               AV_STRINGIFY(name) "'\n");                                   \
        return ret;                                                         \
    }                                                                       \
} while (0)

    PARSE_EXPR(angle);
    PARSE_EXPR(x0);
    PARSE_EXPR(y0);
    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    VignetteContext *s = ctx->priv;
    av_freep(&s->fmap);
    av_expr_free(s->angle_pexpr);
    av_expr_free(s->x0_pexpr);
    av_expr_free(s->y0_pexpr);
}

static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_RGB24,   AV_PIX_FMT_BGR24,
        AV_PIX_FMT_GRAY8,
        AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P,
        AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P,
        AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
        AV_PIX_FMT_NONE
    };
    AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
    if (!fmts_list)
        return AVERROR(ENOMEM);
    return ff_set_common_formats(ctx, fmts_list);
}
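
/* Natural vignetting falls off roughly as cos^4 of the ray angle.
 * get_natural_factor() maps each pixel to a normalized distance dnorm in
 * [0,1] from the configured center (x0,y0) and returns cos^4(angle*dnorm):
 * 1.0 at the center, progressively darker out to dmax, and 0 beyond it. */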
static double get_natural_factor(const VignetteContext *s, int x, int y)
{
    const int xx = (x - s->x0) * s->xscale;
    const int yy = (y - s->y0) * s->yscale;
    const double dnorm = hypot(xx, yy) / s->dmax;
    if (dnorm > 1) {
        return 0;
    } else {
        const double c = cos(s->angle * dnorm);
        return (c*c)*(c*c); // do not remove braces, it helps compilers
    }
}

#define TS2D(ts)     ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts))
#define TS2T(ts, tb) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts) * av_q2d(tb))
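
/* update_context() re-evaluates the angle/x0/y0 expressions and rebuilds the
 * per-pixel factor map. With mode=forward the map holds the attenuation
 * factor directly; with mode=backward it stores the reciprocal so an existing
 * vignette can be compensated. It runs once at configuration time (eval=init,
 * frame == NULL) or once per frame (eval=frame). */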
static void update_context(VignetteContext *s, AVFilterLink *inlink, AVFrame *frame)
{
    int x, y;
    float *dst = s->fmap;
    int dst_linesize = s->fmap_linesize;

    if (frame) {
        s->var_values[VAR_N]   = inlink->frame_count_out;
        s->var_values[VAR_T]   = TS2T(frame->pts, inlink->time_base);
        s->var_values[VAR_PTS] = TS2D(frame->pts);
    } else {
        s->var_values[VAR_N]   = NAN;
        s->var_values[VAR_T]   = NAN;
        s->var_values[VAR_PTS] = NAN;
    }

    s->angle = av_expr_eval(s->angle_pexpr, s->var_values, NULL);
    s->x0 = av_expr_eval(s->x0_pexpr, s->var_values, NULL);
    s->y0 = av_expr_eval(s->y0_pexpr, s->var_values, NULL);

    if (isnan(s->x0) || isnan(s->y0) || isnan(s->angle))
        s->eval_mode = EVAL_MODE_FRAME;

    s->angle = av_clipf(s->angle, 0, M_PI_2);

    if (s->backward) {
        for (y = 0; y < inlink->h; y++) {
            for (x = 0; x < inlink->w; x++)
                dst[x] = 1. / get_natural_factor(s, x, y);
            dst += dst_linesize;
        }
    } else {
        for (y = 0; y < inlink->h; y++) {
            for (x = 0; x < inlink->w; x++)
                dst[x] = get_natural_factor(s, x, y);
            dst += dst_linesize;
        }
    }
}
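
/* Cheap dithering: a 32-bit linear congruential generator (the classic
 * Numerical Recipes constants 1664525 / 1013904223) yields a pseudo-random
 * offset in [0,1) that is added before the float result is clipped and
 * truncated to 8 bits, trading banding for a little noise. */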
static inline double get_dither_value(VignetteContext *s)
{
    double dv = 0;
    if (s->do_dither) {
        dv = s->dither / (double)(1LL<<32);
        s->dither = s->dither * 1664525 + 1013904223;
    }
    return dv;
}
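
/* filter_frame() applies the factor map. Packed RGB frames are handled in a
 * single pass over the three interleaved components; planar YUV/gray frames
 * are processed per plane, with chroma planes scaled around a neutral value
 * (an offset of 127 in this code) and the map subsampled to the chroma
 * resolution. */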
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    unsigned x, y, direct = 0;
    AVFilterContext *ctx = inlink->dst;
    VignetteContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out;

    if (av_frame_is_writable(in)) {
        direct = 1;
        out = in;
    } else {
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }

    if (s->eval_mode == EVAL_MODE_FRAME)
        update_context(s, inlink, in);

    if (s->desc->flags & AV_PIX_FMT_FLAG_RGB) {
        uint8_t       *dst = out->data[0];
        const uint8_t *src =  in->data[0];
        const float *fmap = s->fmap;
        const int dst_linesize = out->linesize[0];
        const int src_linesize =  in->linesize[0];
        const int fmap_linesize = s->fmap_linesize;

        for (y = 0; y < inlink->h; y++) {
            uint8_t       *dstp = dst;
            const uint8_t *srcp = src;

            for (x = 0; x < inlink->w; x++, dstp += 3, srcp += 3) {
                const float f = fmap[x];

                dstp[0] = av_clip_uint8(srcp[0] * f + get_dither_value(s));
                dstp[1] = av_clip_uint8(srcp[1] * f + get_dither_value(s));
                dstp[2] = av_clip_uint8(srcp[2] * f + get_dither_value(s));
            }
            dst  += dst_linesize;
            src  += src_linesize;
            fmap += fmap_linesize;
        }
    } else {
        int plane;

        for (plane = 0; plane < 4 && in->data[plane] && in->linesize[plane]; plane++) {
            uint8_t       *dst = out->data[plane];
            const uint8_t *src =  in->data[plane];
            const float *fmap = s->fmap;
            const int dst_linesize = out->linesize[plane];
            const int src_linesize =  in->linesize[plane];
            const int fmap_linesize = s->fmap_linesize;
            const int chroma = plane == 1 || plane == 2;
            const int hsub = chroma ? s->desc->log2_chroma_w : 0;
            const int vsub = chroma ? s->desc->log2_chroma_h : 0;
            const int w = AV_CEIL_RSHIFT(inlink->w, hsub);
            const int h = AV_CEIL_RSHIFT(inlink->h, vsub);

            for (y = 0; y < h; y++) {
                uint8_t *dstp = dst;
                const uint8_t *srcp = src;

                for (x = 0; x < w; x++) {
                    const double dv = get_dither_value(s);
                    if (chroma) *dstp++ = av_clip_uint8(fmap[x << hsub] * (*srcp++ - 127) + 127 + dv);
                    else        *dstp++ = av_clip_uint8(fmap[x        ] *  *srcp++       + dv);
                }
                dst  += dst_linesize;
                src  += src_linesize;
                fmap += fmap_linesize << vsub;
            }
        }
    }

    if (!direct)
        av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}
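
/* config_props() precomputes everything that depends only on the input link:
 * x/y scaling derived from the sample aspect ratio and the "aspect" option,
 * the maximum distance dmax from the frame center, and the factor map whose
 * linesize is the frame width aligned up to 32. For eval=init the map is
 * filled once here. */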
static int config_props(AVFilterLink *inlink)
{
    VignetteContext *s = inlink->dst->priv;
    AVRational sar = inlink->sample_aspect_ratio;

    s->desc = av_pix_fmt_desc_get(inlink->format);
    s->var_values[VAR_W]  = inlink->w;
    s->var_values[VAR_H]  = inlink->h;
    s->var_values[VAR_TB] = av_q2d(inlink->time_base);
    s->var_values[VAR_R]  = inlink->frame_rate.num == 0 || inlink->frame_rate.den == 0 ?
        NAN : av_q2d(inlink->frame_rate);

    if (!sar.num || !sar.den)
        sar.num = sar.den = 1;
    if (sar.num > sar.den) {
        s->xscale = av_q2d(av_div_q(sar, s->aspect));
        s->yscale = 1;
    } else {
        s->yscale = av_q2d(av_div_q(s->aspect, sar));
        s->xscale = 1;
    }
    s->dmax = hypot(inlink->w / 2., inlink->h / 2.);
    av_log(s, AV_LOG_DEBUG, "xscale=%f yscale=%f dmax=%f\n",
           s->xscale, s->yscale, s->dmax);

    s->fmap_linesize = FFALIGN(inlink->w, 32);
    s->fmap = av_malloc_array(s->fmap_linesize, inlink->h * sizeof(*s->fmap));
    if (!s->fmap)
        return AVERROR(ENOMEM);

    if (s->eval_mode == EVAL_MODE_INIT)
        update_context(s, inlink, NULL);

    return 0;
}

static const AVFilterPad vignette_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
        .config_props = config_props,
    },
    { NULL }
};

static const AVFilterPad vignette_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};
AVFilter ff_vf_vignette = {
    .name          = "vignette",
    .description   = NULL_IF_CONFIG_SMALL("Make or reverse a vignette effect."),
    .priv_size     = sizeof(VignetteContext),
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = vignette_inputs,
    .outputs       = vignette_outputs,
    .priv_class    = &vignette_class,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};