FFmpeg
vf_vignette.c
/*
 * Copyright (c) 2013 Clément Bœsch
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <float.h> /* DBL_MAX */

#include "libavutil/opt.h"
#include "libavutil/eval.h"
#include "libavutil/avassert.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"

static const char *const var_names[] = {
    "w",   // stream width
    "h",   // stream height
    "n",   // frame count
    "pts", // presentation timestamp expressed in AV_TIME_BASE units
    "r",   // frame rate
    "t",   // timestamp expressed in seconds
    "tb",  // timebase
    NULL
};

enum var_name {
    VAR_W,
    VAR_H,
    VAR_N,
    VAR_PTS,
    VAR_R,
    VAR_T,
    VAR_TB,
    VAR_NB
};

enum EvalMode {
    EVAL_MODE_INIT,
    EVAL_MODE_FRAME,
    EVAL_MODE_NB
};

typedef struct VignetteContext {
    const AVClass *class;
    const AVPixFmtDescriptor *desc;
    int backward;
    int eval_mode;                      ///< EvalMode
#define DEF_EXPR_FIELDS(name) AVExpr *name##_pexpr; char *name##_expr; double name
    DEF_EXPR_FIELDS(angle);
    DEF_EXPR_FIELDS(x0);
    DEF_EXPR_FIELDS(y0);
    double var_values[VAR_NB];
    float *fmap;
    int fmap_linesize;
    double dmax;
    float xscale, yscale;
    uint32_t dither;
    int do_dither;
    AVRational aspect;
    AVRational scale;
} VignetteContext;

#define OFFSET(x) offsetof(VignetteContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
static const AVOption vignette_options[] = {
    { "angle", "set lens angle", OFFSET(angle_expr), AV_OPT_TYPE_STRING, {.str="PI/5"}, .flags = FLAGS },
    { "a",     "set lens angle", OFFSET(angle_expr), AV_OPT_TYPE_STRING, {.str="PI/5"}, .flags = FLAGS },
    { "x0", "set circle center position on x-axis", OFFSET(x0_expr), AV_OPT_TYPE_STRING, {.str="w/2"}, .flags = FLAGS },
    { "y0", "set circle center position on y-axis", OFFSET(y0_expr), AV_OPT_TYPE_STRING, {.str="h/2"}, .flags = FLAGS },
    { "mode", "set forward/backward mode", OFFSET(backward), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, FLAGS, "mode" },
        { "forward",  NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 0}, INT_MIN, INT_MAX, FLAGS, "mode"},
        { "backward", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 1}, INT_MIN, INT_MAX, FLAGS, "mode"},
    { "eval", "specify when to evaluate expressions", OFFSET(eval_mode), AV_OPT_TYPE_INT, {.i64 = EVAL_MODE_INIT}, 0, EVAL_MODE_NB-1, FLAGS, "eval" },
         { "init",  "eval expressions once during initialization", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_INIT},  .flags = FLAGS, .unit = "eval" },
         { "frame", "eval expressions for each frame",             0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_FRAME}, .flags = FLAGS, .unit = "eval" },
    { "dither", "set dithering", OFFSET(do_dither), AV_OPT_TYPE_BOOL, {.i64 = 1}, 0, 1, FLAGS },
    { "aspect", "set aspect ratio", OFFSET(aspect), AV_OPT_TYPE_RATIONAL, {.dbl = 1}, 0, DBL_MAX, .flags = FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(vignette);

static av_cold int init(AVFilterContext *ctx)
{
    VignetteContext *s = ctx->priv;

#define PARSE_EXPR(name) do {                                               \
    int ret = av_expr_parse(&s->name##_pexpr,  s->name##_expr, var_names,   \
                            NULL, NULL, NULL, NULL, 0, ctx);                \
    if (ret < 0) {                                                          \
        av_log(ctx, AV_LOG_ERROR, "Unable to parse expression for '"        \
               AV_STRINGIFY(name) "'\n");                                   \
        return ret;                                                         \
    }                                                                       \
} while (0)

    PARSE_EXPR(angle);
    PARSE_EXPR(x0);
    PARSE_EXPR(y0);
    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    VignetteContext *s = ctx->priv;
    av_freep(&s->fmap);
    av_expr_free(s->angle_pexpr);
    av_expr_free(s->x0_pexpr);
    av_expr_free(s->y0_pexpr);
}

static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_RGB24,   AV_PIX_FMT_BGR24,
        AV_PIX_FMT_GRAY8,
        AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P,
        AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P,
        AV_PIX_FMT_NONE
    };
    AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
    if (!fmts_list)
        return AVERROR(ENOMEM);
    return ff_set_common_formats(ctx, fmts_list);
}

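/*
 * get_natural_factor() below models natural ("cos^4 law") vignetting: for a
 * pixel at aspect-corrected distance d from the configured center (x0, y0),
 * the gain is cos(angle * d / dmax)^4 while d <= dmax, and 0 beyond the
 * reference circle.
 */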
static double get_natural_factor(const VignetteContext *s, int x, int y)
{
    const int xx = (x - s->x0) * s->xscale;
    const int yy = (y - s->y0) * s->yscale;
    const double dnorm = hypot(xx, yy) / s->dmax;
    if (dnorm > 1) {
        return 0;
    } else {
        const double c = cos(s->angle * dnorm);
        return (c*c)*(c*c); // do not remove braces, it helps compilers
    }
}

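/*
 * update_context() (re)evaluates the angle/x0/y0 expressions and rebuilds the
 * per-pixel float gain map s->fmap. It is called once from config_props()
 * when eval=init, or for every frame from filter_frame() when eval=frame; in
 * the init case no frame is available, so n/t/pts evaluate to NAN.
 */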
static void update_context(VignetteContext *s, AVFilterLink *inlink, AVFrame *frame)
{
    int x, y;
    float *dst = s->fmap;
    int dst_linesize = s->fmap_linesize;

    if (frame) {
        s->var_values[VAR_N]   = inlink->frame_count_out;
        s->var_values[VAR_T]   = TS2T(frame->pts, inlink->time_base);
        s->var_values[VAR_PTS] = TS2D(frame->pts);
    } else {
        s->var_values[VAR_N]   = NAN;
        s->var_values[VAR_T]   = NAN;
        s->var_values[VAR_PTS] = NAN;
    }

    s->angle = av_expr_eval(s->angle_pexpr, s->var_values, NULL);
    s->x0 = av_expr_eval(s->x0_pexpr, s->var_values, NULL);
    s->y0 = av_expr_eval(s->y0_pexpr, s->var_values, NULL);

    if (isnan(s->x0) || isnan(s->y0) || isnan(s->angle))
        return;

    s->angle = av_clipf(s->angle, 0, M_PI_2);

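    /*
     * Forward mode multiplies by the factor (<= 1), darkening toward the
     * borders; backward mode stores the reciprocal so that an existing
     * vignette of the same shape is compensated (brightened) instead.
     */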
    if (s->backward) {
        for (y = 0; y < inlink->h; y++) {
            for (x = 0; x < inlink->w; x++)
                dst[x] = 1. / get_natural_factor(s, x, y);
            dst += dst_linesize;
        }
    } else {
        for (y = 0; y < inlink->h; y++) {
            for (x = 0; x < inlink->w; x++)
                dst[x] = get_natural_factor(s, x, y);
            dst += dst_linesize;
        }
    }
}

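/*
 * s->dither is a 32-bit linear congruential generator (the classic
 * 1664525 / 1013904223 constants); get_dither_value() maps its current state
 * into [0, 1) so the truncation of the scaled samples is dithered rather than
 * always rounded the same way. Disabled when dither=0.
 */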
static inline double get_dither_value(VignetteContext *s)
{
    double dv = 0;
    if (s->do_dither) {
        dv = s->dither / (double)(1LL<<32);
        s->dither = s->dither * 1664525 + 1013904223;
    }
    return dv;
}

static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    unsigned x, y, direct = 0;
    AVFilterContext *ctx = inlink->dst;
    VignetteContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out;

    if (av_frame_is_writable(in)) {
        direct = 1;
        out = in;
    } else {
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }

    if (s->eval_mode == EVAL_MODE_FRAME)
        update_context(s, inlink, in);

    if (s->desc->flags & AV_PIX_FMT_FLAG_RGB) {
        uint8_t       *dst = out->data[0];
        const uint8_t *src = in ->data[0];
        const float *fmap = s->fmap;
        const int dst_linesize = out->linesize[0];
        const int src_linesize = in ->linesize[0];
        const int fmap_linesize = s->fmap_linesize;

        for (y = 0; y < inlink->h; y++) {
            uint8_t       *dstp = dst;
            const uint8_t *srcp = src;

            for (x = 0; x < inlink->w; x++, dstp += 3, srcp += 3) {
                const float f = fmap[x];

                dstp[0] = av_clip_uint8(srcp[0] * f + get_dither_value(s));
                dstp[1] = av_clip_uint8(srcp[1] * f + get_dither_value(s));
                dstp[2] = av_clip_uint8(srcp[2] * f + get_dither_value(s));
            }
            dst  += dst_linesize;
            src  += src_linesize;
            fmap += fmap_linesize;
        }
    } else {
        int plane;

        for (plane = 0; plane < 4 && in->data[plane] && in->linesize[plane]; plane++) {
            uint8_t       *dst = out->data[plane];
            const uint8_t *src = in ->data[plane];
            const float *fmap = s->fmap;
            const int dst_linesize = out->linesize[plane];
            const int src_linesize = in ->linesize[plane];
            const int fmap_linesize = s->fmap_linesize;
            const int chroma = plane == 1 || plane == 2;
            const int hsub = chroma ? s->desc->log2_chroma_w : 0;
            const int vsub = chroma ? s->desc->log2_chroma_h : 0;
            const int w = AV_CEIL_RSHIFT(inlink->w, hsub);
            const int h = AV_CEIL_RSHIFT(inlink->h, vsub);

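            /*
             * Chroma planes are attenuated around the neutral value 127 so
             * the vignette fades toward grey instead of shifting hue;
             * subsampled planes reuse the luma-resolution gain map through
             * the hsub/vsub shifts below.
             */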
            for (y = 0; y < h; y++) {
                uint8_t *dstp = dst;
                const uint8_t *srcp = src;

                for (x = 0; x < w; x++) {
                    const double dv = get_dither_value(s);
                    if (chroma) *dstp++ = av_clip_uint8(fmap[x << hsub] * (*srcp++ - 127) + 127 + dv);
                    else        *dstp++ = av_clip_uint8(fmap[x        ] *  *srcp++            + dv);
                }
                dst  += dst_linesize;
                src  += src_linesize;
                fmap += fmap_linesize << vsub;
            }
        }
    }

    if (!direct)
        av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}

static int config_props(AVFilterLink *inlink)
{
    VignetteContext *s = inlink->dst->priv;
    AVRational sar = inlink->sample_aspect_ratio;

    s->desc = av_pix_fmt_desc_get(inlink->format);
    s->var_values[VAR_W]  = inlink->w;
    s->var_values[VAR_H]  = inlink->h;
    s->var_values[VAR_TB] = av_q2d(inlink->time_base);
    s->var_values[VAR_R]  = inlink->frame_rate.num == 0 || inlink->frame_rate.den == 0 ?
        NAN : av_q2d(inlink->frame_rate);

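    /*
     * Scale one axis of the distance computation so the vignette shape
     * follows the input sample aspect ratio and the user-supplied "aspect"
     * option instead of being a plain circle in pixel coordinates.
     */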
    if (!sar.num || !sar.den)
        sar.num = sar.den = 1;
    if (sar.num > sar.den) {
        s->xscale = av_q2d(av_div_q(sar, s->aspect));
        s->yscale = 1;
    } else {
        s->yscale = av_q2d(av_div_q(s->aspect, sar));
        s->xscale = 1;
    }
    s->dmax = hypot(inlink->w / 2., inlink->h / 2.);
    av_log(s, AV_LOG_DEBUG, "xscale=%f yscale=%f dmax=%f\n",
           s->xscale, s->yscale, s->dmax);

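    /*
     * The gain map is one float per luma-resolution pixel; its stride is
     * padded to a multiple of 32 samples (presumably to keep rows nicely
     * aligned), which is why fmap_linesize can differ from inlink->w.
     */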
    s->fmap_linesize = FFALIGN(inlink->w, 32);
    s->fmap = av_malloc_array(s->fmap_linesize, inlink->h * sizeof(*s->fmap));
    if (!s->fmap)
        return AVERROR(ENOMEM);

    if (s->eval_mode == EVAL_MODE_INIT)
        update_context(s, inlink, NULL);

    return 0;
}

static const AVFilterPad vignette_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
        .config_props = config_props,
    },
    { NULL }
};

static const AVFilterPad vignette_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

AVFilter ff_vf_vignette = {
    .name          = "vignette",
    .description   = NULL_IF_CONFIG_SMALL("Make or reverse a vignette effect."),
    .priv_size     = sizeof(VignetteContext),
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = vignette_inputs,
    .outputs       = vignette_outputs,
    .priv_class    = &vignette_class,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
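
/*
 * Illustrative command lines (based on the options defined above; the file
 * names are placeholders):
 *
 *   ffmpeg -i input.mp4 -vf vignette output.mp4
 *       apply the default natural vignette (angle = PI/5, centered)
 *
 *   ffmpeg -i input.mp4 -vf vignette=angle=PI/4:mode=backward output.mp4
 *       brighten the borders instead, e.g. to compensate an existing vignette
 */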