FFmpeg
vf_fftfilt.c
/*
 * Copyright (c) 2015 Arwa Arif <arwaarif1994@gmail.com>
 * Copyright (c) 2017 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or modify it
 * under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation; either version 2.1 of the License,
 * or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * FFT domain filtering.
 */

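/*
 * Per-plane processing outline (see filter_frame() below):
 *   1. Each row is copied into a power-of-two buffer at least 10/9 of the
 *      plane width, padded with mirrored samples (copy_rev), and a
 *      real-to-complex RDFT is run on every row.
 *   2. The rows are transposed into a second buffer and the same padding
 *      plus RDFT is applied column-wise, giving a 2-D frequency-domain
 *      representation of the plane.
 *   3. Every bin is multiplied by the user-supplied weight expression
 *      evaluated at (X, Y), and the dc option adds an offset to the DC
 *      coefficient.
 *   4. Inverse RDFTs undo both passes and the result is clipped to the
 *      plane's bit depth.
 */
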
#include "libavfilter/internal.h"
#include "libavutil/common.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavcodec/avfft.h"
#include "libavutil/eval.h"

#define MAX_PLANES 4

enum EvalMode {
    EVAL_MODE_INIT,
    EVAL_MODE_FRAME,
    EVAL_MODE_NB
};

typedef struct FFTFILTContext {
    const AVClass *class;

    int eval_mode;
    int depth;
    int nb_planes;
    int planewidth[MAX_PLANES];
    int planeheight[MAX_PLANES];

    RDFTContext *hrdft[MAX_PLANES];
    RDFTContext *vrdft[MAX_PLANES];
    RDFTContext *ihrdft[MAX_PLANES];
    RDFTContext *ivrdft[MAX_PLANES];
    int rdft_hbits[MAX_PLANES];
    int rdft_vbits[MAX_PLANES];
    size_t rdft_hlen[MAX_PLANES];
    size_t rdft_vlen[MAX_PLANES];
    FFTSample *rdft_hdata[MAX_PLANES];
    FFTSample *rdft_vdata[MAX_PLANES];

    int dc[MAX_PLANES];
    char *weight_str[MAX_PLANES];
    AVExpr *weight_expr[MAX_PLANES];
    double *weight[MAX_PLANES];

    void (*rdft_horizontal)(struct FFTFILTContext *s, AVFrame *in, int w, int h, int plane);
    void (*irdft_horizontal)(struct FFTFILTContext *s, AVFrame *out, int w, int h, int plane);
} FFTFILTContext;

static const char *const var_names[] = {  "X",   "Y",   "W",   "H",   "N",  NULL };
enum                                   { VAR_X, VAR_Y, VAR_W, VAR_H, VAR_N, VAR_VARS_NB };

enum { Y = 0, U, V };

#define OFFSET(x) offsetof(FFTFILTContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

static const AVOption fftfilt_options[] = {
    { "dc_Y",  "adjust gain in Y plane",      OFFSET(dc[Y]),      AV_OPT_TYPE_INT,    {.i64 = 0},      0,     1000,     FLAGS },
    { "dc_U",  "adjust gain in U plane",      OFFSET(dc[U]),      AV_OPT_TYPE_INT,    {.i64 = 0},      0,     1000,     FLAGS },
    { "dc_V",  "adjust gain in V plane",      OFFSET(dc[V]),      AV_OPT_TYPE_INT,    {.i64 = 0},      0,     1000,     FLAGS },
    { "weight_Y", "set luminance expression in Y plane",   OFFSET(weight_str[Y]), AV_OPT_TYPE_STRING, {.str = "1"},  CHAR_MIN, CHAR_MAX, FLAGS },
    { "weight_U", "set chrominance expression in U plane", OFFSET(weight_str[U]), AV_OPT_TYPE_STRING, {.str = NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "weight_V", "set chrominance expression in V plane", OFFSET(weight_str[V]), AV_OPT_TYPE_STRING, {.str = NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "eval", "specify when to evaluate expressions", OFFSET(eval_mode), AV_OPT_TYPE_INT, {.i64 = EVAL_MODE_INIT}, 0, EVAL_MODE_NB-1, FLAGS, "eval" },
         { "init",  "eval expressions once during initialization", 0, AV_OPT_TYPE_CONST, {.i64 = EVAL_MODE_INIT},  .flags = FLAGS, .unit = "eval" },
         { "frame", "eval expressions per-frame",                  0, AV_OPT_TYPE_CONST, {.i64 = EVAL_MODE_FRAME}, .flags = FLAGS, .unit = "eval" },
    {NULL},
};

AVFILTER_DEFINE_CLASS(fftfilt);
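
/*
 * Illustrative command line (the weight expression here is only an example
 * of using the X, Y, W, H variables; any libavutil expression is accepted):
 *
 *   ffmpeg -i in.mp4 -vf "fftfilt=dc_Y=0:weight_Y='exp(-4*(X+Y)/(W+H))'" out.mp4
 *
 * weight_U / weight_V (and dc_U / dc_V) inherit the luma settings when left
 * unset, see initialize() below; dc_* adds an offset to the DC coefficient
 * of the respective plane.
 */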

static inline double lum(void *priv, double x, double y, int plane)
{
    FFTFILTContext *s = priv;
    return s->rdft_vdata[plane][(int)x * s->rdft_vlen[plane] + (int)y];
}

static double weight_Y(void *priv, double x, double y) { return lum(priv, x, y, Y); }
static double weight_U(void *priv, double x, double y) { return lum(priv, x, y, U); }
static double weight_V(void *priv, double x, double y) { return lum(priv, x, y, V); }

static void copy_rev (FFTSample *dest, int w, int w2)
{
    int i;

    for (i = w; i < w + (w2-w)/2; i++)
        dest[i] = dest[2*w - i - 1];

    for (; i < w2; i++)
        dest[i] = dest[w2 - i];
}
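
/*
 * copy_rev() fills dest[w..w2-1] with mirrored copies of the real samples so
 * that the power-of-two transform length is padded with data rather than
 * zeros, avoiding a hard discontinuity at the plane border.  For example,
 * with w = 4 and w2 = 8 the padding becomes
 * dest[4..7] = { dest[3], dest[2], dest[2], dest[1] }.
 */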

/*Horizontal pass - RDFT*/
static void rdft_horizontal8(FFTFILTContext *s, AVFrame *in, int w, int h, int plane)
{
    int i, j;

    for (i = 0; i < h; i++) {
        for (j = 0; j < w; j++)
            s->rdft_hdata[plane][i * s->rdft_hlen[plane] + j] = *(in->data[plane] + in->linesize[plane] * i + j);

        copy_rev(s->rdft_hdata[plane] + i * s->rdft_hlen[plane], w, s->rdft_hlen[plane]);
    }

    for (i = 0; i < h; i++)
        av_rdft_calc(s->hrdft[plane], s->rdft_hdata[plane] + i * s->rdft_hlen[plane]);
}

static void rdft_horizontal16(FFTFILTContext *s, AVFrame *in, int w, int h, int plane)
{
    const uint16_t *src = (const uint16_t *)in->data[plane];
    int linesize = in->linesize[plane] / 2;
    int i, j;

    for (i = 0; i < h; i++) {
        for (j = 0; j < w; j++)
            s->rdft_hdata[plane][i * s->rdft_hlen[plane] + j] = *(src + linesize * i + j);

        copy_rev(s->rdft_hdata[plane] + i * s->rdft_hlen[plane], w, s->rdft_hlen[plane]);
    }

    for (i = 0; i < h; i++)
        av_rdft_calc(s->hrdft[plane], s->rdft_hdata[plane] + i * s->rdft_hlen[plane]);
}

/*Vertical pass - RDFT*/
static void rdft_vertical(FFTFILTContext *s, int h, int plane)
{
    int i, j;

    for (i = 0; i < s->rdft_hlen[plane]; i++) {
        for (j = 0; j < h; j++)
            s->rdft_vdata[plane][i * s->rdft_vlen[plane] + j] =
                s->rdft_hdata[plane][j * s->rdft_hlen[plane] + i];
        copy_rev(s->rdft_vdata[plane] + i * s->rdft_vlen[plane], h, s->rdft_vlen[plane]);
    }

    for (i = 0; i < s->rdft_hlen[plane]; i++)
        av_rdft_calc(s->vrdft[plane], s->rdft_vdata[plane] + i * s->rdft_vlen[plane]);
}

/*Vertical pass - IRDFT*/
static void irdft_vertical(FFTFILTContext *s, int h, int plane)
{
    int i, j;

    for (i = 0; i < s->rdft_hlen[plane]; i++)
        av_rdft_calc(s->ivrdft[plane], s->rdft_vdata[plane] + i * s->rdft_vlen[plane]);

    for (i = 0; i < s->rdft_hlen[plane]; i++)
        for (j = 0; j < h; j++)
            s->rdft_hdata[plane][j * s->rdft_hlen[plane] + i] =
                s->rdft_vdata[plane][i * s->rdft_vlen[plane] + j];
}
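
/*
 * rdft_vertical() works on a transposed copy of the data in rdft_vdata (row i
 * of that buffer holds padded column i of rdft_hdata), so the column
 * transform can reuse the 1-D RDFT; irdft_vertical() runs the inverse
 * transform and transposes the result back into rdft_hdata for the
 * horizontal inverse pass.
 */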

/*Horizontal pass - IRDFT*/
static void irdft_horizontal8(FFTFILTContext *s, AVFrame *out, int w, int h, int plane)
{
    int i, j;

    for (i = 0; i < h; i++)
        av_rdft_calc(s->ihrdft[plane], s->rdft_hdata[plane] + i * s->rdft_hlen[plane]);

    for (i = 0; i < h; i++)
        for (j = 0; j < w; j++)
            *(out->data[plane] + out->linesize[plane] * i + j) =
                av_clip(s->rdft_hdata[plane][i * s->rdft_hlen[plane] + j] * 4 /
                        (s->rdft_hlen[plane] * s->rdft_vlen[plane]), 0, 255);
}

static void irdft_horizontal16(FFTFILTContext *s, AVFrame *out, int w, int h, int plane)
{
    uint16_t *dst = (uint16_t *)out->data[plane];
    int linesize = out->linesize[plane] / 2;
    int max = (1 << s->depth) - 1;
    int i, j;

    for (i = 0; i < h; i++)
        av_rdft_calc(s->ihrdft[plane], s->rdft_hdata[plane] + i * s->rdft_hlen[plane]);

    for (i = 0; i < h; i++)
        for (j = 0; j < w; j++)
            *(dst + linesize * i + j) =
                av_clip(s->rdft_hdata[plane][i * s->rdft_hlen[plane] + j] * 4 /
                        (s->rdft_hlen[plane] * s->rdft_vlen[plane]), 0, max);
}
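
/*
 * The factor 4 / (rdft_hlen * rdft_vlen) in the two functions above undoes
 * the scaling of the unnormalized transforms: each forward + inverse RDFT
 * round trip on a buffer of length n leaves the data multiplied by n / 2, so
 * the horizontal and vertical passes together scale every sample by
 * (rdft_hlen / 2) * (rdft_vlen / 2).
 */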

static av_cold int initialize(AVFilterContext *ctx)
{
    FFTFILTContext *s = ctx->priv;
    int ret = 0, plane;

    if (!s->dc[U] && !s->dc[V]) {
        s->dc[U] = s->dc[Y];
        s->dc[V] = s->dc[Y];
    } else {
        if (!s->dc[U]) s->dc[U] = s->dc[V];
        if (!s->dc[V]) s->dc[V] = s->dc[U];
    }

    if (!s->weight_str[U] && !s->weight_str[V]) {
        s->weight_str[U] = av_strdup(s->weight_str[Y]);
        s->weight_str[V] = av_strdup(s->weight_str[Y]);
    } else {
        if (!s->weight_str[U]) s->weight_str[U] = av_strdup(s->weight_str[V]);
        if (!s->weight_str[V]) s->weight_str[V] = av_strdup(s->weight_str[U]);
    }

    for (plane = 0; plane < 3; plane++) {
        static double (*p[])(void *, double, double) = { weight_Y, weight_U, weight_V };
        const char *const func2_names[] = {"weight_Y", "weight_U", "weight_V", NULL };
        double (*func2[])(void *, double, double) = { weight_Y, weight_U, weight_V, p[plane], NULL };

        ret = av_expr_parse(&s->weight_expr[plane], s->weight_str[plane], var_names,
                            NULL, NULL, func2_names, func2, 0, ctx);
        if (ret < 0)
            break;
    }
    return ret;
}

static void do_eval(FFTFILTContext *s, AVFilterLink *inlink, int plane)
{
    double values[VAR_VARS_NB];
    int i, j;

    values[VAR_N] = inlink->frame_count_out;
    values[VAR_W] = s->planewidth[plane];
    values[VAR_H] = s->planeheight[plane];

    for (i = 0; i < s->rdft_hlen[plane]; i++) {
        values[VAR_X] = i;
        for (j = 0; j < s->rdft_vlen[plane]; j++) {
            values[VAR_Y] = j;
            s->weight[plane][i * s->rdft_vlen[plane] + j] =
                av_expr_eval(s->weight_expr[plane], values, s);
        }
    }
}
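
/*
 * With eval=init the weight table is computed once in config_props(); with
 * eval=frame it is recomputed in filter_frame() for every input frame, which
 * is what makes the N (frame count) variable useful in expressions.
 */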

static int config_props(AVFilterLink *inlink)
{
    FFTFILTContext *s = inlink->dst->priv;
    const AVPixFmtDescriptor *desc;
    int rdft_hbits, rdft_vbits, i, plane;

    desc = av_pix_fmt_desc_get(inlink->format);
    s->depth = desc->comp[0].depth;
    s->planewidth[1]  = s->planewidth[2]  = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
    s->planewidth[0]  = s->planewidth[3]  = inlink->w;
    s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
    s->planeheight[0] = s->planeheight[3] = inlink->h;

    s->nb_planes = av_pix_fmt_count_planes(inlink->format);

    for (i = 0; i < desc->nb_components; i++) {
        int w = s->planewidth[i];
        int h = s->planeheight[i];

        /* RDFT - Array initialization for Horizontal pass*/
        for (rdft_hbits = 1; 1 << rdft_hbits < w*10/9; rdft_hbits++);
        s->rdft_hbits[i] = rdft_hbits;
        s->rdft_hlen[i] = 1 << rdft_hbits;
        if (!(s->rdft_hdata[i] = av_malloc_array(h, s->rdft_hlen[i] * sizeof(FFTSample))))
            return AVERROR(ENOMEM);

        if (!(s->hrdft[i] = av_rdft_init(s->rdft_hbits[i], DFT_R2C)))
            return AVERROR(ENOMEM);
        if (!(s->ihrdft[i] = av_rdft_init(s->rdft_hbits[i], IDFT_C2R)))
            return AVERROR(ENOMEM);

        /* RDFT - Array initialization for Vertical pass*/
        for (rdft_vbits = 1; 1 << rdft_vbits < h*10/9; rdft_vbits++);
        s->rdft_vbits[i] = rdft_vbits;
        s->rdft_vlen[i] = 1 << rdft_vbits;
        if (!(s->rdft_vdata[i] = av_malloc_array(s->rdft_hlen[i], s->rdft_vlen[i] * sizeof(FFTSample))))
            return AVERROR(ENOMEM);

        if (!(s->vrdft[i] = av_rdft_init(s->rdft_vbits[i], DFT_R2C)))
            return AVERROR(ENOMEM);
        if (!(s->ivrdft[i] = av_rdft_init(s->rdft_vbits[i], IDFT_C2R)))
            return AVERROR(ENOMEM);
    }

    /*Luminance value - Array initialization*/
    for (plane = 0; plane < 3; plane++) {
        if (!(s->weight[plane] = av_malloc_array(s->rdft_hlen[plane], s->rdft_vlen[plane] * sizeof(double))))
            return AVERROR(ENOMEM);

        if (s->eval_mode == EVAL_MODE_INIT)
            do_eval(s, inlink, plane);
    }

    if (s->depth <= 8) {
        s->rdft_horizontal  = rdft_horizontal8;
        s->irdft_horizontal = irdft_horizontal8;
    } else if (s->depth > 8) {
        s->rdft_horizontal  = rdft_horizontal16;
        s->irdft_horizontal = irdft_horizontal16;
    } else {
        return AVERROR_BUG;
    }
    return 0;
}
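
/*
 * Example of the padded length computation above: for a 1920-pixel-wide luma
 * plane, w*10/9 = 2133, so rdft_hbits ends at 12 and rdft_hlen becomes 4096,
 * the smallest power of two that is at least 10/9 of the width, leaving room
 * for the mirrored padding written by copy_rev().
 */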

static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    FFTFILTContext *s = ctx->priv;
    AVFrame *out;
    int i, j, plane;

    out = ff_get_video_buffer(outlink, inlink->w, inlink->h);
    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }

    av_frame_copy_props(out, in);

    for (plane = 0; plane < s->nb_planes; plane++) {
        int w = s->planewidth[plane];
        int h = s->planeheight[plane];

        if (s->eval_mode == EVAL_MODE_FRAME)
            do_eval(s, inlink, plane);

        s->rdft_horizontal(s, in, w, h, plane);
        rdft_vertical(s, h, plane);

        /*Change user defined parameters*/
        for (i = 0; i < s->rdft_hlen[plane]; i++)
            for (j = 0; j < s->rdft_vlen[plane]; j++)
                s->rdft_vdata[plane][i * s->rdft_vlen[plane] + j] *=
                    s->weight[plane][i * s->rdft_vlen[plane] + j];

        s->rdft_vdata[plane][0] += s->rdft_hlen[plane] * s->rdft_vlen[plane] * s->dc[plane];

        irdft_vertical(s, h, plane);
        s->irdft_horizontal(s, out, w, h, plane);
    }

    av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}

static av_cold void uninit(AVFilterContext *ctx)
{
    FFTFILTContext *s = ctx->priv;
    int i;
    for (i = 0; i < MAX_PLANES; i++) {
        av_free(s->rdft_hdata[i]);
        av_free(s->rdft_vdata[i]);
        av_expr_free(s->weight_expr[i]);
        av_free(s->weight[i]);
        av_rdft_end(s->hrdft[i]);
        av_rdft_end(s->ihrdft[i]);
        av_rdft_end(s->vrdft[i]);
        av_rdft_end(s->ivrdft[i]);
    }
}

static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pixel_fmts_fftfilt[] = {
        AV_PIX_FMT_GRAY8,
        AV_PIX_FMT_YUV420P,
        AV_PIX_FMT_YUV422P,
        AV_PIX_FMT_YUV444P,
        AV_PIX_FMT_YUVJ420P,
        AV_PIX_FMT_YUVJ422P,
        AV_PIX_FMT_YUVJ444P,
        AV_PIX_FMT_YUV420P9,  AV_PIX_FMT_YUV422P9,  AV_PIX_FMT_YUV444P9,
        AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
        AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12,
        AV_PIX_FMT_YUV420P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV444P14,
        AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
        AV_PIX_FMT_NONE
    };

    AVFilterFormats *fmts_list = ff_make_format_list(pixel_fmts_fftfilt);
    if (!fmts_list)
        return AVERROR(ENOMEM);
    return ff_set_common_formats(ctx, fmts_list);
}

static const AVFilterPad fftfilt_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_props,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad fftfilt_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

AVFilter ff_vf_fftfilt = {
    .name          = "fftfilt",
    .description   = NULL_IF_CONFIG_SMALL("Apply arbitrary expressions to pixels in frequency domain."),
    .priv_size     = sizeof(FFTFILTContext),
    .priv_class    = &fftfilt_class,
    .inputs        = fftfilt_inputs,
    .outputs       = fftfilt_outputs,
    .query_formats = query_formats,
    .init          = initialize,
    .uninit        = uninit,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};