FFmpeg
vf_fftfilt.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2015 Arwa Arif <arwaarif1994@gmail.com>
3  * Copyright (c) 2017 Paul B Mahol
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or modify it
8  * under the terms of the GNU Lesser General Public License as published
9  * by the Free Software Foundation; either version 2.1 of the License,
10  * or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * FFT domain filtering.
25  */
26 
27 #include "libavfilter/internal.h"
28 #include "libavutil/common.h"
29 #include "libavutil/imgutils.h"
30 #include "libavutil/opt.h"
31 #include "libavutil/pixdesc.h"
32 #include "libavcodec/avfft.h"
33 #include "libavutil/eval.h"
34 
35 #define MAX_PLANES 4
36 
37 enum EvalMode {
41 };
42 
43 typedef struct FFTFILTContext {
44  const AVClass *class;
45 
46  int eval_mode;
47  int depth;
48  int nb_planes;
51 
62 
63  int dc[MAX_PLANES];
66  double *weight[MAX_PLANES];
67 
68  void (*rdft_horizontal)(struct FFTFILTContext *s, AVFrame *in, int w, int h, int plane);
69  void (*irdft_horizontal)(struct FFTFILTContext *s, AVFrame *out, int w, int h, int plane);
71 
/* Symbolic variables available in the per-plane weight expressions:
 * X/Y are frequency-domain coordinates, W/H the plane size, N the frame index. */
static const char *const var_names[] = { "X", "Y", "W", "H", "N", NULL };
74 
/* Plane indices used for the dc[]/weight_str[] option arrays. */
enum { Y = 0, U, V };
76 
77 #define OFFSET(x) offsetof(FFTFILTContext, x)
78 #define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
79 
/* Filter options: per-plane DC gain, per-plane frequency-weight expressions,
 * and the expression evaluation mode (once at init, or per frame). */
static const AVOption fftfilt_options[] = {
    { "dc_Y",  "adjust gain in Y plane",              OFFSET(dc[Y]),      AV_OPT_TYPE_INT,    {.i64 = 0},      0, 1000,     FLAGS },
    { "dc_U",  "adjust gain in U plane",              OFFSET(dc[U]),      AV_OPT_TYPE_INT,    {.i64 = 0},      0, 1000,     FLAGS },
    { "dc_V",  "adjust gain in V plane",              OFFSET(dc[V]),      AV_OPT_TYPE_INT,    {.i64 = 0},      0, 1000,     FLAGS },
    { "weight_Y", "set luminance expression in Y plane",   OFFSET(weight_str[Y]), AV_OPT_TYPE_STRING, {.str = "1"},  0, 0, FLAGS },
    /* unset U/V weights inherit from each other or from Y in initialize() */
    { "weight_U", "set chrominance expression in U plane", OFFSET(weight_str[U]), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
    { "weight_V", "set chrominance expression in V plane", OFFSET(weight_str[V]), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
    { "eval", "specify when to evaluate expressions", OFFSET(eval_mode), AV_OPT_TYPE_INT, {.i64 = EVAL_MODE_INIT}, 0, EVAL_MODE_NB-1, FLAGS, "eval" },
    { "init",  "eval expressions once during initialization", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_INIT},  .flags = FLAGS, .unit = "eval" },
    { "frame", "eval expressions per-frame",                  0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_FRAME}, .flags = FLAGS, .unit = "eval" },
    {NULL},
};
92 
93 AVFILTER_DEFINE_CLASS(fftfilt);
94 
/* Expression callback helper: sample the vertical-pass spectrum of the given
 * plane at (x, y). Exposed to user expressions via weight_Y/U/V below. */
static inline double lum(void *priv, double x, double y, int plane)
{
    FFTFILTContext *s = priv;
    return s->rdft_vdata[plane][(int)x * s->rdft_vlen[plane] + (int)y];
}
100 
/* Per-plane wrappers matching the av_expr_parse func2 callback signature. */
static double weight_Y(void *priv, double x, double y) { return lum(priv, x, y, Y); }
static double weight_U(void *priv, double x, double y) { return lum(priv, x, y, U); }
static double weight_V(void *priv, double x, double y) { return lum(priv, x, y, V); }
104 
105 static void copy_rev (FFTSample *dest, int w, int w2)
106 {
107  int i;
108 
109  for (i = w; i < w + (w2-w)/2; i++)
110  dest[i] = dest[2*w - i - 1];
111 
112  for (; i < w2; i++)
113  dest[i] = dest[w2 - i];
114 }
115 
116 /*Horizontal pass - RDFT*/
117 static void rdft_horizontal8(FFTFILTContext *s, AVFrame *in, int w, int h, int plane)
118 {
119  int i, j;
120 
121  for (i = 0; i < h; i++) {
122  for (j = 0; j < w; j++)
123  s->rdft_hdata[plane][i * s->rdft_hlen[plane] + j] = *(in->data[plane] + in->linesize[plane] * i + j);
124 
125  copy_rev(s->rdft_hdata[plane] + i * s->rdft_hlen[plane], w, s->rdft_hlen[plane]);
126  }
127 
128  for (i = 0; i < h; i++)
129  av_rdft_calc(s->hrdft[plane], s->rdft_hdata[plane] + i * s->rdft_hlen[plane]);
130 }
131 
132 static void rdft_horizontal16(FFTFILTContext *s, AVFrame *in, int w, int h, int plane)
133 {
134  const uint16_t *src = (const uint16_t *)in->data[plane];
135  int linesize = in->linesize[plane] / 2;
136  int i, j;
137 
138  for (i = 0; i < h; i++) {
139  for (j = 0; j < w; j++)
140  s->rdft_hdata[plane][i * s->rdft_hlen[plane] + j] = *(src + linesize * i + j);
141 
142  copy_rev(s->rdft_hdata[plane] + i * s->rdft_hlen[plane], w, s->rdft_hlen[plane]);
143  }
144 
145  for (i = 0; i < h; i++)
146  av_rdft_calc(s->hrdft[plane], s->rdft_hdata[plane] + i * s->rdft_hlen[plane]);
147 }
148 
149 /*Vertical pass - RDFT*/
150 static void rdft_vertical(FFTFILTContext *s, int h, int plane)
151 {
152  int i, j;
153 
154  for (i = 0; i < s->rdft_hlen[plane]; i++) {
155  for (j = 0; j < h; j++)
156  s->rdft_vdata[plane][i * s->rdft_vlen[plane] + j] =
157  s->rdft_hdata[plane][j * s->rdft_hlen[plane] + i];
158  copy_rev(s->rdft_vdata[plane] + i * s->rdft_vlen[plane], h, s->rdft_vlen[plane]);
159  }
160 
161  for (i = 0; i < s->rdft_hlen[plane]; i++)
162  av_rdft_calc(s->vrdft[plane], s->rdft_vdata[plane] + i * s->rdft_vlen[plane]);
163 }
164 /*Vertical pass - IRDFT*/
165 static void irdft_vertical(FFTFILTContext *s, int h, int plane)
166 {
167  int i, j;
168 
169  for (i = 0; i < s->rdft_hlen[plane]; i++)
170  av_rdft_calc(s->ivrdft[plane], s->rdft_vdata[plane] + i * s->rdft_vlen[plane]);
171 
172  for (i = 0; i < s->rdft_hlen[plane]; i++)
173  for (j = 0; j < h; j++)
174  s->rdft_hdata[plane][j * s->rdft_hlen[plane] + i] =
175  s->rdft_vdata[plane][i * s->rdft_vlen[plane] + j];
176 }
177 
178 /*Horizontal pass - IRDFT*/
179 static void irdft_horizontal8(FFTFILTContext *s, AVFrame *out, int w, int h, int plane)
180 {
181  int i, j;
182 
183  for (i = 0; i < h; i++)
184  av_rdft_calc(s->ihrdft[plane], s->rdft_hdata[plane] + i * s->rdft_hlen[plane]);
185 
186  for (i = 0; i < h; i++)
187  for (j = 0; j < w; j++)
188  *(out->data[plane] + out->linesize[plane] * i + j) = av_clip(s->rdft_hdata[plane][i
189  *s->rdft_hlen[plane] + j] * 4 /
190  (s->rdft_hlen[plane] *
191  s->rdft_vlen[plane]), 0, 255);
192 }
193 
194 static void irdft_horizontal16(FFTFILTContext *s, AVFrame *out, int w, int h, int plane)
195 {
196  uint16_t *dst = (uint16_t *)out->data[plane];
197  int linesize = out->linesize[plane] / 2;
198  int max = (1 << s->depth) - 1;
199  int i, j;
200 
201  for (i = 0; i < h; i++)
202  av_rdft_calc(s->ihrdft[plane], s->rdft_hdata[plane] + i * s->rdft_hlen[plane]);
203 
204  for (i = 0; i < h; i++)
205  for (j = 0; j < w; j++)
206  *(dst + linesize * i + j) = av_clip(s->rdft_hdata[plane][i
207  *s->rdft_hlen[plane] + j] * 4 /
208  (s->rdft_hlen[plane] *
209  s->rdft_vlen[plane]), 0, max);
210 }
211 
213 {
214  FFTFILTContext *s = ctx->priv;
215  int ret = 0, plane;
216 
217  if (!s->dc[U] && !s->dc[V]) {
218  s->dc[U] = s->dc[Y];
219  s->dc[V] = s->dc[Y];
220  } else {
221  if (!s->dc[U]) s->dc[U] = s->dc[V];
222  if (!s->dc[V]) s->dc[V] = s->dc[U];
223  }
224 
225  if (!s->weight_str[U] && !s->weight_str[V]) {
226  s->weight_str[U] = av_strdup(s->weight_str[Y]);
227  s->weight_str[V] = av_strdup(s->weight_str[Y]);
228  } else {
229  if (!s->weight_str[U]) s->weight_str[U] = av_strdup(s->weight_str[V]);
230  if (!s->weight_str[V]) s->weight_str[V] = av_strdup(s->weight_str[U]);
231  }
232 
233  for (plane = 0; plane < 3; plane++) {
234  static double (*p[])(void *, double, double) = { weight_Y, weight_U, weight_V };
235  const char *const func2_names[] = {"weight_Y", "weight_U", "weight_V", NULL };
236  double (*func2[])(void *, double, double) = { weight_Y, weight_U, weight_V, p[plane], NULL };
237 
238  ret = av_expr_parse(&s->weight_expr[plane], s->weight_str[plane], var_names,
239  NULL, NULL, func2_names, func2, 0, ctx);
240  if (ret < 0)
241  break;
242  }
243  return ret;
244 }
245 
246 static void do_eval(FFTFILTContext *s, AVFilterLink *inlink, int plane)
247 {
248  double values[VAR_VARS_NB];
249  int i, j;
250 
251  values[VAR_N] = inlink->frame_count_out;
252  values[VAR_W] = s->planewidth[plane];
253  values[VAR_H] = s->planeheight[plane];
254 
255  for (i = 0; i < s->rdft_hlen[plane]; i++) {
256  values[VAR_X] = i;
257  for (j = 0; j < s->rdft_vlen[plane]; j++) {
258  values[VAR_Y] = j;
259  s->weight[plane][i * s->rdft_vlen[plane] + j] =
260  av_expr_eval(s->weight_expr[plane], values, s);
261  }
262  }
263 }
264 
266 {
267  FFTFILTContext *s = inlink->dst->priv;
268  const AVPixFmtDescriptor *desc;
269  int rdft_hbits, rdft_vbits, i, plane;
270 
271  desc = av_pix_fmt_desc_get(inlink->format);
272  s->depth = desc->comp[0].depth;
273  s->planewidth[1] = s->planewidth[2] = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
274  s->planewidth[0] = s->planewidth[3] = inlink->w;
275  s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
276  s->planeheight[0] = s->planeheight[3] = inlink->h;
277 
278  s->nb_planes = av_pix_fmt_count_planes(inlink->format);
279 
280  for (i = 0; i < desc->nb_components; i++) {
281  int w = s->planewidth[i];
282  int h = s->planeheight[i];
283 
284  /* RDFT - Array initialization for Horizontal pass*/
285  for (rdft_hbits = 1; 1 << rdft_hbits < w*10/9; rdft_hbits++);
286  s->rdft_hbits[i] = rdft_hbits;
287  s->rdft_hlen[i] = 1 << rdft_hbits;
288  if (!(s->rdft_hdata[i] = av_malloc_array(h, s->rdft_hlen[i] * sizeof(FFTSample))))
289  return AVERROR(ENOMEM);
290 
291  if (!(s->hrdft[i] = av_rdft_init(s->rdft_hbits[i], DFT_R2C)))
292  return AVERROR(ENOMEM);
293  if (!(s->ihrdft[i] = av_rdft_init(s->rdft_hbits[i], IDFT_C2R)))
294  return AVERROR(ENOMEM);
295 
296  /* RDFT - Array initialization for Vertical pass*/
297  for (rdft_vbits = 1; 1 << rdft_vbits < h*10/9; rdft_vbits++);
298  s->rdft_vbits[i] = rdft_vbits;
299  s->rdft_vlen[i] = 1 << rdft_vbits;
300  if (!(s->rdft_vdata[i] = av_malloc_array(s->rdft_hlen[i], s->rdft_vlen[i] * sizeof(FFTSample))))
301  return AVERROR(ENOMEM);
302 
303  if (!(s->vrdft[i] = av_rdft_init(s->rdft_vbits[i], DFT_R2C)))
304  return AVERROR(ENOMEM);
305  if (!(s->ivrdft[i] = av_rdft_init(s->rdft_vbits[i], IDFT_C2R)))
306  return AVERROR(ENOMEM);
307  }
308 
309  /*Luminance value - Array initialization*/
310  for (plane = 0; plane < 3; plane++) {
311  if(!(s->weight[plane] = av_malloc_array(s->rdft_hlen[plane], s->rdft_vlen[plane] * sizeof(double))))
312  return AVERROR(ENOMEM);
313 
314  if (s->eval_mode == EVAL_MODE_INIT)
315  do_eval(s, inlink, plane);
316  }
317 
318  if (s->depth <= 8) {
319  s->rdft_horizontal = rdft_horizontal8;
320  s->irdft_horizontal = irdft_horizontal8;
321  } else if (s->depth > 8) {
322  s->rdft_horizontal = rdft_horizontal16;
323  s->irdft_horizontal = irdft_horizontal16;
324  } else {
325  return AVERROR_BUG;
326  }
327  return 0;
328 }
329 
331 {
332  AVFilterContext *ctx = inlink->dst;
333  AVFilterLink *outlink = inlink->dst->outputs[0];
334  FFTFILTContext *s = ctx->priv;
335  AVFrame *out;
336  int i, j, plane;
337 
338  out = ff_get_video_buffer(outlink, inlink->w, inlink->h);
339  if (!out) {
340  av_frame_free(&in);
341  return AVERROR(ENOMEM);
342  }
343 
345 
346  for (plane = 0; plane < s->nb_planes; plane++) {
347  int w = s->planewidth[plane];
348  int h = s->planeheight[plane];
349 
350  if (s->eval_mode == EVAL_MODE_FRAME)
351  do_eval(s, inlink, plane);
352 
353  s->rdft_horizontal(s, in, w, h, plane);
354  rdft_vertical(s, h, plane);
355 
356  /*Change user defined parameters*/
357  for (i = 0; i < s->rdft_hlen[plane]; i++)
358  for (j = 0; j < s->rdft_vlen[plane]; j++)
359  s->rdft_vdata[plane][i * s->rdft_vlen[plane] + j] *=
360  s->weight[plane][i * s->rdft_vlen[plane] + j];
361 
362  s->rdft_vdata[plane][0] += s->rdft_hlen[plane] * s->rdft_vlen[plane] * s->dc[plane];
363 
364  irdft_vertical(s, h, plane);
365  s->irdft_horizontal(s, out, w, h, plane);
366  }
367 
368  av_frame_free(&in);
369  return ff_filter_frame(outlink, out);
370 }
371 
373 {
374  FFTFILTContext *s = ctx->priv;
375  int i;
376  for (i = 0; i < MAX_PLANES; i++) {
377  av_free(s->rdft_hdata[i]);
378  av_free(s->rdft_vdata[i]);
379  av_expr_free(s->weight_expr[i]);
380  av_free(s->weight[i]);
381  av_rdft_end(s->hrdft[i]);
382  av_rdft_end(s->ihrdft[i]);
383  av_rdft_end(s->vrdft[i]);
384  av_rdft_end(s->ivrdft[i]);
385  }
386 }
387 
389 {
390  static const enum AVPixelFormat pixel_fmts_fftfilt[] = {
405  };
406 
407  AVFilterFormats *fmts_list = ff_make_format_list(pixel_fmts_fftfilt);
408  if (!fmts_list)
409  return AVERROR(ENOMEM);
410  return ff_set_common_formats(ctx, fmts_list);
411 }
412 
/* Single video input: link configuration and per-frame processing. */
static const AVFilterPad fftfilt_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_props,
        .filter_frame = filter_frame,
    },
    { NULL }
};
422 
/* Single video output; no output-side callbacks needed. */
static const AVFilterPad fftfilt_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};
430 
432  .name = "fftfilt",
433  .description = NULL_IF_CONFIG_SMALL("Apply arbitrary expressions to pixels in frequency domain."),
434  .priv_size = sizeof(FFTFILTContext),
435  .priv_class = &fftfilt_class,
439  .init = initialize,
440  .uninit = uninit,
442 };
ff_get_video_buffer
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
Definition: video.c:99
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
av_clip
#define av_clip
Definition: common.h:122
init
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:31
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
ff_make_format_list
AVFilterFormats * ff_make_format_list(const int *fmts)
Create a list of supported formats.
Definition: formats.c:286
fftfilt_outputs
static const AVFilterPad fftfilt_outputs[]
Definition: vf_fftfilt.c:423
out
FILE * out
Definition: movenc.c:54
U
@ U
Definition: vf_fftfilt.c:75
ff_filter_frame
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1096
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2573
config_props
static int config_props(AVFilterLink *inlink)
Definition: vf_fftfilt.c:265
inlink
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
Definition: filter_design.txt:212
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:203
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:318
pixdesc.h
w
uint8_t w
Definition: llviddspenc.c:39
FFTFILTContext::rdft_vbits
int rdft_vbits[MAX_PLANES]
Definition: vf_fftfilt.c:57
AVOption
AVOption.
Definition: opt.h:248
VAR_H
@ VAR_H
Definition: vf_fftfilt.c:73
AV_PIX_FMT_YUV420P10
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:399
max
#define max(a, b)
Definition: cuda_runtime.h:33
AVFilter::name
const char * name
Filter name.
Definition: avfilter.h:149
FFTFILTContext::rdft_vdata
FFTSample * rdft_vdata[MAX_PLANES]
Definition: vf_fftfilt.c:61
func2_names
static const char *const func2_names[]
Definition: af_afftfilt.c:120
uninit
static av_cold void uninit(AVFilterContext *ctx)
Definition: vf_fftfilt.c:372
AVFilterFormats
A list of supported formats for one end of a filter link.
Definition: formats.h:65
av_expr_parse
int av_expr_parse(AVExpr **expr, const char *s, const char *const *const_names, const char *const *func1_names, double(*const *funcs1)(void *, double), const char *const *func2_names, double(*const *funcs2)(void *, double, double), int log_offset, void *log_ctx)
Parse an expression.
Definition: eval.c:685
av_pix_fmt_count_planes
int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2613
EVAL_MODE_FRAME
@ EVAL_MODE_FRAME
Definition: vf_fftfilt.c:39
VAR_VARS_NB
@ VAR_VARS_NB
Definition: vf_fftfilt.c:73
rdft_horizontal16
static void rdft_horizontal16(FFTFILTContext *s, AVFrame *in, int w, int h, int plane)
Definition: vf_fftfilt.c:132
do_eval
static void do_eval(FFTFILTContext *s, AVFilterLink *inlink, int plane)
Definition: vf_fftfilt.c:246
FFTFILTContext::rdft_hdata
FFTSample * rdft_hdata[MAX_PLANES]
Definition: vf_fftfilt.c:60
AV_PIX_FMT_YUV422P9
#define AV_PIX_FMT_YUV422P9
Definition: pixfmt.h:397
IDFT_C2R
@ IDFT_C2R
Definition: avfft.h:73
av_expr_free
void av_expr_free(AVExpr *e)
Free a parsed expression previously created with av_expr_parse().
Definition: eval.c:336
irdft_horizontal8
static void irdft_horizontal8(FFTFILTContext *s, AVFrame *out, int w, int h, int plane)
Definition: vf_fftfilt.c:179
AVFilterPad
A filter pad used for either input or output.
Definition: internal.h:54
FFTFILTContext::planewidth
int planewidth[MAX_PLANES]
Definition: vf_fftfilt.c:49
FFTFILTContext::eval_mode
int eval_mode
Definition: vf_fftfilt.c:46
AV_PIX_FMT_YUV444P10
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:402
func2
static double(*const func2[])(void *, double, double)
Definition: af_afftfilt.c:121
av_cold
#define av_cold
Definition: attributes.h:90
AV_PIX_FMT_YUV422P16
#define AV_PIX_FMT_YUV422P16
Definition: pixfmt.h:411
ff_set_common_formats
int ff_set_common_formats(AVFilterContext *ctx, AVFilterFormats *formats)
A helper for query_formats() which sets all links to the same list of formats.
Definition: formats.c:587
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:79
s
#define s(width, name)
Definition: cbs_vp9.c:257
AV_PIX_FMT_YUV444P16
#define AV_PIX_FMT_YUV444P16
Definition: pixfmt.h:412
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:58
FFTFILTContext::ihrdft
RDFTContext * ihrdft[MAX_PLANES]
Definition: vf_fftfilt.c:54
FFTFILTContext::rdft_vlen
size_t rdft_vlen[MAX_PLANES]
Definition: vf_fftfilt.c:59
FFTFILTContext::dc
int dc[MAX_PLANES]
Definition: vf_fftfilt.c:63
outputs
static const AVFilterPad outputs[]
Definition: af_acontrast.c:203
AV_PIX_FMT_YUV420P9
#define AV_PIX_FMT_YUV420P9
Definition: pixfmt.h:396
AV_PIX_FMT_YUV420P16
#define AV_PIX_FMT_YUV420P16
Definition: pixfmt.h:410
ctx
AVFormatContext * ctx
Definition: movenc.c:48
FFTFILTContext::vrdft
RDFTContext * vrdft[MAX_PLANES]
Definition: vf_fftfilt.c:53
av_expr_eval
double av_expr_eval(AVExpr *e, const double *const_values, void *opaque)
Evaluate a previously parsed expression.
Definition: eval.c:766
MAX_PLANES
#define MAX_PLANES
Definition: vf_fftfilt.c:35
OFFSET
#define OFFSET(x)
Definition: vf_fftfilt.c:77
FFTFILTContext::depth
int depth
Definition: vf_fftfilt.c:47
AVExpr
Definition: eval.c:157
FFTFILTContext::irdft_horizontal
void(* irdft_horizontal)(struct FFTFILTContext *s, AVFrame *out, int w, int h, int plane)
Definition: vf_fftfilt.c:69
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
VAR_N
@ VAR_N
Definition: vf_fftfilt.c:73
av_rdft_calc
void av_rdft_calc(RDFTContext *s, FFTSample *data)
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:80
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:67
NULL
#define NULL
Definition: coverity.c:32
FFTFILTContext::planeheight
int planeheight[MAX_PLANES]
Definition: vf_fftfilt.c:50
av_frame_copy_props
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:658
FFTFILTContext::rdft_hlen
size_t rdft_hlen[MAX_PLANES]
Definition: vf_fftfilt.c:58
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:78
src
#define src
Definition: vp8dsp.c:255
DFT_R2C
@ DFT_R2C
Definition: avfft.h:72
FFTSample
float FFTSample
Definition: avfft.h:35
avfft.h
AV_PIX_FMT_YUV422P10
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:400
inputs
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
Definition: filter_design.txt:243
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:74
initialize
static av_cold int initialize(AVFilterContext *ctx)
Definition: vf_fftfilt.c:212
fftfilt_options
static const AVOption fftfilt_options[]
Definition: vf_fftfilt.c:80
for
for(j=16;j >0;--j)
Definition: h264pred_template.c:469
eval.h
rdft_vertical
static void rdft_vertical(FFTFILTContext *s, int h, int plane)
Definition: vf_fftfilt.c:150
FFTFILTContext::weight_str
char * weight_str[MAX_PLANES]
Definition: vf_fftfilt.c:64
dc
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff) *mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
av_rdft_init
RDFTContext * av_rdft_init(int nbits, enum RDFTransformType trans)
Set up a real FFT.
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:117
FFTFILTContext
Definition: vf_fftfilt.c:43
FFTFILTContext::rdft_horizontal
void(* rdft_horizontal)(struct FFTFILTContext *s, AVFrame *in, int w, int h, int plane)
Definition: vf_fftfilt.c:68
AV_PIX_FMT_YUV422P12
#define AV_PIX_FMT_YUV422P12
Definition: pixfmt.h:404
AV_PIX_FMT_YUV444P12
#define AV_PIX_FMT_YUV444P12
Definition: pixfmt.h:406
FFTFILTContext::hrdft
RDFTContext * hrdft[MAX_PLANES]
Definition: vf_fftfilt.c:52
VAR_Y
@ VAR_Y
Definition: vf_fftfilt.c:73
internal.h
AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC
#define AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC
Some filters support a generic "enable" expression option that can be used to enable or disable a fil...
Definition: avfilter.h:126
in
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi - 0x80) *(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi - 0x80) *(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(const int16_t *) pi >> 8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t, *(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t, *(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(const int32_t *) pi >> 24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t, *(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t, *(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31)))) #define SET_CONV_FUNC_GROUP(ofmt, ifmt) static void set_generic_function(AudioConvert *ac) { } void ff_audio_convert_free(AudioConvert **ac) { if(! 
*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);} AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, int sample_rate, int apply_map) { AudioConvert *ac;int in_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) return NULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method !=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt) > 2) { ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc) { av_free(ac);return NULL;} return ac;} in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar) { ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar ? ac->channels :1;} else if(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;else ac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);return ac;} int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in) { int use_generic=1;int len=in->nb_samples;int p;if(ac->dc) { av_log(ac->avr, AV_LOG_TRACE, "%d samples - audio_convert: %s to %s (dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));return ff_convert_dither(ac-> in
Definition: audio_convert.c:326
i
int i
Definition: input.c:407
FFTFILTContext::rdft_hbits
int rdft_hbits[MAX_PLANES]
Definition: vf_fftfilt.c:56
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:32
RDFTContext
Definition: rdft.h:28
common.h
EvalMode
EvalMode
Definition: af_volume.h:39
VAR_W
@ VAR_W
Definition: vf_fftfilt.c:73
AVFILTER_DEFINE_CLASS
AVFILTER_DEFINE_CLASS(fftfilt)
ff_vf_fftfilt
AVFilter ff_vf_fftfilt
Definition: vf_fftfilt.c:431
AVFilterPad::name
const char * name
Pad name.
Definition: internal.h:60
var_names
static const char *const var_names[]
Definition: vf_fftfilt.c:72
FFTFILTContext::weight_expr
AVExpr * weight_expr[MAX_PLANES]
Definition: vf_fftfilt.c:65
Y
@ Y
Definition: vf_fftfilt.c:75
AV_PIX_FMT_YUV444P9
#define AV_PIX_FMT_YUV444P9
Definition: pixfmt.h:398
weight_U
static double weight_U(void *priv, double x, double y)
Definition: vf_fftfilt.c:102
AVFilter
Filter definition.
Definition: avfilter.h:145
FFTFILTContext::weight
double * weight[MAX_PLANES]
Definition: vf_fftfilt.c:66
ret
ret
Definition: filter_design.txt:187
fftfilt_inputs
static const AVFilterPad fftfilt_inputs[]
Definition: vf_fftfilt.c:413
V
@ V
Definition: vf_fftfilt.c:75
AV_PIX_FMT_YUV420P12
#define AV_PIX_FMT_YUV420P12
Definition: pixfmt.h:403
AV_PIX_FMT_YUV422P14
#define AV_PIX_FMT_YUV422P14
Definition: pixfmt.h:408
irdft_vertical
static void irdft_vertical(FFTFILTContext *s, int h, int plane)
Definition: vf_fftfilt.c:165
filter_frame
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
Definition: vf_fftfilt.c:330
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:225
values
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return values
Definition: filter_design.txt:263
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
AVFilterContext
An instance of a filter.
Definition: avfilter.h:341
weight_V
static double weight_V(void *priv, double x, double y)
Definition: vf_fftfilt.c:103
av_strdup
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:253
desc
const char * desc
Definition: libsvtav1.c:79
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
copy_rev
static void copy_rev(FFTSample *dest, int w, int w2)
Definition: vf_fftfilt.c:105
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
EVAL_MODE_INIT
@ EVAL_MODE_INIT
Definition: vf_fftfilt.c:38
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
lum
static double lum(void *priv, double x, double y, int plane)
Definition: vf_fftfilt.c:95
FFTFILTContext::ivrdft
RDFTContext * ivrdft[MAX_PLANES]
Definition: vf_fftfilt.c:55
query_formats
static int query_formats(AVFilterContext *ctx)
Definition: vf_fftfilt.c:388
imgutils.h
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:561
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:50
av_rdft_end
void av_rdft_end(RDFTContext *s)
weight_Y
static double weight_Y(void *priv, double x, double y)
Definition: vf_fftfilt.c:101
h
h
Definition: vp9dsp_template.c:2038
AV_PIX_FMT_YUV444P14
#define AV_PIX_FMT_YUV444P14
Definition: pixfmt.h:409
AV_OPT_TYPE_STRING
@ AV_OPT_TYPE_STRING
Definition: opt.h:229
FLAGS
#define FLAGS
Definition: vf_fftfilt.c:78
EVAL_MODE_NB
@ EVAL_MODE_NB
Definition: vf_fftfilt.c:40
FFTFILTContext::nb_planes
int nb_planes
Definition: vf_fftfilt.c:48
int
int
Definition: ffmpeg_filter.c:170
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Definition: opt.h:234
VAR_X
@ VAR_X
Definition: vf_fftfilt.c:73
AV_PIX_FMT_YUV420P14
#define AV_PIX_FMT_YUV420P14
Definition: pixfmt.h:407
rdft_horizontal8
static void rdft_horizontal8(FFTFILTContext *s, AVFrame *in, int w, int h, int plane)
Definition: vf_fftfilt.c:117
irdft_horizontal16
static void irdft_horizontal16(FFTFILTContext *s, AVFrame *out, int w, int h, int plane)
Definition: vf_fftfilt.c:194