FFmpeg
vf_fftfilt.c
/*
 * Copyright (c) 2015 Arwa Arif <arwaarif1994@gmail.com>
 * Copyright (c) 2017 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or modify it
 * under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation; either version 2.1 of the License,
 * or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * FFT domain filtering.
 */

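/*
 * Usage sketch (illustrative; the expressions below are assumptions written
 * in the style of the fftfilt examples in doc/filters.texi, not copied from
 * this file):
 *
 *   low-pass blur of the luma plane
 *     ffmpeg -i in.mkv -vf "fftfilt=dc_Y=0:weight_Y='exp(-4*((Y+X)/(W+H)))'" out.mkv
 *
 *   crude high-pass, keeping some DC so the result is not near-black
 *     ffmpeg -i in.mkv -vf "fftfilt=dc_Y=128:weight_Y='squish(1-(Y+X)/100)'" out.mkv
 *
 * X and Y index the mirrored, power-of-two padded frequency plane, W and H
 * are the plane dimensions, WS and HS the padded transform sizes, and N the
 * input frame number. With the default weight of "1" and dc 0 the filter is
 * an identity transform, up to float rounding.
 */
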
#include "internal.h"
#include "video.h"
#include "libavutil/common.h"
#include "libavutil/cpu.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/tx.h"
#include "libavutil/eval.h"

#define MAX_THREADS 32
#define MAX_PLANES 4

enum EvalMode {
    EVAL_MODE_INIT,
    EVAL_MODE_FRAME,
    EVAL_MODE_NB
};

typedef struct FFTFILTContext {
    const AVClass *class;

    int eval_mode;
    int depth;
    int nb_planes;
    int nb_threads;
    int planewidth[MAX_PLANES];
    int planeheight[MAX_PLANES];

    AVTXContext *hrdft[MAX_THREADS][MAX_PLANES];
    AVTXContext *vrdft[MAX_THREADS][MAX_PLANES];
    AVTXContext *ihrdft[MAX_THREADS][MAX_PLANES];
    AVTXContext *ivrdft[MAX_THREADS][MAX_PLANES];

    av_tx_fn htx_fn, ihtx_fn;
    av_tx_fn vtx_fn, ivtx_fn;

    int rdft_hbits[MAX_PLANES];
    int rdft_vbits[MAX_PLANES];
    size_t rdft_hstride[MAX_PLANES];
    size_t rdft_vstride[MAX_PLANES];
    size_t rdft_hlen[MAX_PLANES];
    size_t rdft_vlen[MAX_PLANES];
    float *rdft_hdata_in[MAX_PLANES];
    float *rdft_vdata_in[MAX_PLANES];
    float *rdft_hdata_out[MAX_PLANES];
    float *rdft_vdata_out[MAX_PLANES];

    int dc[MAX_PLANES];
    char *weight_str[MAX_PLANES];
    AVExpr *weight_expr[MAX_PLANES];
    double *weight[MAX_PLANES];

    int (*rdft_horizontal)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs);
    int (*irdft_horizontal)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs);
} FFTFILTContext;

static const char *const var_names[] = { "X", "Y", "W", "H", "N", "WS", "HS", NULL };
enum                                   { VAR_X, VAR_Y, VAR_W, VAR_H, VAR_N, VAR_WS, VAR_HS, VAR_VARS_NB };

enum { Y = 0, U, V };

#define OFFSET(x) offsetof(FFTFILTContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

static const AVOption fftfilt_options[] = {
    { "dc_Y", "adjust gain in Y plane", OFFSET(dc[Y]), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1000, FLAGS },
    { "dc_U", "adjust gain in U plane", OFFSET(dc[U]), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1000, FLAGS },
    { "dc_V", "adjust gain in V plane", OFFSET(dc[V]), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1000, FLAGS },
    { "weight_Y", "set luminance expression in Y plane",   OFFSET(weight_str[Y]), AV_OPT_TYPE_STRING, {.str = "1"},  0, 0, FLAGS },
    { "weight_U", "set chrominance expression in U plane", OFFSET(weight_str[U]), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
    { "weight_V", "set chrominance expression in V plane", OFFSET(weight_str[V]), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
    { "eval", "specify when to evaluate expressions", OFFSET(eval_mode), AV_OPT_TYPE_INT, {.i64 = EVAL_MODE_INIT}, 0, EVAL_MODE_NB-1, FLAGS, .unit = "eval" },
         { "init",  "eval expressions once during initialization", 0, AV_OPT_TYPE_CONST, {.i64 = EVAL_MODE_INIT},  .flags = FLAGS, .unit = "eval" },
         { "frame", "eval expressions per-frame",                  0, AV_OPT_TYPE_CONST, {.i64 = EVAL_MODE_FRAME}, .flags = FLAGS, .unit = "eval" },
    { NULL },
};

AVFILTER_DEFINE_CLASS(fftfilt);

static inline double lum(void *priv, double x, double y, int plane)
{
    FFTFILTContext *s = priv;
    return s->rdft_vdata_out[plane][(int)x * s->rdft_vstride[plane] + (int)y];
}

static double weight_Y(void *priv, double x, double y) { return lum(priv, x, y, Y); }
static double weight_U(void *priv, double x, double y) { return lum(priv, x, y, U); }
static double weight_V(void *priv, double x, double y) { return lum(priv, x, y, V); }

static void copy_rev(float *dest, int w, int w2)
{
    int i;

    for (i = w; i < w + (w2-w)/2; i++)
        dest[i] = dest[2*w - i - 1];

    for (; i < w2; i++)
        dest[i] = dest[w2 - i];
}
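
/*
 * Worked example for copy_rev() (an illustration; the values follow directly
 * from the loops above): padding w = 5 samples a0..a4 out to w2 = 8 fills
 *     dest[5] = dest[4], dest[6] = dest[2], dest[7] = dest[1]
 * so the padded tail roughly mirrors the signal instead of dropping to zero,
 * keeping the boundary discontinuity, and hence the ringing after the
 * forward/inverse transform round trip, small.
 */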

static int rdft_horizontal8(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    FFTFILTContext *s = ctx->priv;
    AVFrame *in = arg;

    for (int plane = 0; plane < s->nb_planes; plane++) {
        const int w = s->planewidth[plane];
        const int h = s->planeheight[plane];
        const int slice_start = (h * jobnr) / nb_jobs;
        const int slice_end = (h * (jobnr+1)) / nb_jobs;

        for (int i = slice_start; i < slice_end; i++) {
            const uint8_t *src = in->data[plane] + i * in->linesize[plane];
            float *hdata_in = s->rdft_hdata_in[plane] + i * s->rdft_hstride[plane];

            for (int j = 0; j < w; j++)
                hdata_in[j] = src[j];

            copy_rev(s->rdft_hdata_in[plane] + i * s->rdft_hstride[plane], w, s->rdft_hlen[plane]);
        }

        for (int i = slice_start; i < slice_end; i++)
            s->htx_fn(s->hrdft[jobnr][plane],
                      s->rdft_hdata_out[plane] + i * s->rdft_hstride[plane],
                      s->rdft_hdata_in[plane] + i * s->rdft_hstride[plane],
                      sizeof(float));
    }

    return 0;
}

static int rdft_horizontal16(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    FFTFILTContext *s = ctx->priv;
    AVFrame *in = arg;

    for (int plane = 0; plane < s->nb_planes; plane++) {
        const int w = s->planewidth[plane];
        const int h = s->planeheight[plane];
        const int slice_start = (h * jobnr) / nb_jobs;
        const int slice_end = (h * (jobnr+1)) / nb_jobs;

        for (int i = slice_start; i < slice_end; i++) {
            const uint16_t *src = (const uint16_t *)(in->data[plane] + i * in->linesize[plane]);
            float *hdata_in = s->rdft_hdata_in[plane] + i * s->rdft_hstride[plane];

            for (int j = 0; j < w; j++)
                hdata_in[j] = src[j];

            copy_rev(s->rdft_hdata_in[plane] + i * s->rdft_hstride[plane], w, s->rdft_hlen[plane]);
        }

        for (int i = slice_start; i < slice_end; i++)
            s->htx_fn(s->hrdft[jobnr][plane],
                      s->rdft_hdata_out[plane] + i * s->rdft_hstride[plane],
                      s->rdft_hdata_in[plane] + i * s->rdft_hstride[plane],
                      sizeof(float));
    }

    return 0;
}

static int irdft_horizontal8(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    FFTFILTContext *s = ctx->priv;
    AVFrame *out = arg;

    for (int plane = 0; plane < s->nb_planes; plane++) {
        const int w = s->planewidth[plane];
        const int h = s->planeheight[plane];
        const int slice_start = (h * jobnr) / nb_jobs;
        const int slice_end = (h * (jobnr+1)) / nb_jobs;

        for (int i = slice_start; i < slice_end; i++)
            s->ihtx_fn(s->ihrdft[jobnr][plane],
                       s->rdft_hdata_out[plane] + i * s->rdft_hstride[plane],
                       s->rdft_hdata_in[plane] + i * s->rdft_hstride[plane],
                       sizeof(AVComplexFloat));

        for (int i = slice_start; i < slice_end; i++) {
            const float scale = 1.f / (s->rdft_hlen[plane] * s->rdft_vlen[plane]);
            const float *src = s->rdft_hdata_out[plane] + i * s->rdft_hstride[plane];
            uint8_t *dst = out->data[plane] + i * out->linesize[plane];

            for (int j = 0; j < w; j++)
                dst[j] = av_clip_uint8(lrintf(src[j] * scale));
        }
    }

    return 0;
}

static int irdft_horizontal16(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    FFTFILTContext *s = ctx->priv;
    AVFrame *out = arg;

    for (int plane = 0; plane < s->nb_planes; plane++) {
        int max = (1 << s->depth) - 1;
        const int w = s->planewidth[plane];
        const int h = s->planeheight[plane];
        const int slice_start = (h * jobnr) / nb_jobs;
        const int slice_end = (h * (jobnr+1)) / nb_jobs;

        for (int i = slice_start; i < slice_end; i++)
            s->ihtx_fn(s->ihrdft[jobnr][plane],
                       s->rdft_hdata_out[plane] + i * s->rdft_hstride[plane],
                       s->rdft_hdata_in[plane] + i * s->rdft_hstride[plane],
                       sizeof(AVComplexFloat));

        for (int i = slice_start; i < slice_end; i++) {
            const float scale = 1.f / (s->rdft_hlen[plane] * s->rdft_vlen[plane]);
            const float *src = s->rdft_hdata_out[plane] + i * s->rdft_hstride[plane];
            uint16_t *dst = (uint16_t *)(out->data[plane] + i * out->linesize[plane]);

            for (int j = 0; j < w; j++)
                dst[j] = av_clip(lrintf(src[j] * scale), 0, max);
        }
    }

    return 0;
}

static av_cold int initialize(AVFilterContext *ctx)
{
    FFTFILTContext *s = ctx->priv;
    int ret = 0, plane;

    if (!s->dc[U] && !s->dc[V]) {
        s->dc[U] = s->dc[Y];
        s->dc[V] = s->dc[Y];
    } else {
        if (!s->dc[U]) s->dc[U] = s->dc[V];
        if (!s->dc[V]) s->dc[V] = s->dc[U];
    }

    if (!s->weight_str[U] && !s->weight_str[V]) {
        s->weight_str[U] = av_strdup(s->weight_str[Y]);
        s->weight_str[V] = av_strdup(s->weight_str[Y]);
    } else {
        if (!s->weight_str[U]) s->weight_str[U] = av_strdup(s->weight_str[V]);
        if (!s->weight_str[V]) s->weight_str[V] = av_strdup(s->weight_str[U]);
    }

    for (plane = 0; plane < 3; plane++) {
        static double (*p[])(void *, double, double) = { weight_Y, weight_U, weight_V };
        const char *const func2_names[] = { "weight_Y", "weight_U", "weight_V", NULL };
        double (*func2[])(void *, double, double) = { weight_Y, weight_U, weight_V, p[plane], NULL };

        ret = av_expr_parse(&s->weight_expr[plane], s->weight_str[plane], var_names,
                            NULL, NULL, func2_names, func2, 0, ctx);
        if (ret < 0)
            break;
    }
    return ret;
}

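/*
 * Option fallback illustration (a sketch of the behaviour implemented above,
 * with an assumed command line): given "fftfilt=dc_Y=16:weight_Y='1'" and no
 * chroma options, dc_U/dc_V inherit 16 and weight_U/weight_V inherit the
 * luma expression; if only one chroma option is set, the other copies it.
 */
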
static void do_eval(FFTFILTContext *s, AVFilterLink *inlink, int plane)
{
    double values[VAR_VARS_NB];
    int i, j;

    values[VAR_N] = inlink->frame_count_out;
    values[VAR_W] = s->planewidth[plane];
    values[VAR_H] = s->planeheight[plane];
    values[VAR_WS] = s->rdft_hlen[plane];
    values[VAR_HS] = s->rdft_vlen[plane];

    for (i = 0; i < s->rdft_hlen[plane]; i++) {
        values[VAR_X] = i;
        for (j = 0; j < s->rdft_vlen[plane]; j++) {
            values[VAR_Y] = j;
            s->weight[plane][i * s->rdft_vlen[plane] + j] =
                av_expr_eval(s->weight_expr[plane], values, s);
        }
    }
}

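/*
 * Evaluation cost: do_eval() calls av_expr_eval() rdft_hlen * rdft_vlen
 * times per plane, once per padded frequency bin, so eval=frame re-runs the
 * expression over the whole padded plane for every input frame while
 * eval=init pays that cost only once at configuration time.
 */
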
static int config_props(AVFilterLink *inlink)
{
    FFTFILTContext *s = inlink->dst->priv;
    const AVPixFmtDescriptor *desc;
    int ret, i, plane;

    desc = av_pix_fmt_desc_get(inlink->format);
    s->depth = desc->comp[0].depth;
    s->planewidth[1] = s->planewidth[2] = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
    s->planewidth[0] = s->planewidth[3] = inlink->w;
    s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
    s->planeheight[0] = s->planeheight[3] = inlink->h;

    s->nb_planes = av_pix_fmt_count_planes(inlink->format);
    s->nb_threads = FFMIN(32, ff_filter_get_nb_threads(inlink->dst));

    for (i = 0; i < desc->nb_components; i++) {
        int w = s->planewidth[i];
        int h = s->planeheight[i];

        /* RDFT - Array initialization for Horizontal pass */
        s->rdft_hlen[i] = 1 << (32 - ff_clz(w));
        s->rdft_hstride[i] = FFALIGN(s->rdft_hlen[i] + 2, av_cpu_max_align());
        s->rdft_hbits[i] = av_log2(s->rdft_hlen[i]);
        if (!(s->rdft_hdata_in[i] = av_calloc(h, s->rdft_hstride[i] * sizeof(float))))
            return AVERROR(ENOMEM);

        if (!(s->rdft_hdata_out[i] = av_calloc(h, s->rdft_hstride[i] * sizeof(float))))
            return AVERROR(ENOMEM);

        for (int j = 0; j < s->nb_threads; j++) {
            float scale = 1.f, iscale = 1.f;

            ret = av_tx_init(&s->hrdft[j][i], &s->htx_fn, AV_TX_FLOAT_RDFT,
                             0, 1 << s->rdft_hbits[i], &scale, 0);
            if (ret < 0)
                return ret;
            ret = av_tx_init(&s->ihrdft[j][i], &s->ihtx_fn, AV_TX_FLOAT_RDFT,
                             1, 1 << s->rdft_hbits[i], &iscale, 0);
            if (ret < 0)
                return ret;
        }

        /* RDFT - Array initialization for Vertical pass */
        s->rdft_vlen[i] = 1 << (32 - ff_clz(h));
        s->rdft_vstride[i] = FFALIGN(s->rdft_vlen[i] + 2, av_cpu_max_align());
        s->rdft_vbits[i] = av_log2(s->rdft_vlen[i]);
        if (!(s->rdft_vdata_in[i] = av_calloc(s->rdft_hstride[i], s->rdft_vstride[i] * sizeof(float))))
            return AVERROR(ENOMEM);

        if (!(s->rdft_vdata_out[i] = av_calloc(s->rdft_hstride[i], s->rdft_vstride[i] * sizeof(float))))
            return AVERROR(ENOMEM);

        for (int j = 0; j < s->nb_threads; j++) {
            float scale = 1.f, iscale = 1.f;

            ret = av_tx_init(&s->vrdft[j][i], &s->vtx_fn, AV_TX_FLOAT_RDFT,
                             0, 1 << s->rdft_vbits[i], &scale, 0);
            if (ret < 0)
                return ret;
            ret = av_tx_init(&s->ivrdft[j][i], &s->ivtx_fn, AV_TX_FLOAT_RDFT,
                             1, 1 << s->rdft_vbits[i], &iscale, 0);
            if (ret < 0)
                return ret;
        }
    }

    /* Luminance value - Array initialization */
    for (plane = 0; plane < 3; plane++) {
        if (!(s->weight[plane] = av_calloc(s->rdft_hlen[plane], s->rdft_vlen[plane] * sizeof(double))))
            return AVERROR(ENOMEM);

        if (s->eval_mode == EVAL_MODE_INIT)
            do_eval(s, inlink, plane);
    }

    if (s->depth <= 8) {
        s->rdft_horizontal = rdft_horizontal8;
        s->irdft_horizontal = irdft_horizontal8;
    } else if (s->depth > 8) {
        s->rdft_horizontal = rdft_horizontal16;
        s->irdft_horizontal = irdft_horizontal16;
    } else {
        return AVERROR_BUG;
    }
    return 0;
}
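
/*
 * Sizing example (follows from the code above): rdft_hlen/rdft_vlen are the
 * smallest power of two strictly greater than the plane dimension, so a
 * 720x576 luma plane is padded to a 1024x1024 transform and its 360x288
 * 4:2:0 chroma planes to 512x512. Each row's stride reserves hlen + 2 floats,
 * enough for the hlen/2 + 1 packed complex bins that AV_TX_FLOAT_RDFT writes.
 */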

static int multiply_data(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    FFTFILTContext *s = ctx->priv;

    for (int plane = 0; plane < s->nb_planes; plane++) {
        const int height = s->rdft_hlen[plane];
        const int slice_start = (height * jobnr) / nb_jobs;
        const int slice_end = (height * (jobnr+1)) / nb_jobs;

        /* Apply the user-supplied frequency-domain weights */
        for (int i = slice_start; i < slice_end; i++) {
            const double *weight = s->weight[plane] + i * s->rdft_vlen[plane];
            float *vdata = s->rdft_vdata_out[plane] + i * s->rdft_vstride[plane];

            for (int j = 0; j < s->rdft_vlen[plane]; j++)
                vdata[j] *= weight[j];
        }
    }

    return 0;
}

static int copy_vertical(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    FFTFILTContext *s = ctx->priv;

    for (int plane = 0; plane < s->nb_planes; plane++) {
        const int hlen = s->rdft_hlen[plane];
        const int vlen = s->rdft_vlen[plane];
        const int hstride = s->rdft_hstride[plane];
        const int vstride = s->rdft_vstride[plane];
        const int slice_start = (hlen * jobnr) / nb_jobs;
        const int slice_end = (hlen * (jobnr+1)) / nb_jobs;
        const int h = s->planeheight[plane];
        float *hdata = s->rdft_hdata_out[plane];
        float *vdata = s->rdft_vdata_in[plane];

        for (int i = slice_start; i < slice_end; i++) {
            for (int j = 0; j < h; j++)
                vdata[i * vstride + j] = hdata[j * hstride + i];
            copy_rev(vdata + i * vstride, h, vlen);
        }
    }

    return 0;
}

static int rdft_vertical(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    FFTFILTContext *s = ctx->priv;

    for (int plane = 0; plane < s->nb_planes; plane++) {
        const int height = s->rdft_hlen[plane];
        const int slice_start = (height * jobnr) / nb_jobs;
        const int slice_end = (height * (jobnr+1)) / nb_jobs;

        for (int i = slice_start; i < slice_end; i++)
            s->vtx_fn(s->vrdft[jobnr][plane],
                      s->rdft_vdata_out[plane] + i * s->rdft_vstride[plane],
                      s->rdft_vdata_in[plane] + i * s->rdft_vstride[plane],
                      sizeof(float));
    }

    return 0;
}

static int irdft_vertical(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    FFTFILTContext *s = ctx->priv;

    for (int plane = 0; plane < s->nb_planes; plane++) {
        const int height = s->rdft_hlen[plane];
        const int slice_start = (height * jobnr) / nb_jobs;
        const int slice_end = (height * (jobnr+1)) / nb_jobs;

        for (int i = slice_start; i < slice_end; i++)
            s->ivtx_fn(s->ivrdft[jobnr][plane],
                       s->rdft_vdata_in[plane] + i * s->rdft_vstride[plane],
                       s->rdft_vdata_out[plane] + i * s->rdft_vstride[plane],
                       sizeof(AVComplexFloat));
    }

    return 0;
}

static int copy_horizontal(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    FFTFILTContext *s = ctx->priv;

    for (int plane = 0; plane < s->nb_planes; plane++) {
        const int hlen = s->rdft_hlen[plane];
        const int hstride = s->rdft_hstride[plane];
        const int vstride = s->rdft_vstride[plane];
        const int slice_start = (hlen * jobnr) / nb_jobs;
        const int slice_end = (hlen * (jobnr+1)) / nb_jobs;
        const int h = s->planeheight[plane];
        float *hdata = s->rdft_hdata_in[plane];
        float *vdata = s->rdft_vdata_in[plane];

        for (int i = slice_start; i < slice_end; i++)
            for (int j = 0; j < h; j++)
                hdata[j * hstride + i] = vdata[i * vstride + j];
    }

    return 0;
}

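/*
 * Per-frame pipeline (summary of filter_frame() below): forward horizontal
 * RDFT of each mirrored, padded row; transpose into the vertical buffers
 * (copy_vertical); forward vertical RDFT; per-bin multiplication by the
 * evaluated weight plane plus the dc offset added to bin 0; inverse vertical
 * RDFT; transpose back (copy_horizontal); inverse horizontal RDFT. The
 * libavutil/tx transforms are unnormalized, which is why the pixels are
 * scaled by 1/(rdft_hlen * rdft_vlen) when written back out.
 */
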
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    FFTFILTContext *s = ctx->priv;
    AVFrame *out;

    out = ff_get_video_buffer(outlink, inlink->w, inlink->h);
    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }

    av_frame_copy_props(out, in);

    ff_filter_execute(ctx, s->rdft_horizontal, in, NULL,
                      FFMIN(s->planeheight[1], s->nb_threads));

    ff_filter_execute(ctx, copy_vertical, NULL, NULL,
                      FFMIN(s->planeheight[1], s->nb_threads));

    ff_filter_execute(ctx, rdft_vertical, NULL, NULL,
                      FFMIN(s->planeheight[1], s->nb_threads));

    for (int plane = 0; plane < s->nb_planes; plane++) {
        if (s->eval_mode == EVAL_MODE_FRAME)
            do_eval(s, inlink, plane);
    }

    ff_filter_execute(ctx, multiply_data, NULL, NULL,
                      FFMIN(s->planeheight[1], s->nb_threads));

    for (int plane = 0; plane < s->nb_planes; plane++)
        s->rdft_vdata_out[plane][0] += s->rdft_hlen[plane] * s->rdft_vlen[plane] * s->dc[plane] * (1 << (s->depth - 8));

    ff_filter_execute(ctx, irdft_vertical, NULL, NULL,
                      FFMIN(s->planeheight[1], s->nb_threads));

    ff_filter_execute(ctx, copy_horizontal, NULL, NULL,
                      FFMIN(s->planeheight[1], s->nb_threads));

    ff_filter_execute(ctx, s->irdft_horizontal, out, NULL,
                      FFMIN(s->planeheight[1], s->nb_threads));

    av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}

static av_cold void uninit(AVFilterContext *ctx)
{
    FFTFILTContext *s = ctx->priv;

    for (int i = 0; i < MAX_PLANES; i++) {
        av_freep(&s->rdft_hdata_in[i]);
        av_freep(&s->rdft_vdata_in[i]);
        av_freep(&s->rdft_hdata_out[i]);
        av_freep(&s->rdft_vdata_out[i]);
        av_expr_free(s->weight_expr[i]);
        av_freep(&s->weight[i]);
        for (int j = 0; j < s->nb_threads; j++) {
            av_tx_uninit(&s->hrdft[j][i]);
            av_tx_uninit(&s->ihrdft[j][i]);
            av_tx_uninit(&s->vrdft[j][i]);
            av_tx_uninit(&s->ivrdft[j][i]);
        }
    }
}

static const enum AVPixelFormat pixel_fmts_fftfilt[] = {
    AV_PIX_FMT_GRAY8,
    AV_PIX_FMT_GRAY9,     AV_PIX_FMT_GRAY10,
    AV_PIX_FMT_GRAY12,    AV_PIX_FMT_GRAY14,
    AV_PIX_FMT_GRAY16,
    AV_PIX_FMT_YUV420P,   AV_PIX_FMT_YUV422P,   AV_PIX_FMT_YUV444P,
    AV_PIX_FMT_YUVJ420P,  AV_PIX_FMT_YUVJ422P,  AV_PIX_FMT_YUVJ444P,
    AV_PIX_FMT_YUV420P9,  AV_PIX_FMT_YUV422P9,  AV_PIX_FMT_YUV444P9,
    AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
    AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12,
    AV_PIX_FMT_YUV420P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV444P14,
    AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
    AV_PIX_FMT_NONE
};

static const AVFilterPad fftfilt_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_props,
        .filter_frame = filter_frame,
    },
};

const AVFilter ff_vf_fftfilt = {
    .name          = "fftfilt",
    .description   = NULL_IF_CONFIG_SMALL("Apply arbitrary expressions to pixels in frequency domain."),
    .priv_size     = sizeof(FFTFILTContext),
    .priv_class    = &fftfilt_class,
    FILTER_INPUTS(fftfilt_inputs),
    FILTER_OUTPUTS(ff_video_default_filterpad),
    FILTER_PIXFMTS_ARRAY(pixel_fmts_fftfilt),
    .init          = initialize,
    .uninit        = uninit,
    .flags         = AVFILTER_FLAG_SLICE_THREADS | AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};