FFmpeg
vf_convolve.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2017 Paul B Mahol
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <float.h>
22 
23 #include "libavutil/imgutils.h"
24 #include "libavutil/opt.h"
25 #include "libavutil/pixdesc.h"
26 #include "libavutil/tx.h"
27 
28 #include "avfilter.h"
29 #include "formats.h"
30 #include "framesync.h"
31 #include "internal.h"
32 #include "video.h"
33 
34 #define MAX_THREADS 16
35 
36 typedef struct ConvolveContext {
37  const AVClass *class;
39 
42 
45 
46  int fft_len[4];
47  int planewidth[4];
48  int planeheight[4];
49 
50  int primarywidth[4];
51  int primaryheight[4];
52 
55 
64 
65  int depth;
66  int planes;
67  int impulse;
68  float noise;
69  int nb_planes;
70  int got_impulse[4];
71 
72  void (*get_input)(struct ConvolveContext *s, AVComplexFloat *fft_hdata,
73  AVFrame *in, int w, int h, int n, int plane, float scale);
74 
76  int w, int h, int n, int plane, float scale);
77  void (*prepare_impulse)(AVFilterContext *ctx, AVFrame *impulsepic, int plane);
78 
79  int (*filter)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs);
81 
82 #define OFFSET(x) offsetof(ConvolveContext, x)
83 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
84 
/* Options for the "convolve" filter; "deconvolve" exposes an identical set. */
static const AVOption convolve_options[] = {
    { "planes", "set planes to convolve", OFFSET(planes), AV_OPT_TYPE_INT, {.i64=7}, 0, 15, FLAGS },
    { "impulse", "when to process impulses", OFFSET(impulse), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS, "impulse" },
    { "first", "process only first impulse, ignore rest", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "impulse" },
    { "all", "process all impulses", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "impulse" },
    { "noise", "set noise", OFFSET(noise), AV_OPT_TYPE_FLOAT, {.dbl=0.0000001}, 0, 1, FLAGS },
    { NULL },
};
93 
/* Pixel formats accepted by convolve/deconvolve/xcorrelate.
 * NOTE(review): the list entries (original lines 95-112) were lost in
 * extraction; upstream enumerates the 8-16 bit planar YUV/GBR/GRAY
 * formats and terminates with AV_PIX_FMT_NONE -- restore from upstream
 * before building. */
static const enum AVPixelFormat pixel_fmts_fftfilt[] = {
};
114 
116 {
117  ConvolveContext *s = inlink->dst->priv;
119  const int w = inlink->w;
120  const int h = inlink->h;
121 
122  s->planewidth[1] = s->planewidth[2] = AV_CEIL_RSHIFT(w, desc->log2_chroma_w);
123  s->planewidth[0] = s->planewidth[3] = w;
124  s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(h, desc->log2_chroma_h);
125  s->planeheight[0] = s->planeheight[3] = h;
126 
127  s->nb_planes = desc->nb_components;
128  s->depth = desc->comp[0].depth;
129 
130  for (int i = 0; i < s->nb_planes; i++) {
131  int w = s->planewidth[i];
132  int h = s->planeheight[i];
133  int n = FFMAX(w, h);
134 
135  s->fft_len[i] = 1 << (av_log2(2 * n - 1));
136 
137  if (!(s->fft_hdata_in[i] = av_calloc(s->fft_len[i], s->fft_len[i] * sizeof(AVComplexFloat))))
138  return AVERROR(ENOMEM);
139 
140  if (!(s->fft_hdata_out[i] = av_calloc(s->fft_len[i], s->fft_len[i] * sizeof(AVComplexFloat))))
141  return AVERROR(ENOMEM);
142 
143  if (!(s->fft_vdata_in[i] = av_calloc(s->fft_len[i], s->fft_len[i] * sizeof(AVComplexFloat))))
144  return AVERROR(ENOMEM);
145 
146  if (!(s->fft_vdata_out[i] = av_calloc(s->fft_len[i], s->fft_len[i] * sizeof(AVComplexFloat))))
147  return AVERROR(ENOMEM);
148 
149  if (!(s->fft_hdata_impulse_in[i] = av_calloc(s->fft_len[i], s->fft_len[i] * sizeof(AVComplexFloat))))
150  return AVERROR(ENOMEM);
151 
152  if (!(s->fft_vdata_impulse_in[i] = av_calloc(s->fft_len[i], s->fft_len[i] * sizeof(AVComplexFloat))))
153  return AVERROR(ENOMEM);
154 
155  if (!(s->fft_hdata_impulse_out[i] = av_calloc(s->fft_len[i], s->fft_len[i] * sizeof(AVComplexFloat))))
156  return AVERROR(ENOMEM);
157 
158  if (!(s->fft_vdata_impulse_out[i] = av_calloc(s->fft_len[i], s->fft_len[i] * sizeof(AVComplexFloat))))
159  return AVERROR(ENOMEM);
160  }
161 
162  return 0;
163 }
164 
166 {
167  AVFilterContext *ctx = inlink->dst;
168 
169  if (ctx->inputs[0]->w != ctx->inputs[1]->w ||
170  ctx->inputs[0]->h != ctx->inputs[1]->h) {
171  av_log(ctx, AV_LOG_ERROR, "Width and height of input videos must be same.\n");
172  return AVERROR(EINVAL);
173  }
174 
175  return 0;
176 }
177 
178 typedef struct ThreadData {
181  int plane, n;
182 } ThreadData;
183 
184 static int fft_horizontal(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
185 {
186  ConvolveContext *s = ctx->priv;
187  ThreadData *td = arg;
188  AVComplexFloat *hdata_in = td->hdata_in;
189  AVComplexFloat *hdata_out = td->hdata_out;
190  const int plane = td->plane;
191  const int n = td->n;
192  int start = (n * jobnr) / nb_jobs;
193  int end = (n * (jobnr+1)) / nb_jobs;
194  int y;
195 
196  for (y = start; y < end; y++) {
197  s->tx_fn[plane](s->fft[plane][jobnr], hdata_out + y * n, hdata_in + y * n, sizeof(float));
198  }
199 
200  return 0;
201 }
202 
203 #define SQR(x) ((x) * (x))
204 
206  AVComplexFloat *fft_hdata,
207  AVFrame *in, int w, int h,
208  int n, int plane, float scale)
209 {
210  float sum = 0.f;
211  float mean, dev;
212  int y, x;
213 
214  if (s->depth == 8) {
215  for (y = 0; y < h; y++) {
216  const uint8_t *src = in->data[plane] + in->linesize[plane] * y;
217 
218  for (x = 0; x < w; x++)
219  sum += src[x];
220  }
221 
222  mean = sum / (w * h);
223  sum = 0.f;
224  for (y = 0; y < h; y++) {
225  const uint8_t *src = in->data[plane] + in->linesize[plane] * y;
226 
227  for (x = 0; x < w; x++)
228  sum += SQR(src[x] - mean);
229  }
230 
231  dev = sqrtf(sum / (w * h));
232  scale /= dev;
233  for (y = 0; y < h; y++) {
234  const uint8_t *src = in->data[plane] + in->linesize[plane] * y;
235 
236  for (x = 0; x < w; x++) {
237  fft_hdata[y * n + x].re = (src[x] - mean) * scale;
238  fft_hdata[y * n + x].im = 0;
239  }
240 
241  for (x = w; x < n; x++) {
242  fft_hdata[y * n + x].re = 0;
243  fft_hdata[y * n + x].im = 0;
244  }
245  }
246 
247  for (y = h; y < n; y++) {
248  for (x = 0; x < n; x++) {
249  fft_hdata[y * n + x].re = 0;
250  fft_hdata[y * n + x].im = 0;
251  }
252  }
253  } else {
254  for (y = 0; y < h; y++) {
255  const uint16_t *src = (const uint16_t *)(in->data[plane] + in->linesize[plane] * y);
256 
257  for (x = 0; x < w; x++)
258  sum += src[x];
259  }
260 
261  mean = sum / (w * h);
262  sum = 0.f;
263  for (y = 0; y < h; y++) {
264  const uint16_t *src = (const uint16_t *)(in->data[plane] + in->linesize[plane] * y);
265 
266  for (x = 0; x < w; x++)
267  sum += SQR(src[x] - mean);
268  }
269 
270  dev = sqrtf(sum / (w * h));
271  scale /= dev;
272  for (y = 0; y < h; y++) {
273  const uint16_t *src = (const uint16_t *)(in->data[plane] + in->linesize[plane] * y);
274 
275  for (x = 0; x < w; x++) {
276  fft_hdata[y * n + x].re = (src[x] - mean) * scale;
277  fft_hdata[y * n + x].im = 0;
278  }
279 
280  for (x = w; x < n; x++) {
281  fft_hdata[y * n + x].re = 0;
282  fft_hdata[y * n + x].im = 0;
283  }
284  }
285 
286  for (y = h; y < n; y++) {
287  for (x = 0; x < n; x++) {
288  fft_hdata[y * n + x].re = 0;
289  fft_hdata[y * n + x].im = 0;
290  }
291  }
292  }
293 }
294 
295 static void get_input(ConvolveContext *s, AVComplexFloat *fft_hdata,
296  AVFrame *in, int w, int h, int n, int plane, float scale)
297 {
298  const int iw = (n - w) / 2, ih = (n - h) / 2;
299  int y, x;
300 
301  if (s->depth == 8) {
302  for (y = 0; y < h; y++) {
303  const uint8_t *src = in->data[plane] + in->linesize[plane] * y;
304 
305  for (x = 0; x < w; x++) {
306  fft_hdata[(y + ih) * n + iw + x].re = src[x] * scale;
307  fft_hdata[(y + ih) * n + iw + x].im = 0;
308  }
309 
310  for (x = 0; x < iw; x++) {
311  fft_hdata[(y + ih) * n + x].re = fft_hdata[(y + ih) * n + iw].re;
312  fft_hdata[(y + ih) * n + x].im = 0;
313  }
314 
315  for (x = n - iw; x < n; x++) {
316  fft_hdata[(y + ih) * n + x].re = fft_hdata[(y + ih) * n + n - iw - 1].re;
317  fft_hdata[(y + ih) * n + x].im = 0;
318  }
319  }
320 
321  for (y = 0; y < ih; y++) {
322  for (x = 0; x < n; x++) {
323  fft_hdata[y * n + x].re = fft_hdata[ih * n + x].re;
324  fft_hdata[y * n + x].im = 0;
325  }
326  }
327 
328  for (y = n - ih; y < n; y++) {
329  for (x = 0; x < n; x++) {
330  fft_hdata[y * n + x].re = fft_hdata[(n - ih - 1) * n + x].re;
331  fft_hdata[y * n + x].im = 0;
332  }
333  }
334  } else {
335  for (y = 0; y < h; y++) {
336  const uint16_t *src = (const uint16_t *)(in->data[plane] + in->linesize[plane] * y);
337 
338  for (x = 0; x < w; x++) {
339  fft_hdata[(y + ih) * n + iw + x].re = src[x] * scale;
340  fft_hdata[(y + ih) * n + iw + x].im = 0;
341  }
342 
343  for (x = 0; x < iw; x++) {
344  fft_hdata[(y + ih) * n + x].re = fft_hdata[(y + ih) * n + iw].re;
345  fft_hdata[(y + ih) * n + x].im = 0;
346  }
347 
348  for (x = n - iw; x < n; x++) {
349  fft_hdata[(y + ih) * n + x].re = fft_hdata[(y + ih) * n + n - iw - 1].re;
350  fft_hdata[(y + ih) * n + x].im = 0;
351  }
352  }
353 
354  for (y = 0; y < ih; y++) {
355  for (x = 0; x < n; x++) {
356  fft_hdata[y * n + x].re = fft_hdata[ih * n + x].re;
357  fft_hdata[y * n + x].im = 0;
358  }
359  }
360 
361  for (y = n - ih; y < n; y++) {
362  for (x = 0; x < n; x++) {
363  fft_hdata[y * n + x].re = fft_hdata[(n - ih - 1) * n + x].re;
364  fft_hdata[y * n + x].im = 0;
365  }
366  }
367  }
368 }
369 
370 static int fft_vertical(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
371 {
372  ConvolveContext *s = ctx->priv;
373  ThreadData *td = arg;
374  AVComplexFloat *hdata = td->hdata_out;
375  AVComplexFloat *vdata_in = td->vdata_in;
376  AVComplexFloat *vdata_out = td->vdata_out;
377  const int plane = td->plane;
378  const int n = td->n;
379  int start = (n * jobnr) / nb_jobs;
380  int end = (n * (jobnr+1)) / nb_jobs;
381  int y, x;
382 
383  for (y = start; y < end; y++) {
384  for (x = 0; x < n; x++) {
385  vdata_in[y * n + x].re = hdata[x * n + y].re;
386  vdata_in[y * n + x].im = hdata[x * n + y].im;
387  }
388 
389  s->tx_fn[plane](s->fft[plane][jobnr], vdata_out + y * n, vdata_in + y * n, sizeof(float));
390  }
391 
392  return 0;
393 }
394 
395 static int ifft_vertical(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
396 {
397  ConvolveContext *s = ctx->priv;
398  ThreadData *td = arg;
399  AVComplexFloat *hdata = td->hdata_out;
400  AVComplexFloat *vdata_out = td->vdata_out;
401  AVComplexFloat *vdata_in = td->vdata_in;
402  const int plane = td->plane;
403  const int n = td->n;
404  int start = (n * jobnr) / nb_jobs;
405  int end = (n * (jobnr+1)) / nb_jobs;
406  int y, x;
407 
408  for (y = start; y < end; y++) {
409  s->itx_fn[plane](s->ifft[plane][jobnr], vdata_out + y * n, vdata_in + y * n, sizeof(float));
410 
411  for (x = 0; x < n; x++) {
412  hdata[x * n + y].re = vdata_out[y * n + x].re;
413  hdata[x * n + y].im = vdata_out[y * n + x].im;
414  }
415  }
416 
417  return 0;
418 }
419 
420 static int ifft_horizontal(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
421 {
422  ConvolveContext *s = ctx->priv;
423  ThreadData *td = arg;
424  AVComplexFloat *hdata_out = td->hdata_out;
425  AVComplexFloat *hdata_in = td->hdata_in;
426  const int plane = td->plane;
427  const int n = td->n;
428  int start = (n * jobnr) / nb_jobs;
429  int end = (n * (jobnr+1)) / nb_jobs;
430  int y;
431 
432  for (y = start; y < end; y++) {
433  s->itx_fn[plane](s->ifft[plane][jobnr], hdata_out + y * n, hdata_in + y * n, sizeof(float));
434  }
435 
436  return 0;
437 }
438 
440  int w, int h, int n, int plane, float scale)
441 {
442  const int imax = (1 << s->depth) - 1;
443 
444  scale *= imax * 16;
445  if (s->depth == 8) {
446  for (int y = 0; y < h; y++) {
447  uint8_t *dst = out->data[plane] + y * out->linesize[plane];
448  for (int x = 0; x < w; x++)
449  dst[x] = av_clip_uint8(input[y * n + x].re * scale);
450  }
451  } else {
452  for (int y = 0; y < h; y++) {
453  uint16_t *dst = (uint16_t *)(out->data[plane] + y * out->linesize[plane]);
454  for (int x = 0; x < w; x++)
455  dst[x] = av_clip(input[y * n + x].re * scale, 0, imax);
456  }
457  }
458 }
459 
461  int w, int h, int n, int plane, float scale)
462 {
463  const int max = (1 << s->depth) - 1;
464  const int hh = h / 2;
465  const int hw = w / 2;
466  int y, x;
467 
468  if (s->depth == 8) {
469  for (y = 0; y < hh; y++) {
470  uint8_t *dst = out->data[plane] + (y + hh) * out->linesize[plane] + hw;
471  for (x = 0; x < hw; x++)
472  dst[x] = av_clip_uint8(input[y * n + x].re * scale);
473  }
474  for (y = 0; y < hh; y++) {
475  uint8_t *dst = out->data[plane] + (y + hh) * out->linesize[plane];
476  for (x = 0; x < hw; x++)
477  dst[x] = av_clip_uint8(input[y * n + n - hw + x].re * scale);
478  }
479  for (y = 0; y < hh; y++) {
480  uint8_t *dst = out->data[plane] + y * out->linesize[plane] + hw;
481  for (x = 0; x < hw; x++)
482  dst[x] = av_clip_uint8(input[(n - hh + y) * n + x].re * scale);
483  }
484  for (y = 0; y < hh; y++) {
485  uint8_t *dst = out->data[plane] + y * out->linesize[plane];
486  for (x = 0; x < hw; x++)
487  dst[x] = av_clip_uint8(input[(n - hh + y) * n + n - hw + x].re * scale);
488  }
489  } else {
490  for (y = 0; y < hh; y++) {
491  uint16_t *dst = (uint16_t *)(out->data[plane] + (y + hh) * out->linesize[plane] + hw * 2);
492  for (x = 0; x < hw; x++)
493  dst[x] = av_clip(input[y * n + x].re * scale, 0, max);
494  }
495  for (y = 0; y < hh; y++) {
496  uint16_t *dst = (uint16_t *)(out->data[plane] + (y + hh) * out->linesize[plane]);
497  for (x = 0; x < hw; x++)
498  dst[x] = av_clip(input[y * n + n - hw + x].re * scale, 0, max);
499  }
500  for (y = 0; y < hh; y++) {
501  uint16_t *dst = (uint16_t *)(out->data[plane] + y * out->linesize[plane] + hw * 2);
502  for (x = 0; x < hw; x++)
503  dst[x] = av_clip(input[(n - hh + y) * n + x].re * scale, 0, max);
504  }
505  for (y = 0; y < hh; y++) {
506  uint16_t *dst = (uint16_t *)(out->data[plane] + y * out->linesize[plane]);
507  for (x = 0; x < hw; x++)
508  dst[x] = av_clip(input[(n - hh + y) * n + n - hw + x].re * scale, 0, max);
509  }
510  }
511 }
512 
513 static int complex_multiply(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
514 {
515  ConvolveContext *s = ctx->priv;
516  ThreadData *td = arg;
517  AVComplexFloat *input = td->hdata_in;
518  AVComplexFloat *filter = td->vdata_in;
519  const float noise = s->noise;
520  const int n = td->n;
521  int start = (n * jobnr) / nb_jobs;
522  int end = (n * (jobnr+1)) / nb_jobs;
523  int y, x;
524 
525  for (y = start; y < end; y++) {
526  int yn = y * n;
527 
528  for (x = 0; x < n; x++) {
529  float re, im, ire, iim;
530 
531  re = input[yn + x].re;
532  im = input[yn + x].im;
533  ire = filter[yn + x].re + noise;
534  iim = filter[yn + x].im;
535 
536  input[yn + x].re = ire * re - iim * im;
537  input[yn + x].im = iim * re + ire * im;
538  }
539  }
540 
541  return 0;
542 }
543 
544 static int complex_xcorrelate(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
545 {
546  ThreadData *td = arg;
547  AVComplexFloat *input = td->hdata_in;
548  AVComplexFloat *filter = td->vdata_in;
549  const int n = td->n;
550  const float scale = 1.f / (n * n);
551  int start = (n * jobnr) / nb_jobs;
552  int end = (n * (jobnr+1)) / nb_jobs;
553 
554  for (int y = start; y < end; y++) {
555  int yn = y * n;
556 
557  for (int x = 0; x < n; x++) {
558  float re, im, ire, iim;
559 
560  re = input[yn + x].re;
561  im = input[yn + x].im;
562  ire = filter[yn + x].re * scale;
563  iim = -filter[yn + x].im * scale;
564 
565  input[yn + x].re = ire * re - iim * im;
566  input[yn + x].im = iim * re + ire * im;
567  }
568  }
569 
570  return 0;
571 }
572 
573 static int complex_divide(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
574 {
575  ConvolveContext *s = ctx->priv;
576  ThreadData *td = arg;
577  AVComplexFloat *input = td->hdata_in;
578  AVComplexFloat *filter = td->vdata_in;
579  const float noise = s->noise;
580  const int n = td->n;
581  int start = (n * jobnr) / nb_jobs;
582  int end = (n * (jobnr+1)) / nb_jobs;
583  int y, x;
584 
585  for (y = start; y < end; y++) {
586  int yn = y * n;
587 
588  for (x = 0; x < n; x++) {
589  float re, im, ire, iim, div;
590 
591  re = input[yn + x].re;
592  im = input[yn + x].im;
593  ire = filter[yn + x].re;
594  iim = filter[yn + x].im;
595  div = ire * ire + iim * iim + noise;
596 
597  input[yn + x].re = (ire * re + iim * im) / div;
598  input[yn + x].im = (ire * im - iim * re) / div;
599  }
600  }
601 
602  return 0;
603 }
604 
605 static void prepare_impulse(AVFilterContext *ctx, AVFrame *impulsepic, int plane)
606 {
607  ConvolveContext *s = ctx->priv;
608  const int n = s->fft_len[plane];
609  const int w = s->secondarywidth[plane];
610  const int h = s->secondaryheight[plane];
611  ThreadData td;
612  float total = 0;
613 
614  if (s->depth == 8) {
615  for (int y = 0; y < h; y++) {
616  const uint8_t *src = (const uint8_t *)(impulsepic->data[plane] + y * impulsepic->linesize[plane]) ;
617  for (int x = 0; x < w; x++) {
618  total += src[x];
619  }
620  }
621  } else {
622  for (int y = 0; y < h; y++) {
623  const uint16_t *src = (const uint16_t *)(impulsepic->data[plane] + y * impulsepic->linesize[plane]) ;
624  for (int x = 0; x < w; x++) {
625  total += src[x];
626  }
627  }
628  }
629  total = FFMAX(1, total);
630 
631  s->get_input(s, s->fft_hdata_impulse_in[plane], impulsepic, w, h, n, plane, 1.f / total);
632 
633  td.n = n;
634  td.plane = plane;
635  td.hdata_in = s->fft_hdata_impulse_in[plane];
636  td.vdata_in = s->fft_vdata_impulse_in[plane];
637  td.hdata_out = s->fft_hdata_impulse_out[plane];
638  td.vdata_out = s->fft_vdata_impulse_out[plane];
639 
644 
645  s->got_impulse[plane] = 1;
646 }
647 
648 static void prepare_secondary(AVFilterContext *ctx, AVFrame *secondary, int plane)
649 {
650  ConvolveContext *s = ctx->priv;
651  const int n = s->fft_len[plane];
652  ThreadData td;
653 
654  s->get_input(s, s->fft_hdata_impulse_in[plane], secondary,
655  s->secondarywidth[plane],
656  s->secondaryheight[plane],
657  n, plane, 1.f);
658 
659  td.n = n;
660  td.plane = plane;
661  td.hdata_in = s->fft_hdata_impulse_in[plane];
662  td.vdata_in = s->fft_vdata_impulse_in[plane];
663  td.hdata_out = s->fft_hdata_impulse_out[plane];
664  td.vdata_out = s->fft_vdata_impulse_out[plane];
665 
670 
671  s->got_impulse[plane] = 1;
672 }
673 
675 {
676  AVFilterContext *ctx = fs->parent;
677  AVFilterLink *outlink = ctx->outputs[0];
678  ConvolveContext *s = ctx->priv;
679  AVFrame *mainpic = NULL, *impulsepic = NULL;
680  int ret, plane;
681 
682  ret = ff_framesync_dualinput_get(fs, &mainpic, &impulsepic);
683  if (ret < 0)
684  return ret;
685  if (!impulsepic)
686  return ff_filter_frame(outlink, mainpic);
687 
688  for (plane = 0; plane < s->nb_planes; plane++) {
689  AVComplexFloat *filter = s->fft_vdata_impulse_out[plane];
690  AVComplexFloat *input = s->fft_vdata_out[plane];
691  const int n = s->fft_len[plane];
692  const int w = s->primarywidth[plane];
693  const int h = s->primaryheight[plane];
694  const int ow = s->planewidth[plane];
695  const int oh = s->planeheight[plane];
696  ThreadData td;
697 
698  if (!(s->planes & (1 << plane))) {
699  continue;
700  }
701 
702  td.plane = plane, td.n = n;
703  s->get_input(s, s->fft_hdata_in[plane], mainpic, w, h, n, plane, 1.f);
704 
705  td.hdata_in = s->fft_hdata_in[plane];
706  td.vdata_in = s->fft_vdata_in[plane];
707  td.hdata_out = s->fft_hdata_out[plane];
708  td.vdata_out = s->fft_vdata_out[plane];
709 
714 
715  if ((!s->impulse && !s->got_impulse[plane]) || s->impulse) {
716  s->prepare_impulse(ctx, impulsepic, plane);
717  }
718 
719  td.hdata_in = input;
720  td.vdata_in = filter;
721 
722  ff_filter_execute(ctx, s->filter, &td, NULL,
724 
725  td.hdata_in = s->fft_hdata_out[plane];
726  td.vdata_in = s->fft_vdata_out[plane];
727  td.hdata_out = s->fft_hdata_in[plane];
728  td.vdata_out = s->fft_vdata_in[plane];
729 
732 
733  td.hdata_out = s->fft_hdata_out[plane];
734  td.hdata_in = s->fft_hdata_in[plane];
735 
738 
739  s->get_output(s, s->fft_hdata_out[plane], mainpic, ow, oh, n, plane, 1.f / (n * n));
740  }
741 
742  return ff_filter_frame(outlink, mainpic);
743 }
744 
745 static int config_output(AVFilterLink *outlink)
746 {
748  AVFilterContext *ctx = outlink->src;
749  ConvolveContext *s = ctx->priv;
750  AVFilterLink *mainlink = ctx->inputs[0];
751  AVFilterLink *secondlink = ctx->inputs[1];
752  int ret, i, j;
753 
754  s->primarywidth[1] = s->primarywidth[2] = AV_CEIL_RSHIFT(mainlink->w, desc->log2_chroma_w);
755  s->primarywidth[0] = s->primarywidth[3] = mainlink->w;
756  s->primaryheight[1] = s->primaryheight[2] = AV_CEIL_RSHIFT(mainlink->h, desc->log2_chroma_h);
757  s->primaryheight[0] = s->primaryheight[3] = mainlink->h;
758 
759  s->secondarywidth[1] = s->secondarywidth[2] = AV_CEIL_RSHIFT(secondlink->w, desc->log2_chroma_w);
760  s->secondarywidth[0] = s->secondarywidth[3] = secondlink->w;
761  s->secondaryheight[1] = s->secondaryheight[2] = AV_CEIL_RSHIFT(secondlink->h, desc->log2_chroma_h);
762  s->secondaryheight[0] = s->secondaryheight[3] = secondlink->h;
763 
764  s->fs.on_event = do_convolve;
766  if (ret < 0)
767  return ret;
768  outlink->w = mainlink->w;
769  outlink->h = mainlink->h;
770  outlink->time_base = mainlink->time_base;
771  outlink->sample_aspect_ratio = mainlink->sample_aspect_ratio;
772  outlink->frame_rate = mainlink->frame_rate;
773 
774  if ((ret = ff_framesync_configure(&s->fs)) < 0)
775  return ret;
776 
777  for (i = 0; i < s->nb_planes; i++) {
778  for (j = 0; j < MAX_THREADS; j++) {
779  float scale;
780 
781  ret = av_tx_init(&s->fft[i][j], &s->tx_fn[i], AV_TX_FLOAT_FFT, 0, s->fft_len[i], &scale, 0);
782  if (ret < 0)
783  return ret;
784  ret = av_tx_init(&s->ifft[i][j], &s->itx_fn[i], AV_TX_FLOAT_FFT, 1, s->fft_len[i], &scale, 0);
785  if (ret < 0)
786  return ret;
787  }
788  }
789 
790  return 0;
791 }
792 
794 {
795  ConvolveContext *s = ctx->priv;
796  return ff_framesync_activate(&s->fs);
797 }
798 
800 {
801  ConvolveContext *s = ctx->priv;
802 
803  if (!strcmp(ctx->filter->name, "convolve")) {
804  s->filter = complex_multiply;
805  s->prepare_impulse = prepare_impulse;
806  s->get_input = get_input;
807  s->get_output = get_output;
808  } else if (!strcmp(ctx->filter->name, "xcorrelate")) {
809  s->filter = complex_xcorrelate;
810  s->prepare_impulse = prepare_secondary;
811  s->get_input = get_zeropadded_input;
812  s->get_output = get_xoutput;
813  } else if (!strcmp(ctx->filter->name, "deconvolve")) {
814  s->filter = complex_divide;
815  s->prepare_impulse = prepare_impulse;
816  s->get_input = get_input;
817  s->get_output = get_output;
818  } else {
819  return AVERROR_BUG;
820  }
821 
822  return 0;
823 }
824 
826 {
827  ConvolveContext *s = ctx->priv;
828  int i, j;
829 
830  for (i = 0; i < 4; i++) {
831  av_freep(&s->fft_hdata_in[i]);
832  av_freep(&s->fft_vdata_in[i]);
833  av_freep(&s->fft_hdata_out[i]);
834  av_freep(&s->fft_vdata_out[i]);
835  av_freep(&s->fft_hdata_impulse_in[i]);
836  av_freep(&s->fft_vdata_impulse_in[i]);
837  av_freep(&s->fft_hdata_impulse_out[i]);
838  av_freep(&s->fft_vdata_impulse_out[i]);
839 
840  for (j = 0; j < MAX_THREADS; j++) {
841  av_tx_uninit(&s->fft[i][j]);
842  av_tx_uninit(&s->ifft[i][j]);
843  }
844  }
845 
846  ff_framesync_uninit(&s->fs);
847 }
848 
/* Input pads: "main" is the stream being filtered, "impulse" supplies the
 * convolution kernel (same dimensions enforced by config_input_impulse). */
static const AVFilterPad convolve_inputs[] = {
    {
        .name = "main",
        .type = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input,
    },{
        .name = "impulse",
        .type = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input_impulse,
    },
};
860 
/* Single video output pad, configured by config_output(). */
static const AVFilterPad convolve_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
        .config_props = config_output,
    },
};
868 
870 
871 #if CONFIG_CONVOLVE_FILTER
872 
874 
875 const AVFilter ff_vf_convolve = {
876  .name = "convolve",
877  .description = NULL_IF_CONFIG_SMALL("Convolve first video stream with second video stream."),
878  .preinit = convolve_framesync_preinit,
879  .init = init,
880  .uninit = uninit,
881  .activate = activate,
882  .priv_size = sizeof(ConvolveContext),
883  .priv_class = &convolve_class,
888 };
889 
890 #endif /* CONFIG_CONVOLVE_FILTER */
891 
892 #if CONFIG_DECONVOLVE_FILTER
893 
/* Options for the "deconvolve" filter (same layout as convolve_options). */
static const AVOption deconvolve_options[] = {
    { "planes", "set planes to deconvolve", OFFSET(planes), AV_OPT_TYPE_INT, {.i64=7}, 0, 15, FLAGS },
    { "impulse", "when to process impulses", OFFSET(impulse), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS, "impulse" },
    { "first", "process only first impulse, ignore rest", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "impulse" },
    { "all", "process all impulses", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "impulse" },
    { "noise", "set noise", OFFSET(noise), AV_OPT_TYPE_FLOAT, {.dbl=0.0000001}, 0, 1, FLAGS },
    { NULL },
};
902 
903 FRAMESYNC_DEFINE_PURE_CLASS(deconvolve, "deconvolve", convolve, deconvolve_options);
904 
905 const AVFilter ff_vf_deconvolve = {
906  .name = "deconvolve",
907  .description = NULL_IF_CONFIG_SMALL("Deconvolve first video stream with second video stream."),
908  .preinit = convolve_framesync_preinit,
909  .init = init,
910  .uninit = uninit,
911  .activate = activate,
912  .priv_size = sizeof(ConvolveContext),
913  .priv_class = &deconvolve_class,
918 };
919 
920 #endif /* CONFIG_DECONVOLVE_FILTER */
921 
922 #if CONFIG_XCORRELATE_FILTER
923 
/* Options for "xcorrelate"; "secondary" reuses the impulse field. */
static const AVOption xcorrelate_options[] = {
    { "planes", "set planes to cross-correlate", OFFSET(planes), AV_OPT_TYPE_INT, {.i64=7}, 0, 15, FLAGS },
    { "secondary", "when to process secondary frame", OFFSET(impulse), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS, "impulse" },
    { "first", "process only first secondary frame, ignore rest", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "impulse" },
    { "all", "process all secondary frames", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "impulse" },
    { NULL },
};
931 
932 FRAMESYNC_DEFINE_PURE_CLASS(xcorrelate, "xcorrelate", convolve, xcorrelate_options);
933 
934 static int config_input_secondary(AVFilterLink *inlink)
935 {
936  AVFilterContext *ctx = inlink->dst;
937 
938  if (ctx->inputs[0]->w <= ctx->inputs[1]->w ||
939  ctx->inputs[0]->h <= ctx->inputs[1]->h) {
940  av_log(ctx, AV_LOG_ERROR, "Width and height of second input videos must be less than first input.\n");
941  return AVERROR(EINVAL);
942  }
943 
944  return 0;
945 }
946 
/* Input pads: "primary" is searched, "secondary" is the (smaller) template. */
static const AVFilterPad xcorrelate_inputs[] = {
    {
        .name = "primary",
        .type = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input,
    },{
        .name = "secondary",
        .type = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input_secondary,
    },
};
958 
/* Single video output pad, configured by config_output(). */
static const AVFilterPad xcorrelate_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
        .config_props = config_output,
    },
};
966 
967 const AVFilter ff_vf_xcorrelate = {
968  .name = "xcorrelate",
969  .description = NULL_IF_CONFIG_SMALL("Cross-correlate first video stream with second video stream."),
970  .preinit = convolve_framesync_preinit,
971  .init = init,
972  .uninit = uninit,
973  .activate = activate,
974  .priv_size = sizeof(ConvolveContext),
975  .priv_class = &xcorrelate_class,
976  FILTER_INPUTS(xcorrelate_inputs),
977  FILTER_OUTPUTS(xcorrelate_outputs),
980 };
981 
982 #endif /* CONFIG_XCORRELATE_FILTER */
AV_PIX_FMT_YUVA422P16
#define AV_PIX_FMT_YUVA422P16
Definition: pixfmt.h:447
AV_PIX_FMT_GBRAP16
#define AV_PIX_FMT_GBRAP16
Definition: pixfmt.h:426
ThreadData::vdata_out
AVComplexFloat * vdata_out
Definition: vf_convolve.c:180
ff_framesync_configure
int ff_framesync_configure(FFFrameSync *fs)
Configure a frame sync structure.
Definition: framesync.c:119
td
#define td
Definition: regdef.h:70
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
av_clip
#define av_clip
Definition: common.h:96
OFFSET
#define OFFSET(x)
Definition: vf_convolve.c:82
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
FRAMESYNC_AUXILIARY_FUNCS
#define FRAMESYNC_AUXILIARY_FUNCS(func_prefix, context, field)
Definition: framesync.h:311
ConvolveContext::filter
int(* filter)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_convolve.c:79
ff_framesync_uninit
void ff_framesync_uninit(FFFrameSync *fs)
Free all memory currently allocated.
Definition: framesync.c:285
out
FILE * out
Definition: movenc.c:54
ff_vf_deconvolve
const AVFilter ff_vf_deconvolve
ff_filter_frame
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1018
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2660
FILTER_PIXFMTS_ARRAY
#define FILTER_PIXFMTS_ARRAY(array)
Definition: internal.h:171
AVTXContext
Definition: tx_priv.h:110
inlink
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
Definition: filter_design.txt:212
complex_divide
static int complex_divide(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_convolve.c:573
ConvolveContext::nb_planes
int nb_planes
Definition: vf_convolve.c:69
AV_PIX_FMT_YUVA422P9
#define AV_PIX_FMT_YUVA422P9
Definition: pixfmt.h:439
im
float im
Definition: fft.c:78
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:317
pixdesc.h
AV_PIX_FMT_YUVA420P16
#define AV_PIX_FMT_YUVA420P16
Definition: pixfmt.h:446
w
uint8_t w
Definition: llviddspenc.c:38
AV_PIX_FMT_YUVA420P10
#define AV_PIX_FMT_YUVA420P10
Definition: pixfmt.h:441
AVOption
AVOption.
Definition: opt.h:247
AV_PIX_FMT_YUV420P10
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:404
float.h
AVComplexFloat
Definition: tx.h:27
AV_PIX_FMT_YUV440P
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:99
max
#define max(a, b)
Definition: cuda_runtime.h:33
filter
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce then the filter should push the output frames on the output link immediately As an exception to the previous rule if the input frame is enough to produce several output frames then the filter needs output only at least one per link The additional frames can be left buffered in the filter
Definition: filter_design.txt:228
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
AVFilter::name
const char * name
Filter name.
Definition: avfilter.h:169
FFFrameSync
Frame sync structure.
Definition: framesync.h:146
video.h
AV_PIX_FMT_YUVA422P10
#define AV_PIX_FMT_YUVA422P10
Definition: pixfmt.h:442
ThreadData::hdata_in
AVComplexFloat * hdata_in
Definition: vf_convolve.c:179
av_tx_init
av_cold int av_tx_init(AVTXContext **ctx, av_tx_fn *tx, enum AVTXType type, int inv, int len, const void *scale, uint64_t flags)
Initialize a transform context with the given configuration (i)MDCTs with an odd length are currently...
Definition: tx.c:228
AV_PIX_FMT_GRAY9
#define AV_PIX_FMT_GRAY9
Definition: pixfmt.h:384
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:338
ConvolveContext::primarywidth
int primarywidth[4]
Definition: vf_convolve.c:50
ThreadData::vdata_in
AVComplexFloat * vdata_in
Definition: vf_convolve.c:179
formats.h
ConvolveContext::fft_hdata_out
AVComplexFloat * fft_hdata_out[4]
Definition: vf_convolve.c:58
AV_PIX_FMT_YUVA420P9
#define AV_PIX_FMT_YUVA420P9
Definition: pixfmt.h:438
AVComplexFloat::im
float im
Definition: tx.h:28
AV_PIX_FMT_GBRP14
#define AV_PIX_FMT_GBRP14
Definition: pixfmt.h:422
AV_PIX_FMT_GBRAP
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:205
AV_PIX_FMT_GBRP10
#define AV_PIX_FMT_GBRP10
Definition: pixfmt.h:420
AV_PIX_FMT_YUVA444P16
#define AV_PIX_FMT_YUVA444P16
Definition: pixfmt.h:448
pixel_fmts_fftfilt
static enum AVPixelFormat pixel_fmts_fftfilt[]
Definition: vf_convolve.c:94
AV_PIX_FMT_YUV422P9
#define AV_PIX_FMT_YUV422P9
Definition: pixfmt.h:402
ifft_horizontal
static int ifft_horizontal(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_convolve.c:420
ConvolveContext::primaryheight
int primaryheight[4]
Definition: vf_convolve.c:51
scale
static av_always_inline float scale(float x, float s)
Definition: vf_v360.c:1388
AV_PIX_FMT_GRAY16
#define AV_PIX_FMT_GRAY16
Definition: pixfmt.h:388
AVFilterPad
A filter pad used for either input or output.
Definition: internal.h:50
ConvolveContext::get_input
void(* get_input)(struct ConvolveContext *s, AVComplexFloat *fft_hdata, AVFrame *in, int w, int h, int n, int plane, float scale)
Definition: vf_convolve.c:72
AV_PIX_FMT_YUV444P10
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:407
AV_PIX_FMT_YUVJ411P
@ AV_PIX_FMT_YUVJ411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor ...
Definition: pixfmt.h:248
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
av_cold
#define av_cold
Definition: attributes.h:90
AV_PIX_FMT_YUV422P16
#define AV_PIX_FMT_YUV422P16
Definition: pixfmt.h:416
av_tx_fn
void(* av_tx_fn)(AVTXContext *s, void *out, void *in, ptrdiff_t stride)
Function pointer to a function to perform the transform.
Definition: tx.h:102
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:79
AV_PIX_FMT_GBRAP10
#define AV_PIX_FMT_GBRAP10
Definition: pixfmt.h:424
ThreadData::plane
int plane
Definition: vf_blend.c:85
s
#define s(width, name)
Definition: cbs_vp9.c:257
ConvolveContext::fft_hdata_in
AVComplexFloat * fft_hdata_in[4]
Definition: vf_convolve.c:56
AV_PIX_FMT_GBRAP12
#define AV_PIX_FMT_GBRAP12
Definition: pixfmt.h:425
AV_PIX_FMT_YUVA420P
@ AV_PIX_FMT_YUVA420P
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:101
AV_PIX_FMT_YUV444P16
#define AV_PIX_FMT_YUV444P16
Definition: pixfmt.h:417
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:51
ConvolveContext::planes
int planes
Definition: vf_convolve.c:66
convolve_inputs
static const AVFilterPad convolve_inputs[]
Definition: vf_convolve.c:849
get_output
static void get_output(ConvolveContext *s, AVComplexFloat *input, AVFrame *out, int w, int h, int n, int plane, float scale)
Definition: vf_convolve.c:460
AV_TX_FLOAT_FFT
@ AV_TX_FLOAT_FFT
Standard complex to complex FFT with sample data type AVComplexFloat.
Definition: tx.h:45
AV_PIX_FMT_YUV420P9
#define AV_PIX_FMT_YUV420P9
Definition: pixfmt.h:401
AV_PIX_FMT_YUV420P16
#define AV_PIX_FMT_YUV420P16
Definition: pixfmt.h:415
get_zeropadded_input
static void get_zeropadded_input(ConvolveContext *s, AVComplexFloat *fft_hdata, AVFrame *in, int w, int h, int n, int plane, float scale)
Definition: vf_convolve.c:205
ctx
AVFormatContext * ctx
Definition: movenc.c:48
AV_PIX_FMT_GRAY14
#define AV_PIX_FMT_GRAY14
Definition: pixfmt.h:387
ConvolveContext::secondarywidth
int secondarywidth[4]
Definition: vf_convolve.c:53
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
fft_vertical
static int fft_vertical(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_convolve.c:370
ConvolveContext::impulse
int impulse
Definition: vf_convolve.c:67
FILTER_INPUTS
#define FILTER_INPUTS(array)
Definition: internal.h:191
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:80
arg
const char * arg
Definition: jacosubdec.c:67
planes
static const struct @321 planes[]
ThreadData::n
int n
Definition: vf_convolve.c:181
AV_PIX_FMT_GRAY10
#define AV_PIX_FMT_GRAY10
Definition: pixfmt.h:385
config_input
static int config_input(AVFilterLink *inlink)
Definition: vf_convolve.c:115
AV_PIX_FMT_GBRP16
#define AV_PIX_FMT_GBRP16
Definition: pixfmt.h:423
ConvolveContext::fft_len
int fft_len[4]
Definition: vf_convolve.c:46
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
NULL
#define NULL
Definition: coverity.c:32
ConvolveContext::tx_fn
av_tx_fn tx_fn[4]
Definition: vf_convolve.c:43
fs
#define fs(width, name, subs,...)
Definition: cbs_vp9.c:259
ConvolveContext::ifft
AVTXContext * ifft[4][MAX_THREADS]
Definition: vf_convolve.c:41
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:78
ConvolveContext::prepare_impulse
void(* prepare_impulse)(AVFilterContext *ctx, AVFrame *impulsepic, int plane)
Definition: vf_convolve.c:77
config_input_impulse
static int config_input_impulse(AVFilterLink *inlink)
Definition: vf_convolve.c:165
src
#define src
Definition: vp8dsp.c:255
prepare_secondary
static void prepare_secondary(AVFilterContext *ctx, AVFrame *secondary, int plane)
Definition: vf_convolve.c:648
AV_PIX_FMT_YUV422P10
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:405
ConvolveContext::fft_hdata_impulse_out
AVComplexFloat * fft_hdata_impulse_out[4]
Definition: vf_convolve.c:62
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:74
FLAGS
#define FLAGS
Definition: vf_convolve.c:83
AV_PIX_FMT_GBRP9
#define AV_PIX_FMT_GBRP9
Definition: pixfmt.h:419
ConvolveContext::fft_vdata_impulse_in
AVComplexFloat * fft_vdata_impulse_in[4]
Definition: vf_convolve.c:61
ConvolveContext::fft
AVTXContext * fft[4][MAX_THREADS]
Definition: vf_convolve.c:40
for
for(j=16;j >0;--j)
Definition: h264pred_template.c:469
ConvolveContext::planeheight
int planeheight[4]
Definition: vf_convolve.c:48
ConvolveContext::fft_vdata_out
AVComplexFloat * fft_vdata_out[4]
Definition: vf_convolve.c:59
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:117
ff_framesync_init_dualinput
int ff_framesync_init_dualinput(FFFrameSync *fs, AVFilterContext *parent)
Initialize a frame sync structure for dualinput.
Definition: framesync.c:353
ConvolveContext::get_output
void(* get_output)(struct ConvolveContext *s, AVComplexFloat *input, AVFrame *out, int w, int h, int n, int plane, float scale)
Definition: vf_convolve.c:75
ConvolveContext::secondaryheight
int secondaryheight[4]
Definition: vf_convolve.c:54
AV_PIX_FMT_YUV422P12
#define AV_PIX_FMT_YUV422P12
Definition: pixfmt.h:409
AVComplexFloat::re
float re
Definition: tx.h:28
FRAMESYNC_DEFINE_PURE_CLASS
#define FRAMESYNC_DEFINE_PURE_CLASS(name, desc, func_prefix, options)
Definition: framesync.h:297
AV_PIX_FMT_YUV444P12
#define AV_PIX_FMT_YUV444P12
Definition: pixfmt.h:411
convolve_outputs
static const AVFilterPad convolve_outputs[]
Definition: vf_convolve.c:861
ConvolveContext::itx_fn
av_tx_fn itx_fn[4]
Definition: vf_convolve.c:44
complex_multiply
static int complex_multiply(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_convolve.c:513
ConvolveContext::fs
FFFrameSync fs
Definition: vf_convolve.c:38
convolve
static void convolve(float *tgt, const float *src, int len, int n)
Definition: ra288.c:88
activate
static int activate(AVFilterContext *ctx)
Definition: vf_convolve.c:793
AV_PIX_FMT_YUVA444P
@ AV_PIX_FMT_YUVA444P
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
Definition: pixfmt.h:167
AV_PIX_FMT_YUVA444P10
#define AV_PIX_FMT_YUVA444P10
Definition: pixfmt.h:443
ConvolveContext
Definition: vf_convolve.c:36
ConvolveContext::fft_hdata_impulse_in
AVComplexFloat * fft_hdata_impulse_in[4]
Definition: vf_convolve.c:60
input
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some input
Definition: filter_design.txt:172
av_tx_uninit
av_cold void av_tx_uninit(AVTXContext **ctx)
Frees a context and sets ctx to NULL, does nothing when ctx == NULL.
Definition: tx.c:213
internal.h
AV_OPT_TYPE_FLOAT
@ AV_OPT_TYPE_FLOAT
Definition: opt.h:227
ThreadData::hdata_out
AVComplexFloat * hdata_out
Definition: vf_convolve.c:180
uninit
static av_cold void uninit(AVFilterContext *ctx)
Definition: vf_convolve.c:825
do_convolve
static int do_convolve(FFFrameSync *fs)
Definition: vf_convolve.c:674
fft_horizontal
static int fft_horizontal(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_convolve.c:184
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:271
ifft_vertical
static int ifft_vertical(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_convolve.c:395
FFMIN3
#define FFMIN3(a, b, c)
Definition: macros.h:50
AV_PIX_FMT_GBRP12
#define AV_PIX_FMT_GBRP12
Definition: pixfmt.h:421
ConvolveContext::depth
int depth
Definition: vf_convolve.c:65
ff_filter_get_nb_threads
int ff_filter_get_nb_threads(AVFilterContext *ctx)
Get number of threads for current filter instance.
Definition: avfilter.c:803
ThreadData
Used for passing data between threads.
Definition: dsddec.c:67
AV_PIX_FMT_YUVJ440P
@ AV_PIX_FMT_YUVJ440P
planar YUV 4:4:0 full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV440P and setting color_range
Definition: pixfmt.h:100
ConvolveContext::noise
float noise
Definition: vf_convolve.c:68
AVFilterPad::name
const char * name
Pad name.
Definition: internal.h:56
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:271
AV_PIX_FMT_YUV444P9
#define AV_PIX_FMT_YUV444P9
Definition: pixfmt.h:403
AVFilter
Filter definition.
Definition: avfilter.h:165
convolve_options
static const AVOption convolve_options[]
Definition: vf_convolve.c:85
ret
ret
Definition: filter_design.txt:187
prepare_impulse
static void prepare_impulse(AVFilterContext *ctx, AVFrame *impulsepic, int plane)
Definition: vf_convolve.c:605
SQR
#define SQR(x)
Definition: vf_convolve.c:203
AV_PIX_FMT_YUVA444P9
#define AV_PIX_FMT_YUVA444P9
Definition: pixfmt.h:440
get_input
static void get_input(ConvolveContext *s, AVComplexFloat *fft_hdata, AVFrame *in, int w, int h, int n, int plane, float scale)
Definition: vf_convolve.c:295
ff_vf_convolve
const AVFilter ff_vf_convolve
AV_PIX_FMT_YUV420P12
#define AV_PIX_FMT_YUV420P12
Definition: pixfmt.h:408
ConvolveContext::fft_vdata_in
AVComplexFloat * fft_vdata_in[4]
Definition: vf_convolve.c:57
complex_xcorrelate
static int complex_xcorrelate(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_convolve.c:544
AV_PIX_FMT_YUV422P14
#define AV_PIX_FMT_YUV422P14
Definition: pixfmt.h:413
ConvolveContext::fft_vdata_impulse_out
AVComplexFloat * fft_vdata_impulse_out[4]
Definition: vf_convolve.c:63
framesync.h
noise
static int noise(AVBSFContext *ctx, AVPacket *pkt)
Definition: noise_bsf.c:121
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:224
avfilter.h
get_xoutput
static void get_xoutput(ConvolveContext *s, AVComplexFloat *input, AVFrame *out, int w, int h, int n, int plane, float scale)
Definition: vf_convolve.c:439
mean
static float mean(const float *input, int size)
Definition: vf_nnedi.c:855
av_clip_uint8
#define av_clip_uint8
Definition: common.h:102
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
AVFilterContext
An instance of a filter.
Definition: avfilter.h:402
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:158
AVFILTER_FLAG_SLICE_THREADS
#define AVFILTER_FLAG_SLICE_THREADS
The filter supports multithreading by splitting frames into multiple parts and processing them concur...
Definition: avfilter.h:121
desc
const char * desc
Definition: libsvtav1.c:79
ff_vf_xcorrelate
const AVFilter ff_vf_xcorrelate
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
ConvolveContext::planewidth
int planewidth[4]
Definition: vf_convolve.c:47
init
static av_cold int init(AVFilterContext *ctx)
Definition: vf_convolve.c:799
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
FILTER_OUTPUTS
#define FILTER_OUTPUTS(array)
Definition: internal.h:192
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
AV_PIX_FMT_YUV411P
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
Definition: pixfmt.h:73
AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL
#define AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL
Same as AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC, except that the filter will have its filter_frame() c...
Definition: avfilter.h:154
imgutils.h
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:362
AV_PIX_FMT_YUV410P
@ AV_PIX_FMT_YUV410P
planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
Definition: pixfmt.h:72
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
AV_PIX_FMT_YUV440P12
#define AV_PIX_FMT_YUV440P12
Definition: pixfmt.h:410
h
h
Definition: vp9dsp_template.c:2038
AV_PIX_FMT_YUV444P14
#define AV_PIX_FMT_YUV444P14
Definition: pixfmt.h:414
ff_framesync_activate
int ff_framesync_activate(FFFrameSync *fs)
Examine the frames in the filter's input and try to produce output.
Definition: framesync.c:336
config_output
static int config_output(AVFilterLink *outlink)
Definition: vf_convolve.c:745
ff_framesync_dualinput_get
int ff_framesync_dualinput_get(FFFrameSync *fs, AVFrame **f0, AVFrame **f1)
Definition: framesync.c:371
AV_PIX_FMT_GRAY12
#define AV_PIX_FMT_GRAY12
Definition: pixfmt.h:386
MAX_THREADS
#define MAX_THREADS
Definition: vf_convolve.c:34
ff_filter_execute
static av_always_inline int ff_filter_execute(AVFilterContext *ctx, avfilter_action_func *func, void *arg, int *ret, int nb_jobs)
Definition: internal.h:143
int
int
Definition: ffmpeg_filter.c:153
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Definition: opt.h:233
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
ConvolveContext::got_impulse
int got_impulse[4]
Definition: vf_convolve.c:70
AV_PIX_FMT_YUVA422P
@ AV_PIX_FMT_YUVA422P
planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
Definition: pixfmt.h:166
AV_PIX_FMT_YUV420P14
#define AV_PIX_FMT_YUV420P14
Definition: pixfmt.h:412
tx.h
re
float re
Definition: fft.c:78