FFmpeg: vf_convolve.c
/*
 * Copyright (c) 2017 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config_components.h"

#include <float.h>

#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/tx.h"

#include "avfilter.h"
#include "formats.h"
#include "framesync.h"
#include "internal.h"
#include "video.h"

#define MAX_THREADS 16

typedef struct ConvolveContext {
    const AVClass *class;
    FFFrameSync fs;

    AVTXContext *fft[4][MAX_THREADS];
    AVTXContext *ifft[4][MAX_THREADS];

    av_tx_fn tx_fn[4];
    av_tx_fn itx_fn[4];

    int fft_len[4];
    int planewidth[4];
    int planeheight[4];

    int primarywidth[4];
    int primaryheight[4];

    int secondarywidth[4];
    int secondaryheight[4];

    AVComplexFloat *fft_hdata_in[4];
    AVComplexFloat *fft_vdata_in[4];
    AVComplexFloat *fft_hdata_out[4];
    AVComplexFloat *fft_vdata_out[4];
    AVComplexFloat *fft_hdata_impulse_in[4];
    AVComplexFloat *fft_vdata_impulse_in[4];
    AVComplexFloat *fft_hdata_impulse_out[4];
    AVComplexFloat *fft_vdata_impulse_out[4];

    int depth;
    int planes;
    int impulse;
    float noise;
    int nb_planes;
    int got_impulse[4];

    void (*get_input)(struct ConvolveContext *s, AVComplexFloat *fft_hdata,
                      AVFrame *in, int w, int h, int n, int plane, float scale);
    void (*get_output)(struct ConvolveContext *s, AVComplexFloat *input, AVFrame *out,
                       int w, int h, int n, int plane, float scale);
    void (*prepare_impulse)(AVFilterContext *ctx, AVFrame *impulsepic, int plane);

    int (*filter)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs);
} ConvolveContext;

#define OFFSET(x) offsetof(ConvolveContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

static const AVOption convolve_options[] = {
    { "planes", "set planes to convolve", OFFSET(planes), AV_OPT_TYPE_INT, {.i64=7}, 0, 15, FLAGS },
    { "impulse", "when to process impulses", OFFSET(impulse), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS, "impulse" },
    { "first", "process only first impulse, ignore rest", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "impulse" },
    { "all", "process all impulses", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "impulse" },
    { "noise", "set noise", OFFSET(noise), AV_OPT_TYPE_FLOAT, {.dbl=0.0000001}, 0, 1, FLAGS },
    { NULL },
};

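/*
 * Usage sketch (illustrative, not part of the upstream source): all three
 * filters defined here take two video inputs, so they are normally driven
 * through -filter_complex, e.g. something along the lines of
 *
 *   ffmpeg -i main.mkv -i kernel.png \
 *          -filter_complex "[0:v][1:v]convolve=planes=7:impulse=first" out.mkv
 *
 * "planes" is a bitmask selecting which planes are processed (see the
 * (1 << plane) test in do_convolve()), and "impulse" chooses between using
 * only the first kernel frame or every kernel frame.
 */
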
static const enum AVPixelFormat pixel_fmts_fftfilt[] = {
    AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P,
    AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
    AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUV420P,
    AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
    AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
    AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
    AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
    AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV440P12,
    AV_PIX_FMT_YUV420P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV444P14,
    AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
    AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA444P9,
    AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA444P10,
    AV_PIX_FMT_YUVA420P16, AV_PIX_FMT_YUVA422P16, AV_PIX_FMT_YUVA444P16,
    AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10,
    AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14, AV_PIX_FMT_GBRP16,
    AV_PIX_FMT_GBRAP, AV_PIX_FMT_GBRAP10, AV_PIX_FMT_GBRAP12, AV_PIX_FMT_GBRAP16,
    AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY9, AV_PIX_FMT_GRAY10, AV_PIX_FMT_GRAY12,
    AV_PIX_FMT_GRAY14, AV_PIX_FMT_GRAY16,
    AV_PIX_FMT_NONE
};

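/*
 * config_input() sizes the per-plane FFT buffers: for each plane,
 * n = max(width, height) is rounded up to the smallest power of two not below
 * it via 1 << av_log2(2 * n - 1) (e.g. a 1280x720 luma plane gives an FFT
 * length of 2048), and eight n x n AVComplexFloat buffers are allocated for
 * the horizontal/vertical passes of the main and impulse/secondary inputs.
 */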
static int config_input(AVFilterLink *inlink)
{
    ConvolveContext *s = inlink->dst->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    const int w = inlink->w;
    const int h = inlink->h;

    s->planewidth[1] = s->planewidth[2] = AV_CEIL_RSHIFT(w, desc->log2_chroma_w);
    s->planewidth[0] = s->planewidth[3] = w;
    s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(h, desc->log2_chroma_h);
    s->planeheight[0] = s->planeheight[3] = h;

    s->nb_planes = desc->nb_components;
    s->depth = desc->comp[0].depth;

    for (int i = 0; i < s->nb_planes; i++) {
        int w = s->planewidth[i];
        int h = s->planeheight[i];
        int n = FFMAX(w, h);

        s->fft_len[i] = 1 << (av_log2(2 * n - 1));

        if (!(s->fft_hdata_in[i] = av_calloc(s->fft_len[i], s->fft_len[i] * sizeof(AVComplexFloat))))
            return AVERROR(ENOMEM);

        if (!(s->fft_hdata_out[i] = av_calloc(s->fft_len[i], s->fft_len[i] * sizeof(AVComplexFloat))))
            return AVERROR(ENOMEM);

        if (!(s->fft_vdata_in[i] = av_calloc(s->fft_len[i], s->fft_len[i] * sizeof(AVComplexFloat))))
            return AVERROR(ENOMEM);

        if (!(s->fft_vdata_out[i] = av_calloc(s->fft_len[i], s->fft_len[i] * sizeof(AVComplexFloat))))
            return AVERROR(ENOMEM);

        if (!(s->fft_hdata_impulse_in[i] = av_calloc(s->fft_len[i], s->fft_len[i] * sizeof(AVComplexFloat))))
            return AVERROR(ENOMEM);

        if (!(s->fft_vdata_impulse_in[i] = av_calloc(s->fft_len[i], s->fft_len[i] * sizeof(AVComplexFloat))))
            return AVERROR(ENOMEM);

        if (!(s->fft_hdata_impulse_out[i] = av_calloc(s->fft_len[i], s->fft_len[i] * sizeof(AVComplexFloat))))
            return AVERROR(ENOMEM);

        if (!(s->fft_vdata_impulse_out[i] = av_calloc(s->fft_len[i], s->fft_len[i] * sizeof(AVComplexFloat))))
            return AVERROR(ENOMEM);
    }

    return 0;
}

static int config_input_impulse(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;

    if (ctx->inputs[0]->w != ctx->inputs[1]->w ||
        ctx->inputs[0]->h != ctx->inputs[1]->h) {
        av_log(ctx, AV_LOG_ERROR, "Width and height of input videos must be same.\n");
        return AVERROR(EINVAL);
    }

    return 0;
}

typedef struct ThreadData {
    AVComplexFloat *hdata_in, *vdata_in;
    AVComplexFloat *hdata_out, *vdata_out;
    int plane, n;
} ThreadData;

static int fft_horizontal(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ConvolveContext *s = ctx->priv;
    ThreadData *td = arg;
    AVComplexFloat *hdata_in = td->hdata_in;
    AVComplexFloat *hdata_out = td->hdata_out;
    const int plane = td->plane;
    const int n = td->n;
    int start = (n * jobnr) / nb_jobs;
    int end = (n * (jobnr+1)) / nb_jobs;
    int y;

    for (y = start; y < end; y++) {
        s->tx_fn[plane](s->fft[plane][jobnr], hdata_out + y * n, hdata_in + y * n, sizeof(AVComplexFloat));
    }

    return 0;
}

#define SQR(x) ((x) * (x))

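/*
 * Input path used by xcorrelate: the plane is normalized to zero mean and
 * unit standard deviation, copied into the top-left w x h corner of the
 * n x n FFT buffer, and the rest of the buffer is zero-padded.
 */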
static void get_zeropadded_input(ConvolveContext *s,
                                 AVComplexFloat *fft_hdata,
                                 AVFrame *in, int w, int h,
                                 int n, int plane, float scale)
{
    float sum = 0.f;
    float mean, dev;
    int y, x;

    if (s->depth == 8) {
        for (y = 0; y < h; y++) {
            const uint8_t *src = in->data[plane] + in->linesize[plane] * y;

            for (x = 0; x < w; x++)
                sum += src[x];
        }

        mean = sum / (w * h);
        sum = 0.f;
        for (y = 0; y < h; y++) {
            const uint8_t *src = in->data[plane] + in->linesize[plane] * y;

            for (x = 0; x < w; x++)
                sum += SQR(src[x] - mean);
        }

        dev = sqrtf(sum / (w * h));
        scale /= dev;
        for (y = 0; y < h; y++) {
            const uint8_t *src = in->data[plane] + in->linesize[plane] * y;

            for (x = 0; x < w; x++) {
                fft_hdata[y * n + x].re = (src[x] - mean) * scale;
                fft_hdata[y * n + x].im = 0;
            }

            for (x = w; x < n; x++) {
                fft_hdata[y * n + x].re = 0;
                fft_hdata[y * n + x].im = 0;
            }
        }

        for (y = h; y < n; y++) {
            for (x = 0; x < n; x++) {
                fft_hdata[y * n + x].re = 0;
                fft_hdata[y * n + x].im = 0;
            }
        }
    } else {
        for (y = 0; y < h; y++) {
            const uint16_t *src = (const uint16_t *)(in->data[plane] + in->linesize[plane] * y);

            for (x = 0; x < w; x++)
                sum += src[x];
        }

        mean = sum / (w * h);
        sum = 0.f;
        for (y = 0; y < h; y++) {
            const uint16_t *src = (const uint16_t *)(in->data[plane] + in->linesize[plane] * y);

            for (x = 0; x < w; x++)
                sum += SQR(src[x] - mean);
        }

        dev = sqrtf(sum / (w * h));
        scale /= dev;
        for (y = 0; y < h; y++) {
            const uint16_t *src = (const uint16_t *)(in->data[plane] + in->linesize[plane] * y);

            for (x = 0; x < w; x++) {
                fft_hdata[y * n + x].re = (src[x] - mean) * scale;
                fft_hdata[y * n + x].im = 0;
            }

            for (x = w; x < n; x++) {
                fft_hdata[y * n + x].re = 0;
                fft_hdata[y * n + x].im = 0;
            }
        }

        for (y = h; y < n; y++) {
            for (x = 0; x < n; x++) {
                fft_hdata[y * n + x].re = 0;
                fft_hdata[y * n + x].im = 0;
            }
        }
    }
}

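/*
 * Input path used by convolve and deconvolve: the plane is centered inside
 * the n x n FFT buffer and the border is filled by replicating the nearest
 * edge samples rather than zeros, presumably to limit wrap-around artifacts
 * of the circular (FFT-based) convolution at the frame edges.
 */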
static void get_input(ConvolveContext *s, AVComplexFloat *fft_hdata,
                      AVFrame *in, int w, int h, int n, int plane, float scale)
{
    const int iw = (n - w) / 2, ih = (n - h) / 2;
    int y, x;

    if (s->depth == 8) {
        for (y = 0; y < h; y++) {
            const uint8_t *src = in->data[plane] + in->linesize[plane] * y;

            for (x = 0; x < w; x++) {
                fft_hdata[(y + ih) * n + iw + x].re = src[x] * scale;
                fft_hdata[(y + ih) * n + iw + x].im = 0;
            }

            for (x = 0; x < iw; x++) {
                fft_hdata[(y + ih) * n + x].re = fft_hdata[(y + ih) * n + iw].re;
                fft_hdata[(y + ih) * n + x].im = 0;
            }

            for (x = n - iw; x < n; x++) {
                fft_hdata[(y + ih) * n + x].re = fft_hdata[(y + ih) * n + n - iw - 1].re;
                fft_hdata[(y + ih) * n + x].im = 0;
            }
        }

        for (y = 0; y < ih; y++) {
            for (x = 0; x < n; x++) {
                fft_hdata[y * n + x].re = fft_hdata[ih * n + x].re;
                fft_hdata[y * n + x].im = 0;
            }
        }

        for (y = n - ih; y < n; y++) {
            for (x = 0; x < n; x++) {
                fft_hdata[y * n + x].re = fft_hdata[(n - ih - 1) * n + x].re;
                fft_hdata[y * n + x].im = 0;
            }
        }
    } else {
        for (y = 0; y < h; y++) {
            const uint16_t *src = (const uint16_t *)(in->data[plane] + in->linesize[plane] * y);

            for (x = 0; x < w; x++) {
                fft_hdata[(y + ih) * n + iw + x].re = src[x] * scale;
                fft_hdata[(y + ih) * n + iw + x].im = 0;
            }

            for (x = 0; x < iw; x++) {
                fft_hdata[(y + ih) * n + x].re = fft_hdata[(y + ih) * n + iw].re;
                fft_hdata[(y + ih) * n + x].im = 0;
            }

            for (x = n - iw; x < n; x++) {
                fft_hdata[(y + ih) * n + x].re = fft_hdata[(y + ih) * n + n - iw - 1].re;
                fft_hdata[(y + ih) * n + x].im = 0;
            }
        }

        for (y = 0; y < ih; y++) {
            for (x = 0; x < n; x++) {
                fft_hdata[y * n + x].re = fft_hdata[ih * n + x].re;
                fft_hdata[y * n + x].im = 0;
            }
        }

        for (y = n - ih; y < n; y++) {
            for (x = 0; x < n; x++) {
                fft_hdata[y * n + x].re = fft_hdata[(n - ih - 1) * n + x].re;
                fft_hdata[y * n + x].im = 0;
            }
        }
    }
}

static int fft_vertical(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ConvolveContext *s = ctx->priv;
    ThreadData *td = arg;
    AVComplexFloat *hdata = td->hdata_out;
    AVComplexFloat *vdata_in = td->vdata_in;
    AVComplexFloat *vdata_out = td->vdata_out;
    const int plane = td->plane;
    const int n = td->n;
    int start = (n * jobnr) / nb_jobs;
    int end = (n * (jobnr+1)) / nb_jobs;
    int y, x;

    for (y = start; y < end; y++) {
        for (x = 0; x < n; x++) {
            vdata_in[y * n + x].re = hdata[x * n + y].re;
            vdata_in[y * n + x].im = hdata[x * n + y].im;
        }

        s->tx_fn[plane](s->fft[plane][jobnr], vdata_out + y * n, vdata_in + y * n, sizeof(AVComplexFloat));
    }

    return 0;
}

static int ifft_vertical(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ConvolveContext *s = ctx->priv;
    ThreadData *td = arg;
    AVComplexFloat *hdata = td->hdata_out;
    AVComplexFloat *vdata_out = td->vdata_out;
    AVComplexFloat *vdata_in = td->vdata_in;
    const int plane = td->plane;
    const int n = td->n;
    int start = (n * jobnr) / nb_jobs;
    int end = (n * (jobnr+1)) / nb_jobs;
    int y, x;

    for (y = start; y < end; y++) {
        s->itx_fn[plane](s->ifft[plane][jobnr], vdata_out + y * n, vdata_in + y * n, sizeof(AVComplexFloat));

        for (x = 0; x < n; x++) {
            hdata[x * n + y].re = vdata_out[y * n + x].re;
            hdata[x * n + y].im = vdata_out[y * n + x].im;
        }
    }

    return 0;
}

static int ifft_horizontal(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ConvolveContext *s = ctx->priv;
    ThreadData *td = arg;
    AVComplexFloat *hdata_out = td->hdata_out;
    AVComplexFloat *hdata_in = td->hdata_in;
    const int plane = td->plane;
    const int n = td->n;
    int start = (n * jobnr) / nb_jobs;
    int end = (n * (jobnr+1)) / nb_jobs;
    int y;

    for (y = start; y < end; y++) {
        s->itx_fn[plane](s->ifft[plane][jobnr], hdata_out + y * n, hdata_in + y * n, sizeof(AVComplexFloat));
    }

    return 0;
}

static void get_xoutput(ConvolveContext *s, AVComplexFloat *input, AVFrame *out,
                        int w, int h, int n, int plane, float scale)
{
    const int imax = (1 << s->depth) - 1;

    scale *= imax * 16;
    if (s->depth == 8) {
        for (int y = 0; y < h; y++) {
            uint8_t *dst = out->data[plane] + y * out->linesize[plane];
            for (int x = 0; x < w; x++)
                dst[x] = av_clip_uint8(input[y * n + x].re * scale);
        }
    } else {
        for (int y = 0; y < h; y++) {
            uint16_t *dst = (uint16_t *)(out->data[plane] + y * out->linesize[plane]);
            for (int x = 0; x < w; x++)
                dst[x] = av_clip(input[y * n + x].re * scale, 0, imax);
        }
    }
}

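/*
 * Output path used by convolve and deconvolve: the four quadrants of the
 * n x n result are swapped while being written back (an fftshift-style
 * reassembly), which recenters the circularly shifted convolution result.
 */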
static void get_output(ConvolveContext *s, AVComplexFloat *input, AVFrame *out,
                       int w, int h, int n, int plane, float scale)
{
    const int max = (1 << s->depth) - 1;
    const int hh = h / 2;
    const int hw = w / 2;
    int y, x;

    if (s->depth == 8) {
        for (y = 0; y < hh; y++) {
            uint8_t *dst = out->data[plane] + (y + hh) * out->linesize[plane] + hw;
            for (x = 0; x < hw; x++)
                dst[x] = av_clip_uint8(input[y * n + x].re * scale);
        }
        for (y = 0; y < hh; y++) {
            uint8_t *dst = out->data[plane] + (y + hh) * out->linesize[plane];
            for (x = 0; x < hw; x++)
                dst[x] = av_clip_uint8(input[y * n + n - hw + x].re * scale);
        }
        for (y = 0; y < hh; y++) {
            uint8_t *dst = out->data[plane] + y * out->linesize[plane] + hw;
            for (x = 0; x < hw; x++)
                dst[x] = av_clip_uint8(input[(n - hh + y) * n + x].re * scale);
        }
        for (y = 0; y < hh; y++) {
            uint8_t *dst = out->data[plane] + y * out->linesize[plane];
            for (x = 0; x < hw; x++)
                dst[x] = av_clip_uint8(input[(n - hh + y) * n + n - hw + x].re * scale);
        }
    } else {
        for (y = 0; y < hh; y++) {
            uint16_t *dst = (uint16_t *)(out->data[plane] + (y + hh) * out->linesize[plane] + hw * 2);
            for (x = 0; x < hw; x++)
                dst[x] = av_clip(input[y * n + x].re * scale, 0, max);
        }
        for (y = 0; y < hh; y++) {
            uint16_t *dst = (uint16_t *)(out->data[plane] + (y + hh) * out->linesize[plane]);
            for (x = 0; x < hw; x++)
                dst[x] = av_clip(input[y * n + n - hw + x].re * scale, 0, max);
        }
        for (y = 0; y < hh; y++) {
            uint16_t *dst = (uint16_t *)(out->data[plane] + y * out->linesize[plane] + hw * 2);
            for (x = 0; x < hw; x++)
                dst[x] = av_clip(input[(n - hh + y) * n + x].re * scale, 0, max);
        }
        for (y = 0; y < hh; y++) {
            uint16_t *dst = (uint16_t *)(out->data[plane] + y * out->linesize[plane]);
            for (x = 0; x < hw; x++)
                dst[x] = av_clip(input[(n - hh + y) * n + n - hw + x].re * scale, 0, max);
        }
    }
}

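/*
 * convolve: pointwise complex multiplication of the two spectra, which by the
 * convolution theorem corresponds to circular convolution in the spatial
 * domain; the "noise" option value is added to the real part of the impulse
 * spectrum before multiplying.
 */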
static int complex_multiply(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ConvolveContext *s = ctx->priv;
    ThreadData *td = arg;
    AVComplexFloat *input = td->hdata_in;
    AVComplexFloat *filter = td->vdata_in;
    const float noise = s->noise;
    const int n = td->n;
    int start = (n * jobnr) / nb_jobs;
    int end = (n * (jobnr+1)) / nb_jobs;
    int y, x;

    for (y = start; y < end; y++) {
        int yn = y * n;

        for (x = 0; x < n; x++) {
            float re, im, ire, iim;

            re = input[yn + x].re;
            im = input[yn + x].im;
            ire = filter[yn + x].re + noise;
            iim = filter[yn + x].im;

            input[yn + x].re = ire * re - iim * im;
            input[yn + x].im = iim * re + ire * im;
        }
    }

    return 0;
}

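/*
 * xcorrelate: pointwise multiplication by the complex conjugate of the
 * secondary spectrum, scaled by 1/(n*n), which corresponds to
 * cross-correlation in the spatial domain.
 */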
static int complex_xcorrelate(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ThreadData *td = arg;
    AVComplexFloat *input = td->hdata_in;
    AVComplexFloat *filter = td->vdata_in;
    const int n = td->n;
    const float scale = 1.f / (n * n);
    int start = (n * jobnr) / nb_jobs;
    int end = (n * (jobnr+1)) / nb_jobs;

    for (int y = start; y < end; y++) {
        int yn = y * n;

        for (int x = 0; x < n; x++) {
            float re, im, ire, iim;

            re = input[yn + x].re;
            im = input[yn + x].im;
            ire = filter[yn + x].re * scale;
            iim = -filter[yn + x].im * scale;

            input[yn + x].re = ire * re - iim * im;
            input[yn + x].im = iim * re + ire * im;
        }
    }

    return 0;
}

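/*
 * deconvolve: pointwise division of the input spectrum by the impulse
 * spectrum, evaluated as X * conj(H) / (|H|^2 + noise); the "noise" option
 * acts as a regularization term that keeps the denominator away from zero.
 */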
static int complex_divide(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ConvolveContext *s = ctx->priv;
    ThreadData *td = arg;
    AVComplexFloat *input = td->hdata_in;
    AVComplexFloat *filter = td->vdata_in;
    const float noise = s->noise;
    const int n = td->n;
    int start = (n * jobnr) / nb_jobs;
    int end = (n * (jobnr+1)) / nb_jobs;
    int y, x;

    for (y = start; y < end; y++) {
        int yn = y * n;

        for (x = 0; x < n; x++) {
            float re, im, ire, iim, div;

            re = input[yn + x].re;
            im = input[yn + x].im;
            ire = filter[yn + x].re;
            iim = filter[yn + x].im;
            div = ire * ire + iim * iim + noise;

            input[yn + x].re = (ire * re + iim * im) / div;
            input[yn + x].im = (ire * im - iim * re) / div;
        }
    }

    return 0;
}

static void prepare_impulse(AVFilterContext *ctx, AVFrame *impulsepic, int plane)
{
    ConvolveContext *s = ctx->priv;
    const int n = s->fft_len[plane];
    const int w = s->secondarywidth[plane];
    const int h = s->secondaryheight[plane];
    ThreadData td;
    float total = 0;

    if (s->depth == 8) {
        for (int y = 0; y < h; y++) {
            const uint8_t *src = (const uint8_t *)(impulsepic->data[plane] + y * impulsepic->linesize[plane]);
            for (int x = 0; x < w; x++) {
                total += src[x];
            }
        }
    } else {
        for (int y = 0; y < h; y++) {
            const uint16_t *src = (const uint16_t *)(impulsepic->data[plane] + y * impulsepic->linesize[plane]);
            for (int x = 0; x < w; x++) {
                total += src[x];
            }
        }
    }
    total = FFMAX(1, total);

    s->get_input(s, s->fft_hdata_impulse_in[plane], impulsepic, w, h, n, plane, 1.f / total);

    td.n = n;
    td.plane = plane;
    td.hdata_in = s->fft_hdata_impulse_in[plane];
    td.vdata_in = s->fft_vdata_impulse_in[plane];
    td.hdata_out = s->fft_hdata_impulse_out[plane];
    td.vdata_out = s->fft_vdata_impulse_out[plane];

    ff_filter_execute(ctx, fft_horizontal, &td, NULL,
                      FFMIN3(MAX_THREADS, n, ff_filter_get_nb_threads(ctx)));
    ff_filter_execute(ctx, fft_vertical, &td, NULL,
                      FFMIN3(MAX_THREADS, n, ff_filter_get_nb_threads(ctx)));

    s->got_impulse[plane] = 1;
}

static void prepare_secondary(AVFilterContext *ctx, AVFrame *secondary, int plane)
{
    ConvolveContext *s = ctx->priv;
    const int n = s->fft_len[plane];
    ThreadData td;

    s->get_input(s, s->fft_hdata_impulse_in[plane], secondary,
                 s->secondarywidth[plane],
                 s->secondaryheight[plane],
                 n, plane, 1.f);

    td.n = n;
    td.plane = plane;
    td.hdata_in = s->fft_hdata_impulse_in[plane];
    td.vdata_in = s->fft_vdata_impulse_in[plane];
    td.hdata_out = s->fft_hdata_impulse_out[plane];
    td.vdata_out = s->fft_vdata_impulse_out[plane];

    ff_filter_execute(ctx, fft_horizontal, &td, NULL,
                      FFMIN3(MAX_THREADS, n, ff_filter_get_nb_threads(ctx)));
    ff_filter_execute(ctx, fft_vertical, &td, NULL,
                      FFMIN3(MAX_THREADS, n, ff_filter_get_nb_threads(ctx)));

    s->got_impulse[plane] = 1;
}

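/*
 * Per-plane processing for each frame pair delivered by the framesync: load
 * the main input, run horizontal then vertical forward FFTs, refresh the
 * impulse/secondary spectrum if required, apply the pointwise frequency-domain
 * operation (multiply, conjugate multiply or regularized divide), run the
 * inverse vertical and horizontal FFTs, and write the plane back through the
 * filter's output routine.
 */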
static int do_convolve(FFFrameSync *fs)
{
    AVFilterContext *ctx = fs->parent;
    AVFilterLink *outlink = ctx->outputs[0];
    ConvolveContext *s = ctx->priv;
    AVFrame *mainpic = NULL, *impulsepic = NULL;
    int ret, plane;

    ret = ff_framesync_dualinput_get(fs, &mainpic, &impulsepic);
    if (ret < 0)
        return ret;
    if (!impulsepic)
        return ff_filter_frame(outlink, mainpic);

    for (plane = 0; plane < s->nb_planes; plane++) {
        AVComplexFloat *filter = s->fft_vdata_impulse_out[plane];
        AVComplexFloat *input = s->fft_vdata_out[plane];
        const int n = s->fft_len[plane];
        const int w = s->primarywidth[plane];
        const int h = s->primaryheight[plane];
        const int ow = s->planewidth[plane];
        const int oh = s->planeheight[plane];
        ThreadData td;

        if (!(s->planes & (1 << plane))) {
            continue;
        }

        td.plane = plane, td.n = n;
        s->get_input(s, s->fft_hdata_in[plane], mainpic, w, h, n, plane, 1.f);

        td.hdata_in = s->fft_hdata_in[plane];
        td.vdata_in = s->fft_vdata_in[plane];
        td.hdata_out = s->fft_hdata_out[plane];
        td.vdata_out = s->fft_vdata_out[plane];

        ff_filter_execute(ctx, fft_horizontal, &td, NULL,
                          FFMIN3(MAX_THREADS, n, ff_filter_get_nb_threads(ctx)));
        ff_filter_execute(ctx, fft_vertical, &td, NULL,
                          FFMIN3(MAX_THREADS, n, ff_filter_get_nb_threads(ctx)));

        if ((!s->impulse && !s->got_impulse[plane]) || s->impulse) {
            s->prepare_impulse(ctx, impulsepic, plane);
        }

        td.hdata_in = input;
        td.vdata_in = filter;

        ff_filter_execute(ctx, s->filter, &td, NULL,
                          FFMIN3(MAX_THREADS, n, ff_filter_get_nb_threads(ctx)));

        td.hdata_in = s->fft_hdata_out[plane];
        td.vdata_in = s->fft_vdata_out[plane];
        td.hdata_out = s->fft_hdata_in[plane];
        td.vdata_out = s->fft_vdata_in[plane];

        ff_filter_execute(ctx, ifft_vertical, &td, NULL,
                          FFMIN3(MAX_THREADS, n, ff_filter_get_nb_threads(ctx)));

        td.hdata_out = s->fft_hdata_out[plane];
        td.hdata_in = s->fft_hdata_in[plane];

        ff_filter_execute(ctx, ifft_horizontal, &td, NULL,
                          FFMIN3(MAX_THREADS, n, ff_filter_get_nb_threads(ctx)));

        s->get_output(s, s->fft_hdata_out[plane], mainpic, ow, oh, n, plane, 1.f / (n * n));
    }

    return ff_filter_frame(outlink, mainpic);
}

static int config_output(AVFilterLink *outlink)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(outlink->format);
    AVFilterContext *ctx = outlink->src;
    ConvolveContext *s = ctx->priv;
    AVFilterLink *mainlink = ctx->inputs[0];
    AVFilterLink *secondlink = ctx->inputs[1];
    int ret, i, j;

    s->primarywidth[1] = s->primarywidth[2] = AV_CEIL_RSHIFT(mainlink->w, desc->log2_chroma_w);
    s->primarywidth[0] = s->primarywidth[3] = mainlink->w;
    s->primaryheight[1] = s->primaryheight[2] = AV_CEIL_RSHIFT(mainlink->h, desc->log2_chroma_h);
    s->primaryheight[0] = s->primaryheight[3] = mainlink->h;

    s->secondarywidth[1] = s->secondarywidth[2] = AV_CEIL_RSHIFT(secondlink->w, desc->log2_chroma_w);
    s->secondarywidth[0] = s->secondarywidth[3] = secondlink->w;
    s->secondaryheight[1] = s->secondaryheight[2] = AV_CEIL_RSHIFT(secondlink->h, desc->log2_chroma_h);
    s->secondaryheight[0] = s->secondaryheight[3] = secondlink->h;

    s->fs.on_event = do_convolve;
    ret = ff_framesync_init_dualinput(&s->fs, ctx);
    if (ret < 0)
        return ret;
    outlink->w = mainlink->w;
    outlink->h = mainlink->h;
    outlink->time_base = mainlink->time_base;
    outlink->sample_aspect_ratio = mainlink->sample_aspect_ratio;
    outlink->frame_rate = mainlink->frame_rate;

    if ((ret = ff_framesync_configure(&s->fs)) < 0)
        return ret;

    for (i = 0; i < s->nb_planes; i++) {
        for (j = 0; j < MAX_THREADS; j++) {
            float scale = 1.f;

            ret = av_tx_init(&s->fft[i][j], &s->tx_fn[i], AV_TX_FLOAT_FFT, 0, s->fft_len[i], &scale, 0);
            if (ret < 0)
                return ret;
            ret = av_tx_init(&s->ifft[i][j], &s->itx_fn[i], AV_TX_FLOAT_FFT, 1, s->fft_len[i], &scale, 0);
            if (ret < 0)
                return ret;
        }
    }

    return 0;
}

static int activate(AVFilterContext *ctx)
{
    ConvolveContext *s = ctx->priv;
    return ff_framesync_activate(&s->fs);
}

static av_cold int init(AVFilterContext *ctx)
{
    ConvolveContext *s = ctx->priv;

    if (!strcmp(ctx->filter->name, "convolve")) {
        s->filter = complex_multiply;
        s->prepare_impulse = prepare_impulse;
        s->get_input = get_input;
        s->get_output = get_output;
    } else if (!strcmp(ctx->filter->name, "xcorrelate")) {
        s->filter = complex_xcorrelate;
        s->prepare_impulse = prepare_secondary;
        s->get_input = get_zeropadded_input;
        s->get_output = get_xoutput;
    } else if (!strcmp(ctx->filter->name, "deconvolve")) {
        s->filter = complex_divide;
        s->prepare_impulse = prepare_impulse;
        s->get_input = get_input;
        s->get_output = get_output;
    } else {
        return AVERROR_BUG;
    }

    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    ConvolveContext *s = ctx->priv;
    int i, j;

    for (i = 0; i < 4; i++) {
        av_freep(&s->fft_hdata_in[i]);
        av_freep(&s->fft_vdata_in[i]);
        av_freep(&s->fft_hdata_out[i]);
        av_freep(&s->fft_vdata_out[i]);
        av_freep(&s->fft_hdata_impulse_in[i]);
        av_freep(&s->fft_vdata_impulse_in[i]);
        av_freep(&s->fft_hdata_impulse_out[i]);
        av_freep(&s->fft_vdata_impulse_out[i]);

        for (j = 0; j < MAX_THREADS; j++) {
            av_tx_uninit(&s->fft[i][j]);
            av_tx_uninit(&s->ifft[i][j]);
        }
    }

    ff_framesync_uninit(&s->fs);
}

static const AVFilterPad convolve_inputs[] = {
    {
        .name = "main",
        .type = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input,
    },{
        .name = "impulse",
        .type = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input_impulse,
    },
};

static const AVFilterPad convolve_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
        .config_props = config_output,
    },
};

FRAMESYNC_AUXILIARY_FUNCS(convolve, ConvolveContext, fs)

#if CONFIG_CONVOLVE_FILTER

FRAMESYNC_DEFINE_PURE_CLASS(convolve, "convolve", convolve, convolve_options);

const AVFilter ff_vf_convolve = {
    .name = "convolve",
    .description = NULL_IF_CONFIG_SMALL("Convolve first video stream with second video stream."),
    .preinit = convolve_framesync_preinit,
    .init = init,
    .uninit = uninit,
    .activate = activate,
    .priv_size = sizeof(ConvolveContext),
    .priv_class = &convolve_class,
    FILTER_INPUTS(convolve_inputs),
    FILTER_OUTPUTS(convolve_outputs),
    FILTER_PIXFMTS_ARRAY(pixel_fmts_fftfilt),
    .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS,
};

#endif /* CONFIG_CONVOLVE_FILTER */

#if CONFIG_DECONVOLVE_FILTER

static const AVOption deconvolve_options[] = {
    { "planes", "set planes to deconvolve", OFFSET(planes), AV_OPT_TYPE_INT, {.i64=7}, 0, 15, FLAGS },
    { "impulse", "when to process impulses", OFFSET(impulse), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS, "impulse" },
    { "first", "process only first impulse, ignore rest", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "impulse" },
    { "all", "process all impulses", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "impulse" },
    { "noise", "set noise", OFFSET(noise), AV_OPT_TYPE_FLOAT, {.dbl=0.0000001}, 0, 1, FLAGS },
    { NULL },
};

FRAMESYNC_DEFINE_PURE_CLASS(deconvolve, "deconvolve", convolve, deconvolve_options);

const AVFilter ff_vf_deconvolve = {
    .name = "deconvolve",
    .description = NULL_IF_CONFIG_SMALL("Deconvolve first video stream with second video stream."),
    .preinit = convolve_framesync_preinit,
    .init = init,
    .uninit = uninit,
    .activate = activate,
    .priv_size = sizeof(ConvolveContext),
    .priv_class = &deconvolve_class,
    FILTER_INPUTS(convolve_inputs),
    FILTER_OUTPUTS(convolve_outputs),
    FILTER_PIXFMTS_ARRAY(pixel_fmts_fftfilt),
    .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS,
};

#endif /* CONFIG_DECONVOLVE_FILTER */

#if CONFIG_XCORRELATE_FILTER

static const AVOption xcorrelate_options[] = {
    { "planes", "set planes to cross-correlate", OFFSET(planes), AV_OPT_TYPE_INT, {.i64=7}, 0, 15, FLAGS },
    { "secondary", "when to process secondary frame", OFFSET(impulse), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS, "impulse" },
    { "first", "process only first secondary frame, ignore rest", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "impulse" },
    { "all", "process all secondary frames", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "impulse" },
    { NULL },
};

FRAMESYNC_DEFINE_PURE_CLASS(xcorrelate, "xcorrelate", convolve, xcorrelate_options);

static int config_input_secondary(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;

    if (ctx->inputs[0]->w <= ctx->inputs[1]->w ||
        ctx->inputs[0]->h <= ctx->inputs[1]->h) {
        av_log(ctx, AV_LOG_ERROR, "Width and height of second input videos must be less than first input.\n");
        return AVERROR(EINVAL);
    }

    return 0;
}

static const AVFilterPad xcorrelate_inputs[] = {
    {
        .name = "primary",
        .type = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input,
    },{
        .name = "secondary",
        .type = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input_secondary,
    },
};

static const AVFilterPad xcorrelate_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
        .config_props = config_output,
    },
};

const AVFilter ff_vf_xcorrelate = {
    .name = "xcorrelate",
    .description = NULL_IF_CONFIG_SMALL("Cross-correlate first video stream with second video stream."),
    .preinit = convolve_framesync_preinit,
    .init = init,
    .uninit = uninit,
    .activate = activate,
    .priv_size = sizeof(ConvolveContext),
    .priv_class = &xcorrelate_class,
    FILTER_INPUTS(xcorrelate_inputs),
    FILTER_OUTPUTS(xcorrelate_outputs),
    FILTER_PIXFMTS_ARRAY(pixel_fmts_fftfilt),
    .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS,
};

#endif /* CONFIG_XCORRELATE_FILTER */