FFmpeg
vf_vaguedenoiser.c
/*
 * Copyright (c) 2003 LeFunGus, lefungus@altern.org
 *
 * This file is part of FFmpeg
 *
 * FFmpeg is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <float.h>

#include "libavutil/imgutils.h"
#include "libavutil/attributes.h"
#include "libavutil/common.h"
#include "libavutil/pixdesc.h"
#include "libavutil/opt.h"

#include "avfilter.h"
#include "internal.h"
#include "video.h"

typedef struct VagueDenoiserContext {
    const AVClass *class;

    float threshold;
    float percent;
    int method;
    int type;
    int nsteps;
    int planes;

    int depth;
    int bpc;
    int peak;
    int nb_planes;
    int planeheight[4];
    int planewidth[4];

    float *block;
    float *in;
    float *out;
    float *tmp;

    int hlowsize[4][32];
    int hhighsize[4][32];
    int vlowsize[4][32];
    int vhighsize[4][32];

    void (*thresholding)(float *block, const int width, const int height,
                         const int stride, const float threshold,
                         const float percent);
} VagueDenoiserContext;

#define OFFSET(x) offsetof(VagueDenoiserContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
static const AVOption vaguedenoiser_options[] = {
    { "threshold", "set filtering strength",        OFFSET(threshold), AV_OPT_TYPE_FLOAT, {.dbl=2.},  0, DBL_MAX, FLAGS },
    { "method",    "set filtering method",          OFFSET(method),    AV_OPT_TYPE_INT,   {.i64=2 },  0, 2,       FLAGS, "method" },
    { "hard",      "hard thresholding",             0,                 AV_OPT_TYPE_CONST, {.i64=0},   0, 0,       FLAGS, "method" },
    { "soft",      "soft thresholding",             0,                 AV_OPT_TYPE_CONST, {.i64=1},   0, 0,       FLAGS, "method" },
    { "garrote",   "garrote thresholding",          0,                 AV_OPT_TYPE_CONST, {.i64=2},   0, 0,       FLAGS, "method" },
    { "nsteps",    "set number of steps",           OFFSET(nsteps),    AV_OPT_TYPE_INT,   {.i64=6 },  1, 32,      FLAGS },
    { "percent",   "set percent of full denoising", OFFSET(percent),   AV_OPT_TYPE_FLOAT, {.dbl=85},  0, 100,     FLAGS },
    { "planes",    "set planes to filter",          OFFSET(planes),    AV_OPT_TYPE_INT,   {.i64=15 }, 0, 15,      FLAGS },
    { "type",      "set threshold type",            OFFSET(type),      AV_OPT_TYPE_INT,   {.i64=0 },  0, 1,       FLAGS, "type" },
    { "universal", "universal (VisuShrink)",        0,                 AV_OPT_TYPE_CONST, {.i64=0},   0, 0,       FLAGS, "type" },
    { "bayes",     "bayes (BayesShrink)",           0,                 AV_OPT_TYPE_CONST, {.i64=1},   0, 0,       FLAGS, "type" },
    { NULL }
};
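
/* Illustrative example of how these options are used from the command line
 * (option names and ranges as declared above; the values are only an example):
 *     ffmpeg -i in.mkv -vf vaguedenoiser=threshold=3:method=soft:nsteps=5:percent=85 out.mkv
 * "planes" is a bitmask selecting which planes to filter (15 = all), and
 * "type=bayes" switches from one universal threshold to per-subband
 * BayesShrink estimates. */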

AVFILTER_DEFINE_CLASS(vaguedenoiser);

#define NPAD 10

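/* Analysis/synthesis filter banks used by the wavelet transform below. The
 * 9-tap low-pass / 7-tap high-pass pair appears to be the Cohen-Daubechies-
 * Feauveau 9/7 biorthogonal wavelet; transform_step() convolves with the
 * analysis pair and decimates by two, invert_step() undoes it with the
 * synthesis pair. */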
static const float analysis_low[9] = {
    0.037828455506995f, -0.023849465019380f, -0.110624404418423f, 0.377402855612654f,
    0.852698679009403f, 0.377402855612654f, -0.110624404418423f, -0.023849465019380f, 0.037828455506995f
};

static const float analysis_high[7] = {
    -0.064538882628938f, 0.040689417609558f, 0.418092273222212f, -0.788485616405664f,
    0.418092273222212f, 0.040689417609558f, -0.064538882628938f
};

static const float synthesis_low[7] = {
    -0.064538882628938f, -0.040689417609558f, 0.418092273222212f, 0.788485616405664f,
    0.418092273222212f, -0.040689417609558f, -0.064538882628938f
};

static const float synthesis_high[9] = {
    -0.037828455506995f, -0.023849465019380f, 0.110624404418423f, 0.377402855612654f,
    -0.852698679009403f, 0.377402855612654f, 0.110624404418423f, -0.023849465019380f, -0.037828455506995f
};

static const enum AVPixelFormat pix_fmts[] = {
    AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY9, AV_PIX_FMT_GRAY10, AV_PIX_FMT_GRAY12, AV_PIX_FMT_GRAY14, AV_PIX_FMT_GRAY16,
    AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV444P,
    AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ411P,
    AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
    AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV440P10, AV_PIX_FMT_YUV444P10,
    AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV440P12, AV_PIX_FMT_YUV444P12,
    AV_PIX_FMT_YUV420P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV444P14,
    AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
    AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P,
    AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA444P9,
    AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA444P10,
    AV_PIX_FMT_YUVA422P12, AV_PIX_FMT_YUVA444P12,
    AV_PIX_FMT_YUVA420P16, AV_PIX_FMT_YUVA422P16, AV_PIX_FMT_YUVA444P16,
    AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10, AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14, AV_PIX_FMT_GBRP16,
    AV_PIX_FMT_GBRAP, AV_PIX_FMT_GBRAP10, AV_PIX_FMT_GBRAP12, AV_PIX_FMT_GBRAP16,
    AV_PIX_FMT_NONE
};

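/* Per-link setup: derive plane dimensions from the pixel format, allocate the
 * float work buffers, scale the user threshold to the input bit depth, and
 * clamp nsteps so the smallest filtered plane can still be halved that many
 * times. */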
static int config_input(AVFilterLink *inlink)
{
    VagueDenoiserContext *s = inlink->dst->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int p, i, nsteps_width, nsteps_height, nsteps_max;

    s->depth = desc->comp[0].depth;
    s->bpc = (s->depth + 7) / 8;
    s->nb_planes = desc->nb_components;

    s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
    s->planeheight[0] = s->planeheight[3] = inlink->h;
    s->planewidth[1]  = s->planewidth[2]  = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
    s->planewidth[0]  = s->planewidth[3]  = inlink->w;

    s->block = av_malloc_array(inlink->w * inlink->h, sizeof(*s->block));
    s->in    = av_malloc_array(32 + FFMAX(inlink->w, inlink->h), sizeof(*s->in));
    s->out   = av_malloc_array(32 + FFMAX(inlink->w, inlink->h), sizeof(*s->out));
    s->tmp   = av_malloc_array(32 + FFMAX(inlink->w, inlink->h), sizeof(*s->tmp));

    if (!s->block || !s->in || !s->out || !s->tmp)
        return AVERROR(ENOMEM);

    s->threshold *= 1 << (s->depth - 8);
    s->peak = (1 << s->depth) - 1;

    nsteps_width  = ((s->planes & 2 || s->planes & 4) && s->nb_planes > 1) ? s->planewidth[1]  : s->planewidth[0];
    nsteps_height = ((s->planes & 2 || s->planes & 4) && s->nb_planes > 1) ? s->planeheight[1] : s->planeheight[0];

    for (nsteps_max = 1; nsteps_max < 15; nsteps_max++) {
        if (pow(2, nsteps_max) >= nsteps_width || pow(2, nsteps_max) >= nsteps_height)
            break;
    }

    s->nsteps = FFMIN(s->nsteps, nsteps_max - 2);

    for (p = 0; p < 4; p++) {
        s->hlowsize[p][0]  = (s->planewidth[p] + 1) >> 1;
        s->hhighsize[p][0] = s->planewidth[p] >> 1;
        s->vlowsize[p][0]  = (s->planeheight[p] + 1) >> 1;
        s->vhighsize[p][0] = s->planeheight[p] >> 1;

        for (i = 1; i < s->nsteps; i++) {
            s->hlowsize[p][i]  = (s->hlowsize[p][i - 1] + 1) >> 1;
            s->hhighsize[p][i] = s->hlowsize[p][i - 1] >> 1;
            s->vlowsize[p][i]  = (s->vlowsize[p][i - 1] + 1) >> 1;
            s->vhighsize[p][i] = s->vlowsize[p][i - 1] >> 1;
        }
    }

    return 0;
}

static inline void copy(const float *p1, float *p2, const int length)
{
    memcpy(p2, p1, length * sizeof(float));
}

static inline void copyv(const float *p1, const int stride1, float *p2, const int length)
{
    int i;

    for (i = 0; i < length; i++) {
        p2[i] = *p1;
        p1 += stride1;
    }
}

static inline void copyh(const float *p1, float *p2, const int stride2, const int length)
{
    int i;

    for (i = 0; i < length; i++) {
        *p2 = p1[i];
        p2 += stride2;
    }
}

// Do symmetric extension of data using prescribed symmetries
// Original values are in output[npad] through output[npad+size-1]
// New values will be placed in output[0] through output[npad] and in output[npad+size] through output[2*npad+size-1] (note: end values may not be filled in)
// extension at left bdry is ... 3 2 1 0 | 0 1 2 3 ...
// same for right boundary
// if right_ext=1 then ... 3 2 1 0 | 1 2 3
static void symmetric_extension(float *output, const int size, const int left_ext, const int right_ext)
{
    int first = NPAD;
    int last = NPAD - 1 + size;
    const int originalLast = last;
    int i, nextend, idx;

    if (left_ext == 2)
        output[--first] = output[NPAD];
    if (right_ext == 2)
        output[++last] = output[originalLast];

    // extend left end
    nextend = first;
    for (i = 0; i < nextend; i++)
        output[--first] = output[NPAD + 1 + i];

    idx = NPAD + NPAD - 1 + size;

    // extend right end
    nextend = idx - last;
    for (i = 0; i < nextend; i++)
        output[++last] = output[originalLast - 1 - i];
}

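/* One level of the forward transform along a row or column: the signal (padded
 * with NPAD samples of symmetric extension on each side) is convolved with the
 * analysis filters and decimated by two; low-pass coefficients land in
 * output[NPAD .. NPAD+low_size), the high-pass coefficients directly after. */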
static void transform_step(float *input, float *output, const int size, const int low_size, VagueDenoiserContext *s)
{
    int i;

    symmetric_extension(input, size, 1, 1);

    for (i = NPAD; i < NPAD + low_size; i++) {
        const float a = input[2 * i - 14] * analysis_low[0];
        const float b = input[2 * i - 13] * analysis_low[1];
        const float c = input[2 * i - 12] * analysis_low[2];
        const float d = input[2 * i - 11] * analysis_low[3];
        const float e = input[2 * i - 10] * analysis_low[4];
        const float f = input[2 * i - 9] * analysis_low[3];
        const float g = input[2 * i - 8] * analysis_low[2];
        const float h = input[2 * i - 7] * analysis_low[1];
        const float k = input[2 * i - 6] * analysis_low[0];

        output[i] = a + b + c + d + e + f + g + h + k;
    }

    for (i = NPAD; i < NPAD + low_size; i++) {
        const float a = input[2 * i - 12] * analysis_high[0];
        const float b = input[2 * i - 11] * analysis_high[1];
        const float c = input[2 * i - 10] * analysis_high[2];
        const float d = input[2 * i - 9] * analysis_high[3];
        const float e = input[2 * i - 8] * analysis_high[2];
        const float f = input[2 * i - 7] * analysis_high[1];
        const float g = input[2 * i - 6] * analysis_high[0];

        output[i + low_size] = a + b + c + d + e + f + g;
    }
}

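/* One level of the inverse transform: the low and high halves of the input are
 * separately extended, upsampled and filtered with the synthesis pair, and
 * their contributions are accumulated into output. */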
static void invert_step(const float *input, float *output, float *temp, const int size, VagueDenoiserContext *s)
{
    const int low_size = (size + 1) >> 1;
    const int high_size = size >> 1;
    int left_ext = 1, right_ext, i;
    int findex;

    memcpy(temp + NPAD, input + NPAD, low_size * sizeof(float));

    right_ext = (size % 2 == 0) ? 2 : 1;
    symmetric_extension(temp, low_size, left_ext, right_ext);

    memset(output, 0, (NPAD + NPAD + size) * sizeof(float));
    findex = (size + 2) >> 1;

    for (i = 9; i < findex + 11; i++) {
        const float a = temp[i] * synthesis_low[0];
        const float b = temp[i] * synthesis_low[1];
        const float c = temp[i] * synthesis_low[2];
        const float d = temp[i] * synthesis_low[3];

        output[2 * i - 13] += a;
        output[2 * i - 12] += b;
        output[2 * i - 11] += c;
        output[2 * i - 10] += d;
        output[2 * i - 9] += c;
        output[2 * i - 8] += b;
        output[2 * i - 7] += a;
    }

    memcpy(temp + NPAD, input + NPAD + low_size, high_size * sizeof(float));

    left_ext = 2;
    right_ext = (size % 2 == 0) ? 1 : 2;
    symmetric_extension(temp, high_size, left_ext, right_ext);

    for (i = 8; i < findex + 11; i++) {
        const float a = temp[i] * synthesis_high[0];
        const float b = temp[i] * synthesis_high[1];
        const float c = temp[i] * synthesis_high[2];
        const float d = temp[i] * synthesis_high[3];
        const float e = temp[i] * synthesis_high[4];

        output[2 * i - 13] += a;
        output[2 * i - 12] += b;
        output[2 * i - 11] += c;
        output[2 * i - 10] += d;
        output[2 * i - 9] += e;
        output[2 * i - 8] += d;
        output[2 * i - 7] += c;
        output[2 * i - 6] += b;
        output[2 * i - 5] += a;
    }
}

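/* Coefficient shrinkage. All three variants scale coefficients whose magnitude
 * is below the threshold by (1 - percent/100). Above the threshold, "hard"
 * leaves them untouched, "soft" subtracts percent/100 of the threshold from
 * the magnitude, and "garrote" (qian_thresholding) scales by
 * (t*t - threshold*threshold*percent/100) / (t*t), t being the magnitude. */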
static void hard_thresholding(float *block, const int width, const int height,
                              const int stride, const float threshold,
                              const float percent)
{
    const float frac = 1.f - percent * 0.01f;
    int y, x;

    for (y = 0; y < height; y++) {
        for (x = 0; x < width; x++) {
            if (FFABS(block[x]) <= threshold)
                block[x] *= frac;
        }
        block += stride;
    }
}

static void soft_thresholding(float *block, const int width, const int height, const int stride,
                              const float threshold, const float percent)
{
    const float frac = 1.f - percent * 0.01f;
    const float shift = threshold * 0.01f * percent;
    int y, x;

    for (y = 0; y < height; y++) {
        for (x = 0; x < width; x++) {
            const float temp = FFABS(block[x]);
            if (temp <= threshold)
                block[x] *= frac;
            else
                block[x] = (block[x] < 0.f ? -1.f : (block[x] > 0.f ? 1.f : 0.f)) * (temp - shift);
        }
        block += stride;
    }
}

static void qian_thresholding(float *block, const int width, const int height,
                              const int stride, const float threshold,
                              const float percent)
{
    const float percent01 = percent * 0.01f;
    const float tr2 = threshold * threshold * percent01;
    const float frac = 1.f - percent01;
    int y, x;

    for (y = 0; y < height; y++) {
        for (x = 0; x < width; x++) {
            const float temp = FFABS(block[x]);
            if (temp <= threshold) {
                block[x] *= frac;
            } else {
                const float tp2 = temp * temp;
                block[x] *= (tp2 - tr2) / tp2;
            }
        }
        block += stride;
    }
}

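/* Per-subband threshold estimate used when type=bayes (BayesShrink): the mean
 * squared coefficient of the subband stands in for its variance, and the
 * returned threshold is threshold^2 / sqrt(mean - threshold), with the
 * denominator clamped away from zero. */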
static float bayes_threshold(float *block, const int width, const int height,
                             const int stride, const float threshold)
{
    float mean = 0.f;

    for (int y = 0; y < height; y++) {
        for (int x = 0; x < width; x++) {
            mean += block[x] * block[x];
        }
        block += stride;
    }

    mean /= width * height;

    return threshold * threshold / (FFMAX(sqrtf(mean - threshold), FLT_EPSILON));
}

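/* Denoise one frame: for each selected plane, copy the pixels into the float
 * work block, run nsteps levels of the 2-D forward transform (rows, then
 * columns), shrink the coefficients (globally, or per subband when type=bayes),
 * run the inverse transform, and write the result back clipped to the plane's
 * bit depth. Unselected planes are copied through untouched. */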
static void filter(VagueDenoiserContext *s, AVFrame *in, AVFrame *out)
{
    int p, y, x, i, j;

    for (p = 0; p < s->nb_planes; p++) {
        const int height = s->planeheight[p];
        const int width = s->planewidth[p];
        const uint8_t *srcp8 = in->data[p];
        const uint16_t *srcp16 = (const uint16_t *)in->data[p];
        uint8_t *dstp8 = out->data[p];
        uint16_t *dstp16 = (uint16_t *)out->data[p];
        float *output = s->block;
        int h_low_size0 = width;
        int v_low_size0 = height;
        int nsteps_transform = s->nsteps;
        int nsteps_invert = s->nsteps;
        const float *input = s->block;

        if (!((1 << p) & s->planes)) {
            av_image_copy_plane(out->data[p], out->linesize[p], in->data[p], in->linesize[p],
                                s->planewidth[p] * s->bpc, s->planeheight[p]);
            continue;
        }

        if (s->depth <= 8) {
            for (y = 0; y < height; y++) {
                for (x = 0; x < width; x++)
                    output[x] = srcp8[x];
                srcp8 += in->linesize[p];
                output += width;
            }
        } else {
            for (y = 0; y < height; y++) {
                for (x = 0; x < width; x++)
                    output[x] = srcp16[x];
                srcp16 += in->linesize[p] / 2;
                output += width;
            }
        }

        while (nsteps_transform--) {
            int low_size = (h_low_size0 + 1) >> 1;
            float *input = s->block;
            for (j = 0; j < v_low_size0; j++) {
                copy(input, s->in + NPAD, h_low_size0);
                transform_step(s->in, s->out, h_low_size0, low_size, s);
                copy(s->out + NPAD, input, h_low_size0);
                input += width;
            }

            low_size = (v_low_size0 + 1) >> 1;
            input = s->block;
            for (j = 0; j < h_low_size0; j++) {
                copyv(input, width, s->in + NPAD, v_low_size0);
                transform_step(s->in, s->out, v_low_size0, low_size, s);
                copyh(s->out + NPAD, input, width, v_low_size0);
                input++;
            }

            h_low_size0 = (h_low_size0 + 1) >> 1;
            v_low_size0 = (v_low_size0 + 1) >> 1;
        }

        if (s->type == 0) {
            s->thresholding(s->block, width, height, width, s->threshold, s->percent);
        } else {
            for (int n = 0; n < s->nsteps; n++) {
                float threshold;
                float *block;

                if (n == s->nsteps - 1) {
                    threshold = bayes_threshold(s->block, s->hlowsize[p][n], s->vlowsize[p][n], width, s->threshold);
                    s->thresholding(s->block, s->hlowsize[p][n], s->vlowsize[p][n], width, threshold, s->percent);
                }
                block = s->block + s->hlowsize[p][n];
                threshold = bayes_threshold(block, s->hhighsize[p][n], s->vlowsize[p][n], width, s->threshold);
                s->thresholding(block, s->hhighsize[p][n], s->vlowsize[p][n], width, threshold, s->percent);
                block = s->block + s->vlowsize[p][n] * width;
                threshold = bayes_threshold(block, s->hlowsize[p][n], s->vhighsize[p][n], width, s->threshold);
                s->thresholding(block, s->hlowsize[p][n], s->vhighsize[p][n], width, threshold, s->percent);
                block = s->block + s->hlowsize[p][n] + s->vlowsize[p][n] * width;
                threshold = bayes_threshold(block, s->hhighsize[p][n], s->vhighsize[p][n], width, s->threshold);
                s->thresholding(block, s->hhighsize[p][n], s->vhighsize[p][n], width, threshold, s->percent);
            }
        }

        while (nsteps_invert--) {
            const int idx = s->vlowsize[p][nsteps_invert] + s->vhighsize[p][nsteps_invert];
            const int idx2 = s->hlowsize[p][nsteps_invert] + s->hhighsize[p][nsteps_invert];
            float *idx3 = s->block;
            for (i = 0; i < idx2; i++) {
                copyv(idx3, width, s->in + NPAD, idx);
                invert_step(s->in, s->out, s->tmp, idx, s);
                copyh(s->out + NPAD, idx3, width, idx);
                idx3++;
            }

            idx3 = s->block;
            for (i = 0; i < idx; i++) {
                copy(idx3, s->in + NPAD, idx2);
                invert_step(s->in, s->out, s->tmp, idx2, s);
                copy(s->out + NPAD, idx3, idx2);
                idx3 += width;
            }
        }

        if (s->depth <= 8) {
            for (y = 0; y < height; y++) {
                for (x = 0; x < width; x++)
                    dstp8[x] = av_clip_uint8(input[x] + 0.5f);
                input += width;
                dstp8 += out->linesize[p];
            }
        } else {
            for (y = 0; y < height; y++) {
                for (x = 0; x < width; x++)
                    dstp16[x] = av_clip(input[x] + 0.5f, 0, s->peak);
                input += width;
                dstp16 += out->linesize[p] / 2;
            }
        }
    }
}

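/* Frame callback: reuse the input frame when it is writable, otherwise grab an
 * output buffer and copy the frame properties, then filter and pass the result
 * downstream. */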
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    VagueDenoiserContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out;
    int direct = av_frame_is_writable(in);

    if (direct) {
        out = in;
    } else {
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }

        av_frame_copy_props(out, in);
    }

    filter(s, in, out);

    if (!direct)
        av_frame_free(&in);

    return ff_filter_frame(outlink, out);
}

static av_cold int init(AVFilterContext *ctx)
{
    VagueDenoiserContext *s = ctx->priv;

    switch (s->method) {
    case 0:
        s->thresholding = hard_thresholding;
        break;
    case 1:
        s->thresholding = soft_thresholding;
        break;
    case 2:
        s->thresholding = qian_thresholding;
        break;
    }

    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    VagueDenoiserContext *s = ctx->priv;

    av_freep(&s->block);
    av_freep(&s->in);
    av_freep(&s->out);
    av_freep(&s->tmp);
}

static const AVFilterPad vaguedenoiser_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input,
        .filter_frame = filter_frame,
    },
};

const AVFilter ff_vf_vaguedenoiser = {
    .name          = "vaguedenoiser",
    .description   = NULL_IF_CONFIG_SMALL("Apply a Wavelet based Denoiser."),
    .priv_size     = sizeof(VagueDenoiserContext),
    .priv_class    = &vaguedenoiser_class,
    .init          = init,
    .uninit        = uninit,
    FILTER_INPUTS(vaguedenoiser_inputs),
    FILTER_OUTPUTS(ff_video_default_filterpad),
    FILTER_PIXFMTS_ARRAY(pix_fmts),
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};