FFmpeg
vf_vaguedenoiser.c
/*
 * Copyright (c) 2003 LeFunGus, lefungus@altern.org
 *
 * This file is part of FFmpeg
 *
 * FFmpeg is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <float.h>

#include "libavutil/imgutils.h"
#include "libavutil/attributes.h"
#include "libavutil/common.h"
#include "libavutil/pixdesc.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/opt.h"

#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"

typedef struct VagueDenoiserContext {
    const AVClass *class;

    float threshold;
    float percent;
    int method;
    int nsteps;
    int planes;

    int depth;
    int bpc;
    int peak;
    int nb_planes;
    int planeheight[4];
    int planewidth[4];

    float *block;
    float *in;
    float *out;
    float *tmp;

    int hlowsize[4][32];
    int hhighsize[4][32];
    int vlowsize[4][32];
    int vhighsize[4][32];

    void (*thresholding)(float *block, const int width, const int height,
                         const int stride, const float threshold,
                         const float percent, const int nsteps);
} VagueDenoiserContext;

#define OFFSET(x) offsetof(VagueDenoiserContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
static const AVOption vaguedenoiser_options[] = {
    { "threshold", "set filtering strength",        OFFSET(threshold), AV_OPT_TYPE_FLOAT, {.dbl=2.},  0, DBL_MAX, FLAGS },
    { "method",    "set filtering method",          OFFSET(method),    AV_OPT_TYPE_INT,   {.i64=2 },  0, 2,       FLAGS, "method" },
    { "hard",      "hard thresholding",             0,                 AV_OPT_TYPE_CONST, {.i64=0},   0, 0,       FLAGS, "method" },
    { "soft",      "soft thresholding",             0,                 AV_OPT_TYPE_CONST, {.i64=1},   0, 0,       FLAGS, "method" },
    { "garrote",   "garotte thresholding",          0,                 AV_OPT_TYPE_CONST, {.i64=2},   0, 0,       FLAGS, "method" },
    { "nsteps",    "set number of steps",           OFFSET(nsteps),    AV_OPT_TYPE_INT,   {.i64=6 },  1, 32,      FLAGS },
    { "percent",   "set percent of full denoising", OFFSET(percent),   AV_OPT_TYPE_FLOAT, {.dbl=85},  0, 100,     FLAGS },
    { "planes",    "set planes to filter",          OFFSET(planes),    AV_OPT_TYPE_INT,   {.i64=15 }, 0, 15,      FLAGS },
    { NULL }
};
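
/*
 * Illustrative usage (not from the original file): these options are set with
 * the usual libavfilter option syntax, e.g.
 *     ffmpeg -i in.mkv -vf vaguedenoiser=threshold=3:method=soft:nsteps=5 out.mkv
 * Unset options keep the defaults above (threshold=2, method=garrote,
 * nsteps=6, percent=85, planes=15, i.e. all planes).
 */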

AVFILTER_DEFINE_CLASS(vaguedenoiser);

#define NPAD 10

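/* Analysis/synthesis taps of the symmetric biorthogonal wavelet pair used by
 * transform_step()/invert_step() below; they appear to be 9/7-tap filters
 * scaled by sqrt(2). The NPAD padding samples on each side of the scratch
 * buffers leave room for the symmetric boundary extension. */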
static const float analysis_low[9] = {
    0.037828455506995f, -0.023849465019380f, -0.110624404418423f, 0.377402855612654f,
    0.852698679009403f, 0.377402855612654f, -0.110624404418423f, -0.023849465019380f, 0.037828455506995f
};

static const float analysis_high[7] = {
    -0.064538882628938f, 0.040689417609558f, 0.418092273222212f, -0.788485616405664f,
    0.418092273222212f, 0.040689417609558f, -0.064538882628938f
};

static const float synthesis_low[7] = {
    -0.064538882628938f, -0.040689417609558f, 0.418092273222212f, 0.788485616405664f,
    0.418092273222212f, -0.040689417609558f, -0.064538882628938f
};

static const float synthesis_high[9] = {
    -0.037828455506995f, -0.023849465019380f, 0.110624404418423f, 0.377402855612654f,
    -0.852698679009403f, 0.377402855612654f, 0.110624404418423f, -0.023849465019380f, -0.037828455506995f
};

static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY9, AV_PIX_FMT_GRAY10,
        AV_PIX_FMT_GRAY12, AV_PIX_FMT_GRAY14, AV_PIX_FMT_GRAY16,
        AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P,
        AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P,
        AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV444P,
        AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P,
        AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ444P,
        AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
        AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV440P10, AV_PIX_FMT_YUV444P10,
        AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV440P12, AV_PIX_FMT_YUV444P12,
        AV_PIX_FMT_YUV420P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV444P14,
        AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
        AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10,
        AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14, AV_PIX_FMT_GBRP16,
        AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P,
        AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA444P9,
        AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA444P10,
        AV_PIX_FMT_YUVA422P12, AV_PIX_FMT_YUVA444P12,
        AV_PIX_FMT_YUVA420P16, AV_PIX_FMT_YUVA422P16, AV_PIX_FMT_YUVA444P16,
        AV_PIX_FMT_GBRAP, AV_PIX_FMT_GBRAP10, AV_PIX_FMT_GBRAP12, AV_PIX_FMT_GBRAP16,
        AV_PIX_FMT_NONE
    };
    AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
    if (!fmts_list)
        return AVERROR(ENOMEM);
    return ff_set_common_formats(ctx, fmts_list);
}

static int config_input(AVFilterLink *inlink)
{
    VagueDenoiserContext *s = inlink->dst->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int p, i, nsteps_width, nsteps_height, nsteps_max;

    s->depth = desc->comp[0].depth;
    s->bpc = (s->depth + 7) / 8;
    s->nb_planes = desc->nb_components;

    s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
    s->planeheight[0] = s->planeheight[3] = inlink->h;
    s->planewidth[1]  = s->planewidth[2]  = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
    s->planewidth[0]  = s->planewidth[3]  = inlink->w;

    s->block = av_malloc_array(inlink->w * inlink->h, sizeof(*s->block));
    s->in    = av_malloc_array(32 + FFMAX(inlink->w, inlink->h), sizeof(*s->in));
    s->out   = av_malloc_array(32 + FFMAX(inlink->w, inlink->h), sizeof(*s->out));
    s->tmp   = av_malloc_array(32 + FFMAX(inlink->w, inlink->h), sizeof(*s->tmp));

    if (!s->block || !s->in || !s->out || !s->tmp)
        return AVERROR(ENOMEM);

    /* scale the 8-bit-referenced threshold to the input bit depth */
    s->threshold *= 1 << (s->depth - 8);
    s->peak = (1 << s->depth) - 1;

    /* limit the number of decomposition levels to what the smallest filtered plane supports */
    nsteps_width  = ((s->planes & 2 || s->planes & 4) && s->nb_planes > 1) ? s->planewidth[1]  : s->planewidth[0];
    nsteps_height = ((s->planes & 2 || s->planes & 4) && s->nb_planes > 1) ? s->planeheight[1] : s->planeheight[0];

    for (nsteps_max = 1; nsteps_max < 15; nsteps_max++) {
        if (pow(2, nsteps_max) >= nsteps_width || pow(2, nsteps_max) >= nsteps_height)
            break;
    }

    s->nsteps = FFMIN(s->nsteps, nsteps_max - 2);

    for (p = 0; p < 4; p++) {
        s->hlowsize[p][0]  = (s->planewidth[p] + 1) >> 1;
        s->hhighsize[p][0] = s->planewidth[p] >> 1;
        s->vlowsize[p][0]  = (s->planeheight[p] + 1) >> 1;
        s->vhighsize[p][0] = s->planeheight[p] >> 1;

        for (i = 1; i < s->nsteps; i++) {
            s->hlowsize[p][i]  = (s->hlowsize[p][i - 1] + 1) >> 1;
            s->hhighsize[p][i] = s->hlowsize[p][i - 1] >> 1;
            s->vlowsize[p][i]  = (s->vlowsize[p][i - 1] + 1) >> 1;
            s->vhighsize[p][i] = s->vlowsize[p][i - 1] >> 1;
        }
    }

    return 0;
}

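/* Helpers that move one row or one column between the image-sized work buffer
 * and the contiguous 1-D scratch buffers used by the transform passes. */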
static inline void copy(const float *p1, float *p2, const int length)
{
    memcpy(p2, p1, length * sizeof(float));
}

static inline void copyv(const float *p1, const int stride1, float *p2, const int length)
{
    int i;

    for (i = 0; i < length; i++) {
        p2[i] = *p1;
        p1 += stride1;
    }
}

static inline void copyh(const float *p1, float *p2, const int stride2, const int length)
{
    int i;

    for (i = 0; i < length; i++) {
        *p2 = p1[i];
        p2 += stride2;
    }
}

// Do symmetric extension of data using prescribed symmetries
// Original values are in output[npad] through output[npad+size-1]
// New values will be placed in output[0] through output[npad-1] and in output[npad+size] through output[2*npad+size-1] (note: end values may not be filled in)
// extension at left boundary is ... 3 2 1 0 | 0 1 2 3 ...
// same for right boundary
// if right_ext=1 then ... 3 2 1 0 | 1 2 3
static void symmetric_extension(float *output, const int size, const int left_ext, const int right_ext)
{
    int first = NPAD;
    int last = NPAD - 1 + size;
    const int originalLast = last;
    int i, nextend, idx;

    if (left_ext == 2)
        output[--first] = output[NPAD];
    if (right_ext == 2)
        output[++last] = output[originalLast];

    // extend left end
    nextend = first;
    for (i = 0; i < nextend; i++)
        output[--first] = output[NPAD + 1 + i];

    idx = NPAD + NPAD - 1 + size;

    // extend right end
    nextend = idx - last;
    for (i = 0; i < nextend; i++)
        output[++last] = output[originalLast - 1 - i];
}

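/* One forward transform level along a single row or column: the input is
 * symmetrically extended, then convolved with the analysis low-pass and
 * high-pass taps and decimated by two; the low band is written to
 * output[NPAD .. NPAD+low_size-1] and the high band right after it. */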
static void transform_step(float *input, float *output, const int size, const int low_size, VagueDenoiserContext *s)
{
    int i;

    symmetric_extension(input, size, 1, 1);

    for (i = NPAD; i < NPAD + low_size; i++) {
        const float a = input[2 * i - 14] * analysis_low[0];
        const float b = input[2 * i - 13] * analysis_low[1];
        const float c = input[2 * i - 12] * analysis_low[2];
        const float d = input[2 * i - 11] * analysis_low[3];
        const float e = input[2 * i - 10] * analysis_low[4];
        const float f = input[2 * i -  9] * analysis_low[3];
        const float g = input[2 * i -  8] * analysis_low[2];
        const float h = input[2 * i -  7] * analysis_low[1];
        const float k = input[2 * i -  6] * analysis_low[0];

        output[i] = a + b + c + d + e + f + g + h + k;
    }

    for (i = NPAD; i < NPAD + low_size; i++) {
        const float a = input[2 * i - 12] * analysis_high[0];
        const float b = input[2 * i - 11] * analysis_high[1];
        const float c = input[2 * i - 10] * analysis_high[2];
        const float d = input[2 * i -  9] * analysis_high[3];
        const float e = input[2 * i -  8] * analysis_high[2];
        const float f = input[2 * i -  7] * analysis_high[1];
        const float g = input[2 * i -  6] * analysis_high[0];

        output[i + low_size] = a + b + c + d + e + f + g;
    }
}

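/* One inverse transform level: the low and high sub-bands are extended
 * separately, upsampled and convolved with the synthesis taps, and their
 * contributions are accumulated into the reconstructed row or column. */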
static void invert_step(const float *input, float *output, float *temp, const int size, VagueDenoiserContext *s)
{
    const int low_size = (size + 1) >> 1;
    const int high_size = size >> 1;
    int left_ext = 1, right_ext, i;
    int findex;

    memcpy(temp + NPAD, input + NPAD, low_size * sizeof(float));

    right_ext = (size % 2 == 0) ? 2 : 1;
    symmetric_extension(temp, low_size, left_ext, right_ext);

    memset(output, 0, (NPAD + NPAD + size) * sizeof(float));
    findex = (size + 2) >> 1;

    for (i = 9; i < findex + 11; i++) {
        const float a = temp[i] * synthesis_low[0];
        const float b = temp[i] * synthesis_low[1];
        const float c = temp[i] * synthesis_low[2];
        const float d = temp[i] * synthesis_low[3];

        output[2 * i - 13] += a;
        output[2 * i - 12] += b;
        output[2 * i - 11] += c;
        output[2 * i - 10] += d;
        output[2 * i -  9] += c;
        output[2 * i -  8] += b;
        output[2 * i -  7] += a;
    }

    memcpy(temp + NPAD, input + NPAD + low_size, high_size * sizeof(float));

    left_ext = 2;
    right_ext = (size % 2 == 0) ? 1 : 2;
    symmetric_extension(temp, high_size, left_ext, right_ext);

    for (i = 8; i < findex + 11; i++) {
        const float a = temp[i] * synthesis_high[0];
        const float b = temp[i] * synthesis_high[1];
        const float c = temp[i] * synthesis_high[2];
        const float d = temp[i] * synthesis_high[3];
        const float e = temp[i] * synthesis_high[4];

        output[2 * i - 13] += a;
        output[2 * i - 12] += b;
        output[2 * i - 11] += c;
        output[2 * i - 10] += d;
        output[2 * i -  9] += e;
        output[2 * i -  8] += d;
        output[2 * i -  7] += c;
        output[2 * i -  6] += b;
        output[2 * i -  5] += a;
    }
}

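/* Hard thresholding: coefficients whose magnitude is at or below the
 * threshold are scaled by (1 - percent/100); larger coefficients pass
 * through unchanged. */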
static void hard_thresholding(float *block, const int width, const int height,
                              const int stride, const float threshold,
                              const float percent, const int unused)
{
    const float frac = 1.f - percent * 0.01f;
    int y, x;

    for (y = 0; y < height; y++) {
        for (x = 0; x < width; x++) {
            if (FFABS(block[x]) <= threshold)
                block[x] *= frac;
        }
        block += stride;
    }
}

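/* Soft thresholding: small coefficients are attenuated as above, larger ones
 * are shrunk towards zero by threshold*percent/100. The coarsest low-pass
 * band (the w x h block remaining after nsteps halvings) is skipped so the
 * overall brightness is preserved. */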
static void soft_thresholding(float *block, const int width, const int height, const int stride,
                              const float threshold, const float percent, const int nsteps)
{
    const float frac = 1.f - percent * 0.01f;
    const float shift = threshold * 0.01f * percent;
    int w = width;
    int h = height;
    int y, x, l;

    for (l = 0; l < nsteps; l++) {
        w = (w + 1) >> 1;
        h = (h + 1) >> 1;
    }

    for (y = 0; y < height; y++) {
        const int x0 = (y < h) ? w : 0;
        for (x = x0; x < width; x++) {
            const float temp = FFABS(block[x]);
            if (temp <= threshold)
                block[x] *= frac;
            else
                block[x] = (block[x] < 0.f ? -1.f : (block[x] > 0.f ? 1.f : 0.f)) * (temp - shift);
        }
        block += stride;
    }
}

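/* Qian (garrote) thresholding, selected with method=garrote: small
 * coefficients are attenuated, larger ones are scaled by
 * (x^2 - percent/100 * threshold^2) / x^2, a smoother shrinkage curve. */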
static void qian_thresholding(float *block, const int width, const int height,
                              const int stride, const float threshold,
                              const float percent, const int unused)
{
    const float percent01 = percent * 0.01f;
    const float tr2 = threshold * threshold * percent01;
    const float frac = 1.f - percent01;
    int y, x;

    for (y = 0; y < height; y++) {
        for (x = 0; x < width; x++) {
            const float temp = FFABS(block[x]);
            if (temp <= threshold) {
                block[x] *= frac;
            } else {
                const float tp2 = temp * temp;
                block[x] *= (tp2 - tr2) / tp2;
            }
        }
        block += stride;
    }
}

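/* Per-plane pipeline: load the plane into the float work buffer, run nsteps
 * levels of the separable forward transform (rows, then columns), apply the
 * selected thresholding to the wavelet coefficients, run the matching inverse
 * transform, and store the result clipped to the plane's bit depth. */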
static void filter(VagueDenoiserContext *s, AVFrame *in, AVFrame *out)
{
    int p, y, x, i, j;

    for (p = 0; p < s->nb_planes; p++) {
        const int height = s->planeheight[p];
        const int width = s->planewidth[p];
        const uint8_t *srcp8 = in->data[p];
        const uint16_t *srcp16 = (const uint16_t *)in->data[p];
        uint8_t *dstp8 = out->data[p];
        uint16_t *dstp16 = (uint16_t *)out->data[p];
        float *output = s->block;
        int h_low_size0 = width;
        int v_low_size0 = height;
        int nsteps_transform = s->nsteps;
        int nsteps_invert = s->nsteps;
        const float *input = s->block;

        /* planes not selected for filtering are copied through untouched */
        if (!((1 << p) & s->planes)) {
            av_image_copy_plane(out->data[p], out->linesize[p], in->data[p], in->linesize[p],
                                s->planewidth[p] * s->bpc, s->planeheight[p]);
            continue;
        }

        if (s->depth <= 8) {
            for (y = 0; y < height; y++) {
                for (x = 0; x < width; x++)
                    output[x] = srcp8[x];
                srcp8 += in->linesize[p];
                output += width;
            }
        } else {
            for (y = 0; y < height; y++) {
                for (x = 0; x < width; x++)
                    output[x] = srcp16[x];
                srcp16 += in->linesize[p] / 2;
                output += width;
            }
        }

        /* forward transform: rows then columns, shrinking the low band each level */
        while (nsteps_transform--) {
            int low_size = (h_low_size0 + 1) >> 1;
            float *input = s->block;
            for (j = 0; j < v_low_size0; j++) {
                copy(input, s->in + NPAD, h_low_size0);
                transform_step(s->in, s->out, h_low_size0, low_size, s);
                copy(s->out + NPAD, input, h_low_size0);
                input += width;
            }

            low_size = (v_low_size0 + 1) >> 1;
            input = s->block;
            for (j = 0; j < h_low_size0; j++) {
                copyv(input, width, s->in + NPAD, v_low_size0);
                transform_step(s->in, s->out, v_low_size0, low_size, s);
                copyh(s->out + NPAD, input, width, v_low_size0);
                input++;
            }

            h_low_size0 = (h_low_size0 + 1) >> 1;
            v_low_size0 = (v_low_size0 + 1) >> 1;
        }

        s->thresholding(s->block, width, height, width, s->threshold, s->percent, s->nsteps);

        /* inverse transform, from the coarsest level back up */
        while (nsteps_invert--) {
            const int idx = s->vlowsize[p][nsteps_invert] + s->vhighsize[p][nsteps_invert];
            const int idx2 = s->hlowsize[p][nsteps_invert] + s->hhighsize[p][nsteps_invert];
            float *idx3 = s->block;
            for (i = 0; i < idx2; i++) {
                copyv(idx3, width, s->in + NPAD, idx);
                invert_step(s->in, s->out, s->tmp, idx, s);
                copyh(s->out + NPAD, idx3, width, idx);
                idx3++;
            }

            idx3 = s->block;
            for (i = 0; i < idx; i++) {
                copy(idx3, s->in + NPAD, idx2);
                invert_step(s->in, s->out, s->tmp, idx2, s);
                copy(s->out + NPAD, idx3, idx2);
                idx3 += width;
            }
        }

        if (s->depth <= 8) {
            for (y = 0; y < height; y++) {
                for (x = 0; x < width; x++)
                    dstp8[x] = av_clip_uint8(input[x] + 0.5f);
                input += width;
                dstp8 += out->linesize[p];
            }
        } else {
            for (y = 0; y < height; y++) {
                for (x = 0; x < width; x++)
                    dstp16[x] = av_clip(input[x] + 0.5f, 0, s->peak);
                input += width;
                dstp16 += out->linesize[p] / 2;
            }
        }
    }
}

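/* Frame callback: filter in place when the input frame is writable, otherwise
 * allocate an output buffer and copy the frame properties before filtering. */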
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    VagueDenoiserContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out;
    int direct = av_frame_is_writable(in);

    if (direct) {
        out = in;
    } else {
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }

        av_frame_copy_props(out, in);
    }

    filter(s, in, out);

    if (!direct)
        av_frame_free(&in);

    return ff_filter_frame(outlink, out);
}

static av_cold int init(AVFilterContext *ctx)
{
    VagueDenoiserContext *s = ctx->priv;

    switch (s->method) {
    case 0:
        s->thresholding = hard_thresholding;
        break;
    case 1:
        s->thresholding = soft_thresholding;
        break;
    case 2:
        s->thresholding = qian_thresholding;
        break;
    }

    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    VagueDenoiserContext *s = ctx->priv;

    av_freep(&s->block);
    av_freep(&s->in);
    av_freep(&s->out);
    av_freep(&s->tmp);
}

static const AVFilterPad vaguedenoiser_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad vaguedenoiser_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO
    },
    { NULL }
};

AVFilter ff_vf_vaguedenoiser = {
    .name          = "vaguedenoiser",
    .description   = NULL_IF_CONFIG_SMALL("Apply a Wavelet based Denoiser."),
    .priv_size     = sizeof(VagueDenoiserContext),
    .priv_class    = &vaguedenoiser_class,
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = vaguedenoiser_inputs,
    .outputs       = vaguedenoiser_outputs,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};