vf_bm3d.c
1 /*
2  * Copyright (c) 2015-2016 mawen1250
3  * Copyright (c) 2018 Paul B Mahol
4  *
5  * This file is part of FFmpeg.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a copy
8  * of this software and associated documentation files (the "Software"), to deal
9  * in the Software without restriction, including without limitation the rights
10  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11  * copies of the Software, and to permit persons to whom the Software is
12  * furnished to do so, subject to the following conditions:
13  *
14  * The above copyright notice and this permission notice shall be included in all
15  * copies or substantial portions of the Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
20  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23  * SOFTWARE.
24  */
25 
26 /**
27  * @todo
28  * - non-power of 2 DCT
29  * - opponent color space
30  * - temporal support
31  */
32 
33 #include <float.h>
34 
35 #include "libavutil/avassert.h"
36 #include "libavutil/imgutils.h"
37 #include "libavutil/opt.h"
38 #include "libavutil/pixdesc.h"
39 #include "libavcodec/avfft.h"
40 #include "avfilter.h"
41 #include "filters.h"
42 #include "formats.h"
43 #include "framesync.h"
44 #include "internal.h"
45 #include "video.h"
46 
47 #define MAX_NB_THREADS 32
48 
49 enum FilterModes {
50  BASIC,
51  FINAL,
52  NB_MODES,
53 };
54 
55 typedef struct ThreadData {
56  const uint8_t *src;
57  int src_linesize;
58  const uint8_t *ref;
59  int ref_linesize;
60  int plane;
61 } ThreadData;
62 
63 typedef struct PosCode {
64  int x, y;
65 } PosCode;
66 
67 typedef struct PosPairCode {
68  double score;
69  int x, y;
70 } PosPairCode;
71 
72 typedef struct SliceContext {
73  DCTContext *gdctf, *gdcti;
74  DCTContext *dctf, *dcti;
75  FFTSample *bufferh;
76  FFTSample *bufferv;
77  FFTSample *bufferz;
78  FFTSample *buffer;
79  FFTSample *rbufferh;
80  FFTSample *rbufferv;
81  FFTSample *rbufferz;
82  FFTSample *rbuffer;
83  float *num, *den;
84  PosPairCode match_blocks[256];
85  int nb_match_blocks;
86  PosCode *search_positions;
87 } SliceContext;
88 
89 typedef struct BM3DContext {
90  const AVClass *class;
91 
92  float sigma;
93  int block_size;
94  int block_step;
95  int group_size;
96  int bm_range;
97  int bm_step;
98  float th_mse;
99  float hard_threshold;
100  int mode;
101  int ref;
102  int planes;
103 
104  int depth;
105  int max;
106  int nb_planes;
107  int planewidth[4];
108  int planeheight[4];
109  int group_bits;
110  int pgroup_size;
111 
112  SliceContext slices[MAX_NB_THREADS];
113 
114  FFFrameSync fs;
115  int nb_threads;
116 
117  void (*get_block_row)(const uint8_t *srcp, int src_linesize,
118  int y, int x, int block_size, float *dst);
119  double (*do_block_ssd)(struct BM3DContext *s, PosCode *pos,
120  const uint8_t *src, int src_stride,
121  int r_y, int r_x);
122  void (*do_output)(struct BM3DContext *s, uint8_t *dst, int dst_linesize,
123  int plane, int nb_jobs);
124  void (*block_filtering)(struct BM3DContext *s,
125  const uint8_t *src, int src_linesize,
126  const uint8_t *ref, int ref_linesize,
127  int y, int x, int plane, int jobnr);
128 } BM3DContext;
129 
130 #define OFFSET(x) offsetof(BM3DContext, x)
131 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
132 static const AVOption bm3d_options[] = {
133  { "sigma", "set denoising strength",
134  OFFSET(sigma), AV_OPT_TYPE_FLOAT, {.dbl=1}, 0, 99999.9, FLAGS },
135  { "block", "set log2(size) of local patch",
136  OFFSET(block_size), AV_OPT_TYPE_INT, {.i64=4}, 4, 6, FLAGS },
137  { "bstep", "set sliding step for processing blocks",
138  OFFSET(block_step), AV_OPT_TYPE_INT, {.i64=4}, 1, 64, FLAGS },
139  { "group", "set maximal number of similar blocks",
140  OFFSET(group_size), AV_OPT_TYPE_INT, {.i64=1}, 1, 256, FLAGS },
141  { "range", "set block matching range",
142  OFFSET(bm_range), AV_OPT_TYPE_INT, {.i64=9}, 1, INT32_MAX, FLAGS },
143  { "mstep", "set step for block matching",
144  OFFSET(bm_step), AV_OPT_TYPE_INT, {.i64=1}, 1, 64, FLAGS },
145  { "thmse", "set threshold of mean square error for block matching",
146  OFFSET(th_mse), AV_OPT_TYPE_FLOAT, {.dbl=0}, 0, INT32_MAX, FLAGS },
147  { "hdthr", "set hard threshold for 3D transfer domain",
148  OFFSET(hard_threshold), AV_OPT_TYPE_FLOAT, {.dbl=2.7}, 0, INT32_MAX, FLAGS },
149  { "estim", "set filtering estimation mode",
150  OFFSET(mode), AV_OPT_TYPE_INT, {.i64=BASIC}, 0, NB_MODES-1, FLAGS, "mode" },
151  { "basic", "basic estimate",
152  0, AV_OPT_TYPE_CONST, {.i64=BASIC}, 0, 0, FLAGS, "mode" },
153  { "final", "final estimate",
154  0, AV_OPT_TYPE_CONST, {.i64=FINAL}, 0, 0, FLAGS, "mode" },
155  { "ref", "have reference stream",
156  OFFSET(ref), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS },
157  { "planes", "set planes to filter",
158  OFFSET(planes), AV_OPT_TYPE_INT, {.i64=7}, 0, 15, FLAGS },
159  { NULL }
160 };
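/*
 * Usage sketch (illustrative, assuming the standard ffmpeg CLI; the option
 * names are the ones defined in bm3d_options above). A single basic estimate:
 *
 *   ffmpeg -i input.mkv -vf "bm3d=sigma=3:block=4:bstep=4:group=1:estim=basic" output.mkv
 *
 * A graph that feeds the basic estimate back as the reference stream for the
 * final (Wiener) estimate could look roughly like:
 *
 *   split[a][b],[a]bm3d=sigma=3:estim=basic[a],[b][a]bm3d=sigma=3:estim=final:ref=1
 */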
161 
162 AVFILTER_DEFINE_CLASS(bm3d);
163 
164 static int query_formats(AVFilterContext *ctx)
165 {
166  static const enum AVPixelFormat pix_fmts[] = {
185  };
186 
187  AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
188  if (!fmts_list)
189  return AVERROR(ENOMEM);
190  return ff_set_common_formats(ctx, fmts_list);
191 }
192 
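/*
 * do_search_boundary() clamps one edge of the block-matching window: starting
 * from the current position it moves up to search_range pixels towards the
 * plane boundary, in multiples of search_step, and backs off until the result
 * stays on the valid side of plane_boundary. search_boundary() merely selects
 * the x or y coordinate depending on the 'vertical' flag.
 */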
193 static int do_search_boundary(int pos, int plane_boundary, int search_range, int search_step)
194 {
195  int search_boundary;
196 
197  search_range = search_range / search_step * search_step;
198 
199  if (pos == plane_boundary) {
200  search_boundary = plane_boundary;
201  } else if (pos > plane_boundary) {
202  search_boundary = pos - search_range;
203 
204  while (search_boundary < plane_boundary) {
205  search_boundary += search_step;
206  }
207  } else {
208  search_boundary = pos + search_range;
209 
210  while (search_boundary > plane_boundary) {
211  search_boundary -= search_step;
212  }
213  }
214 
215  return search_boundary;
216 }
217 
218 static int search_boundary(int plane_boundary, int search_range, int search_step, int vertical, int y, int x)
219 {
220  return do_search_boundary(vertical ? y : x, plane_boundary, search_range, search_step);
221 }
222 
223 static int cmp_scores(const void *a, const void *b)
224 {
225  const struct PosPairCode *pair1 = a;
226  const struct PosPairCode *pair2 = b;
227  return FFDIFFSIGN(pair1->score, pair2->score);
228 }
229 
230 static double do_block_ssd(BM3DContext *s, PosCode *pos, const uint8_t *src, int src_stride, int r_y, int r_x)
231 {
232  const uint8_t *srcp = src + pos->y * src_stride + pos->x;
233  const uint8_t *refp = src + r_y * src_stride + r_x;
234  const int block_size = s->block_size;
235  double dist = 0.;
236  int x, y;
237 
238  for (y = 0; y < block_size; y++) {
239  for (x = 0; x < block_size; x++) {
240  double temp = refp[x] - srcp[x];
241  dist += temp * temp;
242  }
243 
244  srcp += src_stride;
245  refp += src_stride;
246  }
247 
248  return dist;
249 }
250 
251 static double do_block_ssd16(BM3DContext *s, PosCode *pos, const uint8_t *src, int src_stride, int r_y, int r_x)
252 {
253  const uint16_t *srcp = (uint16_t *)src + pos->y * src_stride / 2 + pos->x;
254  const uint16_t *refp = (uint16_t *)src + r_y * src_stride / 2 + r_x;
255  const int block_size = s->block_size;
256  double dist = 0.;
257  int x, y;
258 
259  for (y = 0; y < block_size; y++) {
260  for (x = 0; x < block_size; x++) {
261  double temp = refp[x] - srcp[x];
262  dist += temp * temp;
263  }
264 
265  srcp += src_stride / 2;
266  refp += src_stride / 2;
267  }
268 
269  return dist;
270 }
271 
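/*
 * do_block_matching_multi() scores every candidate position against the block
 * at (r_x, r_y) using the bit-depth specific SSD callback. th_mse is rescaled
 * to a raw SSD threshold (th_sse) via MSE2SSE, and accepted candidates get the
 * inverse scaling applied to their score. The match_blocks[] list is re-sorted
 * after each insertion so that only the best group_size blocks survive;
 * identical blocks (dist == 0) are skipped because the current block itself is
 * pre-seeded by block_matching_multi().
 */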
272 static void do_block_matching_multi(BM3DContext *s, const uint8_t *src, int src_stride, int src_range,
273  const PosCode *search_pos, int search_size, float th_mse,
274  int r_y, int r_x, int plane, int jobnr)
275 {
276  SliceContext *sc = &s->slices[jobnr];
277  double MSE2SSE = s->group_size * s->block_size * s->block_size * src_range * src_range / (s->max * s->max);
278  double distMul = 1. / MSE2SSE;
279  double th_sse = th_mse * MSE2SSE;
280  int i, index = sc->nb_match_blocks;
281 
282  for (i = 0; i < search_size; i++) {
283  PosCode pos = search_pos[i];
284  double dist;
285 
286  dist = s->do_block_ssd(s, &pos, src, src_stride, r_y, r_x);
287 
288  // Only match similar blocks but not identical blocks
289  if (dist <= th_sse && dist != 0) {
290  const double score = dist * distMul;
291 
292  if (index >= s->group_size && score >= sc->match_blocks[index - 1].score) {
293  continue;
294  }
295 
296  if (index >= s->group_size)
297  index = s->group_size - 1;
298 
299  sc->match_blocks[index].score = score;
300  sc->match_blocks[index].y = pos.y;
301  sc->match_blocks[index].x = pos.x;
302  index++;
303  qsort(sc->match_blocks, index, sizeof(PosPairCode), cmp_scores);
304  }
305  }
306 
307  sc->nb_match_blocks = index;
308 }
309 
310 static void block_matching_multi(BM3DContext *s, const uint8_t *ref, int ref_linesize, int y, int x,
311  int exclude_cur_pos, int plane, int jobnr)
312 {
313  SliceContext *sc = &s->slices[jobnr];
314  const int width = s->planewidth[plane];
315  const int height = s->planeheight[plane];
316  const int block_size = s->block_size;
317  const int step = s->bm_step;
318  const int range = s->bm_range / step * step;
319  int l = search_boundary(0, range, step, 0, y, x);
320  int r = search_boundary(width - block_size, range, step, 0, y, x);
321  int t = search_boundary(0, range, step, 1, y, x);
322  int b = search_boundary(height - block_size, range, step, 1, y, x);
323  int j, i, index = 0;
324 
325  for (j = t; j <= b; j += step) {
326  for (i = l; i <= r; i += step) {
327  PosCode pos;
328 
329  if (exclude_cur_pos > 0 && j == y && i == x) {
330  continue;
331  }
332 
333  pos.y = j;
334  pos.x = i;
335  sc->search_positions[index++] = pos;
336  }
337  }
338 
339  if (exclude_cur_pos == 1) {
340  sc->match_blocks[0].score = 0;
341  sc->match_blocks[0].y = y;
342  sc->match_blocks[0].x = x;
343  sc->nb_match_blocks = 1;
344  }
345 
346  do_block_matching_multi(s, ref, ref_linesize, s->bm_range,
347  sc->search_positions, index, s->th_mse, y, x, plane, jobnr);
348 }
349 
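/*
 * block_matching() is the per-block entry point: when grouping is disabled
 * (group_size == 1 or th_mse <= 0) the current block is the only match;
 * otherwise block_matching_multi() enumerates candidate positions on the
 * bm_step grid within bm_range around (i, j), seeds the list with the current
 * block and lets do_block_matching_multi() keep the closest ones.
 */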
350 static void block_matching(BM3DContext *s, const uint8_t *ref, int ref_linesize,
351  int j, int i, int plane, int jobnr)
352 {
353  SliceContext *sc = &s->slices[jobnr];
354 
355  if (s->group_size == 1 || s->th_mse <= 0.f) {
356  sc->match_blocks[0].score = 1;
357  sc->match_blocks[0].x = i;
358  sc->match_blocks[0].y = j;
359  sc->nb_match_blocks = 1;
360  return;
361  }
362 
363  sc->nb_match_blocks = 0;
364  block_matching_multi(s, ref, ref_linesize, j, i, 1, plane, jobnr);
365 }
366 
367 static void get_block_row(const uint8_t *srcp, int src_linesize,
368  int y, int x, int block_size, float *dst)
369 {
370  const uint8_t *src = srcp + y * src_linesize + x;
371  int j;
372 
373  for (j = 0; j < block_size; j++) {
374  dst[j] = src[j];
375  }
376 }
377 
378 static void get_block_row16(const uint8_t *srcp, int src_linesize,
379  int y, int x, int block_size, float *dst)
380 {
381  const uint16_t *src = (uint16_t *)srcp + y * src_linesize / 2 + x;
382  int j;
383 
384  for (j = 0; j < block_size; j++) {
385  dst[j] = src[j];
386  }
387 }
388 
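/*
 * Hard-thresholding ("basic estimate") stage: each matched block is taken to
 * the 2D DCT domain (row DCT, transpose, column DCT), the group is then
 * transformed along the third dimension, and coefficients whose magnitude
 * falls below a threshold derived from hard_threshold * sigma (scaled for the
 * first row/column/block) are zeroed. The number of retained coefficients
 * sets the aggregation weight; after the inverse transforms each block is
 * accumulated into this slice's num/den planes.
 */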
389 static void basic_block_filtering(BM3DContext *s, const uint8_t *src, int src_linesize,
390  const uint8_t *ref, int ref_linesize,
391  int y, int x, int plane, int jobnr)
392 {
393  SliceContext *sc = &s->slices[jobnr];
394  const int buffer_linesize = s->block_size * s->block_size;
395  const int nb_match_blocks = sc->nb_match_blocks;
396  const int block_size = s->block_size;
397  const int width = s->planewidth[plane];
398  const int pgroup_size = s->pgroup_size;
399  const int group_size = s->group_size;
400  float *buffer = sc->buffer;
401  float *bufferh = sc->bufferh;
402  float *bufferv = sc->bufferv;
403  float *bufferz = sc->bufferz;
404  float threshold[4];
405  float den_weight, num_weight;
406  int retained = 0;
407  int i, j, k;
408 
409  for (k = 0; k < nb_match_blocks; k++) {
410  const int y = sc->match_blocks[k].y;
411  const int x = sc->match_blocks[k].x;
412 
413  for (i = 0; i < block_size; i++) {
414  s->get_block_row(src, src_linesize, y + i, x, block_size, bufferh + block_size * i);
415  av_dct_calc(sc->dctf, bufferh + block_size * i);
416  }
417 
418  for (i = 0; i < block_size; i++) {
419  for (j = 0; j < block_size; j++) {
420  bufferv[i * block_size + j] = bufferh[j * block_size + i];
421  }
422  av_dct_calc(sc->dctf, bufferv + i * block_size);
423  }
424 
425  for (i = 0; i < block_size; i++) {
426  memcpy(buffer + k * buffer_linesize + i * block_size,
427  bufferv + i * block_size, block_size * 4);
428  }
429  }
430 
431  for (i = 0; i < block_size; i++) {
432  for (j = 0; j < block_size; j++) {
433  for (k = 0; k < nb_match_blocks; k++)
434  bufferz[k] = buffer[buffer_linesize * k + i * block_size + j];
435  if (group_size > 1)
436  av_dct_calc(sc->gdctf, bufferz);
437  bufferz += pgroup_size;
438  }
439  }
440 
441  threshold[0] = s->hard_threshold * s->sigma;
442  threshold[1] = threshold[0] * sqrtf(2.f);
443  threshold[2] = threshold[0] * 2.f;
444  threshold[3] = threshold[0] * sqrtf(8.f);
445  bufferz = sc->bufferz;
446 
447  for (i = 0; i < block_size; i++) {
448  for (j = 0; j < block_size; j++) {
449  for (k = 0; k < nb_match_blocks; k++) {
450  const float thresh = threshold[(j == 0) + (i == 0) + (k == 0)];
451 
452  if (bufferz[k] > thresh || bufferz[k] < -thresh) {
453  retained++;
454  } else {
455  bufferz[k] = 0;
456  }
457  }
458  bufferz += pgroup_size;
459  }
460  }
461 
462  bufferz = sc->bufferz;
463  buffer = sc->buffer;
464  for (i = 0; i < block_size; i++) {
465  for (j = 0; j < block_size; j++) {
466  if (group_size > 1)
467  av_dct_calc(sc->gdcti, bufferz);
468  for (k = 0; k < nb_match_blocks; k++) {
469  buffer[buffer_linesize * k + i * block_size + j] = bufferz[k];
470  }
471  bufferz += pgroup_size;
472  }
473  }
474 
475  den_weight = retained < 1 ? 1.f : 1.f / retained;
476  num_weight = den_weight;
477 
478  buffer = sc->buffer;
479  for (k = 0; k < nb_match_blocks; k++) {
480  float *num = sc->num + y * width + x;
481  float *den = sc->den + y * width + x;
482 
483  for (i = 0; i < block_size; i++) {
484  memcpy(bufferv + i * block_size,
485  buffer + k * buffer_linesize + i * block_size,
486  block_size * 4);
487  }
488 
489  for (i = 0; i < block_size; i++) {
490  av_dct_calc(sc->dcti, bufferv + block_size * i);
491  for (j = 0; j < block_size; j++) {
492  bufferh[j * block_size + i] = bufferv[i * block_size + j];
493  }
494  }
495 
496  for (i = 0; i < block_size; i++) {
497  av_dct_calc(sc->dcti, bufferh + block_size * i);
498  for (j = 0; j < block_size; j++) {
499  num[j] += bufferh[i * block_size + j] * num_weight;
500  den[j] += den_weight;
501  }
502  num += width;
503  den += width;
504  }
505  }
506 }
507 
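/*
 * Wiener ("final estimate") stage: both the noisy source and the reference
 * (normally the basic estimate) are transformed as above; each source
 * coefficient is shrunk by ref^2 / (ref^2 + sigma^2) computed from the
 * reference, and the aggregation weight is 1 / sum(wiener_coef^2).
 */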
508 static void final_block_filtering(BM3DContext *s, const uint8_t *src, int src_linesize,
509  const uint8_t *ref, int ref_linesize,
510  int y, int x, int plane, int jobnr)
511 {
512  SliceContext *sc = &s->slices[jobnr];
513  const int buffer_linesize = s->block_size * s->block_size;
514  const int nb_match_blocks = sc->nb_match_blocks;
515  const int block_size = s->block_size;
516  const int width = s->planewidth[plane];
517  const int pgroup_size = s->pgroup_size;
518  const int group_size = s->group_size;
519  const float sigma_sqr = s->sigma * s->sigma;
520  float *buffer = sc->buffer;
521  float *bufferh = sc->bufferh;
522  float *bufferv = sc->bufferv;
523  float *bufferz = sc->bufferz;
524  float *rbuffer = sc->rbuffer;
525  float *rbufferh = sc->rbufferh;
526  float *rbufferv = sc->rbufferv;
527  float *rbufferz = sc->rbufferz;
528  float den_weight, num_weight;
529  float l2_wiener = 0;
530  int i, j, k;
531 
532  for (k = 0; k < nb_match_blocks; k++) {
533  const int y = sc->match_blocks[k].y;
534  const int x = sc->match_blocks[k].x;
535 
536  for (i = 0; i < block_size; i++) {
537  s->get_block_row(src, src_linesize, y + i, x, block_size, bufferh + block_size * i);
538  s->get_block_row(ref, ref_linesize, y + i, x, block_size, rbufferh + block_size * i);
539  av_dct_calc(sc->dctf, bufferh + block_size * i);
540  av_dct_calc(sc->dctf, rbufferh + block_size * i);
541  }
542 
543  for (i = 0; i < block_size; i++) {
544  for (j = 0; j < block_size; j++) {
545  bufferv[i * block_size + j] = bufferh[j * block_size + i];
546  rbufferv[i * block_size + j] = rbufferh[j * block_size + i];
547  }
548  av_dct_calc(sc->dctf, bufferv + i * block_size);
549  av_dct_calc(sc->dctf, rbufferv + i * block_size);
550  }
551 
552  for (i = 0; i < block_size; i++) {
553  memcpy(buffer + k * buffer_linesize + i * block_size,
554  bufferv + i * block_size, block_size * 4);
555  memcpy(rbuffer + k * buffer_linesize + i * block_size,
556  rbufferv + i * block_size, block_size * 4);
557  }
558  }
559 
560  for (i = 0; i < block_size; i++) {
561  for (j = 0; j < block_size; j++) {
562  for (k = 0; k < nb_match_blocks; k++) {
563  bufferz[k] = buffer[buffer_linesize * k + i * block_size + j];
564  rbufferz[k] = rbuffer[buffer_linesize * k + i * block_size + j];
565  }
566  if (group_size > 1) {
567  av_dct_calc(sc->gdctf, bufferz);
568  av_dct_calc(sc->gdctf, rbufferz);
569  }
570  bufferz += pgroup_size;
571  rbufferz += pgroup_size;
572  }
573  }
574 
575  bufferz = sc->bufferz;
576  rbufferz = sc->rbufferz;
577 
578  for (i = 0; i < block_size; i++) {
579  for (j = 0; j < block_size; j++) {
580  for (k = 0; k < nb_match_blocks; k++) {
581  const float ref_sqr = rbufferz[k] * rbufferz[k];
582  float wiener_coef = ref_sqr / (ref_sqr + sigma_sqr);
583 
584  if (isnan(wiener_coef))
585  wiener_coef = 1;
586  bufferz[k] *= wiener_coef;
587  l2_wiener += wiener_coef * wiener_coef;
588  }
589  bufferz += pgroup_size;
590  rbufferz += pgroup_size;
591  }
592  }
593 
594  bufferz = sc->bufferz;
595  buffer = sc->buffer;
596  for (i = 0; i < block_size; i++) {
597  for (j = 0; j < block_size; j++) {
598  if (group_size > 1)
599  av_dct_calc(sc->gdcti, bufferz);
600  for (k = 0; k < nb_match_blocks; k++) {
601  buffer[buffer_linesize * k + i * block_size + j] = bufferz[k];
602  }
603  bufferz += pgroup_size;
604  }
605  }
606 
607  l2_wiener = FFMAX(l2_wiener, 1e-15f);
608  den_weight = 1.f / l2_wiener;
609  num_weight = den_weight;
610 
611  for (k = 0; k < nb_match_blocks; k++) {
612  float *num = sc->num + y * width + x;
613  float *den = sc->den + y * width + x;
614 
615  for (i = 0; i < block_size; i++) {
616  memcpy(bufferv + i * block_size,
617  buffer + k * buffer_linesize + i * block_size,
618  block_size * 4);
619  }
620 
621  for (i = 0; i < block_size; i++) {
622  av_dct_calc(sc->dcti, bufferv + block_size * i);
623  for (j = 0; j < block_size; j++) {
624  bufferh[j * block_size + i] = bufferv[i * block_size + j];
625  }
626  }
627 
628  for (i = 0; i < block_size; i++) {
629  av_dct_calc(sc->dcti, bufferh + block_size * i);
630  for (j = 0; j < block_size; j++) {
631  num[j] += bufferh[i * block_size + j] * num_weight;
632  den[j] += den_weight;
633  }
634  num += width;
635  den += width;
636  }
637  }
638 }
639 
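/*
 * Aggregation: every slice accumulated weighted pixel sums in num and the
 * matching weights in den; the output pixel is the ratio of the totals over
 * all slices, rounded and clipped to the plane's bit depth (do_output16 is
 * the >8-bit variant).
 */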
640 static void do_output(BM3DContext *s, uint8_t *dst, int dst_linesize,
641  int plane, int nb_jobs)
642 {
643  const int height = s->planeheight[plane];
644  const int width = s->planewidth[plane];
645  int i, j, k;
646 
647  for (i = 0; i < height; i++) {
648  for (j = 0; j < width; j++) {
649  uint8_t *dstp = dst + i * dst_linesize;
650  float sum_den = 0.f;
651  float sum_num = 0.f;
652 
653  for (k = 0; k < nb_jobs; k++) {
654  SliceContext *sc = &s->slices[k];
655  float num = sc->num[i * width + j];
656  float den = sc->den[i * width + j];
657 
658  sum_num += num;
659  sum_den += den;
660  }
661 
662  dstp[j] = av_clip_uint8(lrintf(sum_num / sum_den));
663  }
664  }
665 }
666 
667 static void do_output16(BM3DContext *s, uint8_t *dst, int dst_linesize,
668  int plane, int nb_jobs)
669 {
670  const int height = s->planeheight[plane];
671  const int width = s->planewidth[plane];
672  const int depth = s->depth;
673  int i, j, k;
674 
675  for (i = 0; i < height; i++) {
676  for (j = 0; j < width; j++) {
677  uint16_t *dstp = (uint16_t *)dst + i * dst_linesize / 2;
678  float sum_den = 0.f;
679  float sum_num = 0.f;
680 
681  for (k = 0; k < nb_jobs; k++) {
682  SliceContext *sc = &s->slices[k];
683  float num = sc->num[i * width + j];
684  float den = sc->den[i * width + j];
685 
686  sum_num += num;
687  sum_den += den;
688  }
689 
690  dstp[j] = av_clip_uintp2_c(lrintf(sum_num / sum_den), depth);
691  }
692  }
693 }
694 
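/*
 * filter_slice() walks block origins over a horizontal band of the plane in
 * block_step increments, clamping the last row/column of blocks so they stay
 * inside the plane, then runs block matching on the reference plane and the
 * selected block_filtering callback, which accumulates into this slice's
 * num/den buffers.
 */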
695 static int filter_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
696 {
697  BM3DContext *s = ctx->priv;
698  SliceContext *sc = &s->slices[jobnr];
699  const int block_step = s->block_step;
700  ThreadData *td = arg;
701  const uint8_t *src = td->src;
702  const uint8_t *ref = td->ref;
703  const int src_linesize = td->src_linesize;
704  const int ref_linesize = td->ref_linesize;
705  const int plane = td->plane;
706  const int width = s->planewidth[plane];
707  const int height = s->planeheight[plane];
708  const int block_pos_bottom = FFMAX(0, height - s->block_size);
709  const int block_pos_right = FFMAX(0, width - s->block_size);
710  const int slice_start = (((height + block_step - 1) / block_step) * jobnr / nb_jobs) * block_step;
711  const int slice_end = (jobnr == nb_jobs - 1) ? block_pos_bottom + block_step :
712  (((height + block_step - 1) / block_step) * (jobnr + 1) / nb_jobs) * block_step;
713  int i, j;
714 
715  memset(sc->num, 0, width * height * sizeof(FFTSample));
716  memset(sc->den, 0, width * height * sizeof(FFTSample));
717 
718  for (j = slice_start; j < slice_end; j += block_step) {
719  if (j > block_pos_bottom) {
720  j = block_pos_bottom;
721  }
722 
723  for (i = 0; i < block_pos_right + block_step; i += block_step) {
724  if (i > block_pos_right) {
725  i = block_pos_right;
726  }
727 
728  block_matching(s, ref, ref_linesize, j, i, plane, jobnr);
729 
730  s->block_filtering(s, src, src_linesize,
731  ref, ref_linesize, j, i, plane, jobnr);
732  }
733  }
734 
735  return 0;
736 }
737 
738 static int filter_frame(AVFilterContext *ctx, AVFrame **out, AVFrame *in, AVFrame *ref)
739 {
740  BM3DContext *s = ctx->priv;
741  AVFilterLink *outlink = ctx->outputs[0];
742  int p;
743 
744  *out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
745  if (!*out)
746  return AVERROR(ENOMEM);
747  av_frame_copy_props(*out, in);
748 
749  for (p = 0; p < s->nb_planes; p++) {
750  const int nb_jobs = FFMAX(1, FFMIN(s->nb_threads, s->planeheight[p] / s->block_size));
751  ThreadData td;
752 
753  if (!((1 << p) & s->planes) || ctx->is_disabled) {
754  av_image_copy_plane((*out)->data[p], (*out)->linesize[p],
755  in->data[p], in->linesize[p],
756  s->planewidth[p], s->planeheight[p]);
757  continue;
758  }
759 
760  td.src = in->data[p];
761  td.src_linesize = in->linesize[p];
762  td.ref = ref->data[p];
763  td.ref_linesize = ref->linesize[p];
764  td.plane = p;
765  ctx->internal->execute(ctx, filter_slice, &td, NULL, nb_jobs);
766 
767  s->do_output(s, (*out)->data[p], (*out)->linesize[p], p, nb_jobs);
768  }
769 
770  return 0;
771 }
772 
773 #define SQR(x) ((x) * (x))
774 
775 static int config_input(AVFilterLink *inlink)
776 {
777  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
778  AVFilterContext *ctx = inlink->dst;
779  BM3DContext *s = ctx->priv;
780  int i, group_bits;
781 
782  s->nb_threads = FFMIN(ff_filter_get_nb_threads(ctx), MAX_NB_THREADS);
783  s->nb_planes = av_pix_fmt_count_planes(inlink->format);
784  s->depth = desc->comp[0].depth;
785  s->max = (1 << s->depth) - 1;
786  s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
787  s->planeheight[0] = s->planeheight[3] = inlink->h;
788  s->planewidth[1] = s->planewidth[2] = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
789  s->planewidth[0] = s->planewidth[3] = inlink->w;
790 
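 /* The group dimension is padded to pgroup_size, the smallest power of two
  * (not below 16) that holds group_size, since the 1D group DCT set up below
  * operates on power-of-two lengths; bufferz rows are laid out pgroup_size
  * floats apart. */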
791  for (group_bits = 4; 1 << group_bits < s->group_size; group_bits++);
792  s->group_bits = group_bits;
793  s->pgroup_size = 1 << group_bits;
794 
795  for (i = 0; i < s->nb_threads; i++) {
796  SliceContext *sc = &s->slices[i];
797 
798  sc->num = av_calloc(FFALIGN(s->planewidth[0], s->block_size) * FFALIGN(s->planeheight[0], s->block_size), sizeof(FFTSample));
799  sc->den = av_calloc(FFALIGN(s->planewidth[0], s->block_size) * FFALIGN(s->planeheight[0], s->block_size), sizeof(FFTSample));
800  if (!sc->num || !sc->den)
801  return AVERROR(ENOMEM);
802 
803  sc->dctf = av_dct_init(av_log2(s->block_size), DCT_II);
804  sc->dcti = av_dct_init(av_log2(s->block_size), DCT_III);
805  if (!sc->dctf || !sc->dcti)
806  return AVERROR(ENOMEM);
807 
808  if (s->group_bits > 1) {
809  sc->gdctf = av_dct_init(s->group_bits, DCT_II);
810  sc->gdcti = av_dct_init(s->group_bits, DCT_III);
811  if (!sc->gdctf || !sc->gdcti)
812  return AVERROR(ENOMEM);
813  }
814 
815  sc->buffer = av_calloc(s->block_size * s->block_size * s->pgroup_size, sizeof(*sc->buffer));
816  sc->bufferz = av_calloc(s->block_size * s->block_size * s->pgroup_size, sizeof(*sc->bufferz));
817  sc->bufferh = av_calloc(s->block_size * s->block_size, sizeof(*sc->bufferh));
818  sc->bufferv = av_calloc(s->block_size * s->block_size, sizeof(*sc->bufferv));
819  if (!sc->bufferh || !sc->bufferv || !sc->buffer || !sc->bufferz)
820  return AVERROR(ENOMEM);
821 
822  if (s->mode == FINAL) {
823  sc->rbuffer = av_calloc(s->block_size * s->block_size * s->pgroup_size, sizeof(*sc->rbuffer));
824  sc->rbufferz = av_calloc(s->block_size * s->block_size * s->pgroup_size, sizeof(*sc->rbufferz));
825  sc->rbufferh = av_calloc(s->block_size * s->block_size, sizeof(*sc->rbufferh));
826  sc->rbufferv = av_calloc(s->block_size * s->block_size, sizeof(*sc->rbufferv));
827  if (!sc->rbufferh || !sc->rbufferv || !sc->rbuffer || !sc->rbufferz)
828  return AVERROR(ENOMEM);
829  }
830 
831  sc->search_positions = av_calloc(SQR(2 * s->bm_range / s->bm_step + 1), sizeof(*sc->search_positions));
832  if (!sc->search_positions)
833  return AVERROR(ENOMEM);
834  }
835 
836  s->do_output = do_output;
837  s->do_block_ssd = do_block_ssd;
838  s->get_block_row = get_block_row;
839 
840  if (s->depth > 8) {
841  s->do_output = do_output16;
842  s->do_block_ssd = do_block_ssd16;
843  s->get_block_row = get_block_row16;
844  }
845 
846  return 0;
847 }
848 
849 static int activate(AVFilterContext *ctx)
850 {
851  BM3DContext *s = ctx->priv;
852 
853  if (!s->ref) {
854  AVFrame *frame = NULL;
855  AVFrame *out = NULL;
856  int ret, status;
857  int64_t pts;
858 
859  FF_FILTER_FORWARD_STATUS_BACK(ctx->outputs[0], ctx->inputs[0]);
860 
861  if ((ret = ff_inlink_consume_frame(ctx->inputs[0], &frame)) > 0) {
862  ret = filter_frame(ctx, &out, frame, frame);
863  av_frame_free(&frame);
864  if (ret < 0)
865  return ret;
866  ret = ff_filter_frame(ctx->outputs[0], out);
867  }
868  if (ret < 0) {
869  return ret;
870  } else if (ff_inlink_acknowledge_status(ctx->inputs[0], &status, &pts)) {
871  ff_outlink_set_status(ctx->outputs[0], status, pts);
872  return 0;
873  } else {
874  if (ff_outlink_frame_wanted(ctx->outputs[0]))
875  ff_inlink_request_frame(ctx->inputs[0]);
876  return 0;
877  }
878  } else {
879  return ff_framesync_activate(&s->fs);
880  }
881 }
882 
883 static int process_frame(FFFrameSync *fs)
884 {
885  AVFilterContext *ctx = fs->parent;
886  BM3DContext *s = fs->opaque;
887  AVFilterLink *outlink = ctx->outputs[0];
888  AVFrame *out = NULL, *src, *ref;
889  int ret;
890 
891  if ((ret = ff_framesync_get_frame(&s->fs, 0, &src, 0)) < 0 ||
892  (ret = ff_framesync_get_frame(&s->fs, 1, &ref, 0)) < 0)
893  return ret;
894 
895  if ((ret = filter_frame(ctx, &out, src, ref)) < 0)
896  return ret;
897 
898  out->pts = av_rescale_q(src->pts, s->fs.time_base, outlink->time_base);
899 
900  return ff_filter_frame(outlink, out);
901 }
902 
903 static av_cold int init(AVFilterContext *ctx)
904 {
905  BM3DContext *s = ctx->priv;
906  AVFilterPad pad = { 0 };
907  int ret;
908 
909  if (s->mode == BASIC) {
910  if (s->th_mse == 0.f)
911  s->th_mse = 400.f + s->sigma * 80.f;
912  s->block_filtering = basic_block_filtering;
913  } else if (s->mode == FINAL) {
914  if (!s->ref) {
915  av_log(ctx, AV_LOG_WARNING, "Reference stream is mandatory in final estimation mode.\n");
916  s->ref = 1;
917  }
918  if (s->th_mse == 0.f)
919  s->th_mse = 200.f + s->sigma * 10.f;
920 
921  s->block_filtering = final_block_filtering;
922  } else {
923  return AVERROR_BUG;
924  }
925 
926  s->block_size = 1 << s->block_size;
927 
928  if (s->block_step > s->block_size) {
929  av_log(ctx, AV_LOG_WARNING, "bstep: %d can't be bigger than block size. Changing to %d.\n",
930  s->block_step, s->block_size);
931  s->block_step = s->block_size;
932  }
933  if (s->bm_step > s->bm_range) {
934  av_log(ctx, AV_LOG_WARNING, "mstep: %d can't be bigger than block matching range. Changing to %d.\n",
935  s->bm_step, s->bm_range);
936  s->bm_step = s->bm_range;
937  }
938 
939  pad.type = AVMEDIA_TYPE_VIDEO;
940  pad.name = av_strdup("source");
941  pad.config_props = config_input;
942  if (!pad.name)
943  return AVERROR(ENOMEM);
944 
945  if ((ret = ff_insert_inpad(ctx, 0, &pad)) < 0) {
946  av_freep(&pad.name);
947  return ret;
948  }
949 
950  if (s->ref) {
951  pad.type = AVMEDIA_TYPE_VIDEO;
952  pad.name = av_strdup("reference");
953  pad.config_props = NULL;
954  if (!pad.name)
955  return AVERROR(ENOMEM);
956 
957  if ((ret = ff_insert_inpad(ctx, 1, &pad)) < 0) {
958  av_freep(&pad.name);
959  return ret;
960  }
961  }
962 
963  return 0;
964 }
965 
966 static int config_output(AVFilterLink *outlink)
967 {
968  AVFilterContext *ctx = outlink->src;
969  BM3DContext *s = ctx->priv;
970  AVFilterLink *src = ctx->inputs[0];
971  AVFilterLink *ref;
972  FFFrameSyncIn *in;
973  int ret;
974 
975  if (s->ref) {
976  ref = ctx->inputs[1];
977 
978  if (src->format != ref->format) {
979  av_log(ctx, AV_LOG_ERROR, "inputs must be of same pixel format\n");
980  return AVERROR(EINVAL);
981  }
982  if (src->w != ref->w ||
983  src->h != ref->h) {
984  av_log(ctx, AV_LOG_ERROR, "First input link %s parameters "
985  "(size %dx%d) do not match the corresponding "
986  "second input link %s parameters (%dx%d) ",
987  ctx->input_pads[0].name, src->w, src->h,
988  ctx->input_pads[1].name, ref->w, ref->h);
989  return AVERROR(EINVAL);
990  }
991  }
992 
993  outlink->w = src->w;
994  outlink->h = src->h;
995  outlink->time_base = src->time_base;
996  outlink->sample_aspect_ratio = src->sample_aspect_ratio;
997  outlink->frame_rate = src->frame_rate;
998 
999  if (!s->ref)
1000  return 0;
1001 
1002  if ((ret = ff_framesync_init(&s->fs, ctx, 2)) < 0)
1003  return ret;
1004 
1005  in = s->fs.in;
1006  in[0].time_base = src->time_base;
1007  in[1].time_base = ref->time_base;
1008  in[0].sync = 1;
1009  in[0].before = EXT_STOP;
1010  in[0].after = EXT_STOP;
1011  in[1].sync = 1;
1012  in[1].before = EXT_STOP;
1013  in[1].after = EXT_STOP;
1014  s->fs.opaque = s;
1015  s->fs.on_event = process_frame;
1016 
1017  return ff_framesync_configure(&s->fs);
1018 }
1019 
1020 static av_cold void uninit(AVFilterContext *ctx)
1021 {
1022  BM3DContext *s = ctx->priv;
1023  int i;
1024 
1025  for (i = 0; i < ctx->nb_inputs; i++)
1026  av_freep(&ctx->input_pads[i].name);
1027 
1028  if (s->ref)
1029  ff_framesync_uninit(&s->fs);
1030 
1031  for (i = 0; i < s->nb_threads; i++) {
1032  SliceContext *sc = &s->slices[i];
1033 
1034  av_freep(&sc->num);
1035  av_freep(&sc->den);
1036 
1037  av_dct_end(sc->gdctf);
1038  av_dct_end(sc->gdcti);
1039  av_dct_end(sc->dctf);
1040  av_dct_end(sc->dcti);
1041 
1042  av_freep(&sc->buffer);
1043  av_freep(&sc->bufferh);
1044  av_freep(&sc->bufferv);
1045  av_freep(&sc->bufferz);
1046  av_freep(&sc->rbuffer);
1047  av_freep(&sc->rbufferh);
1048  av_freep(&sc->rbufferv);
1049  av_freep(&sc->rbufferz);
1050 
1051  av_freep(&sc->search_positions);
1052  }
1053 }
1054 
1055 static const AVFilterPad bm3d_outputs[] = {
1056  {
1057  .name = "default",
1058  .type = AVMEDIA_TYPE_VIDEO,
1059  .config_props = config_output,
1060  },
1061  { NULL }
1062 };
1063 
1064 AVFilter ff_vf_bm3d = {
1065  .name = "bm3d",
1066  .description = NULL_IF_CONFIG_SMALL("Block-Matching 3D denoiser."),
1067  .priv_size = sizeof(BM3DContext),
1068  .init = init,
1069  .uninit = uninit,
1070  .activate = activate,
1071  .query_formats = query_formats,
1072  .inputs = NULL,
1073  .outputs = bm3d_outputs,
1074  .priv_class = &bm3d_class,
1075  .flags = AVFILTER_FLAG_DYNAMIC_INPUTS |
1076  AVFILTER_FLAG_SLICE_THREADS |
1077  AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
1078 };