FFmpeg
vf_vif.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2017 Ronald S. Bultje <rsbultje@gmail.com>
3  * Copyright (c) 2017 Ashish Pratap Singh <ashk43712@gmail.com>
4  * Copyright (c) 2021 Paul B Mahol
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 /**
24  * @file
25  * Calculate VIF between two input videos.
26  */
27 
28 #include <float.h>
29 
30 #include "libavutil/avstring.h"
31 #include "libavutil/opt.h"
32 #include "libavutil/pixdesc.h"
33 #include "avfilter.h"
34 #include "framesync.h"
35 #include "drawutils.h"
36 #include "formats.h"
37 #include "internal.h"
38 #include "video.h"
39 
40 #define NUM_DATA_BUFS 13
41 
typedef struct VIFContext {
    const AVClass *class;
    /* NOTE(review): the fs, desc, nb_threads and data_buf fields were dropped
     * by the docs extractor and reconstructed from the symbol index
     * (vf_vif.c:44/45/48/50) -- confirm against the repository copy. */
    FFFrameSync fs;                  // dual-input frame synchronizer
    const AVPixFmtDescriptor *desc;  // pixel format descriptor of the inputs
    int width;
    int height;
    int nb_threads;                  // number of slice-threading jobs
    float factor;                    // rescale factor mapping samples to 8-bit range
    float *data_buf[NUM_DATA_BUFS];  // intermediate planes reused at every scale
    float **temp;                    // one scratch row per thread for the separable filter
    float *ref_data;                 // reference plane converted to offset floats
    float *main_data;                // distorted plane converted to offset floats
    double vif_sum[4];               // running per-scale score sums across frames
    double vif_min[4];               // per-scale minimum score seen so far
    double vif_max[4];               // per-scale maximum score seen so far
    uint64_t nb_frames;              // frames scored, for the average printed in uninit()
} VIFContext;
59 
#define OFFSET(x) offsetof(VIFContext, x)

/* No user-settable options; an (empty) table is still required by AVClass. */
static const AVOption vif_options[] = {
    { NULL }
};

/* NOTE(review): this line was dropped by the docs extractor and restored from
 * the symbol index (AVFILTER_DEFINE_CLASS(vif)) -- confirm against the repo. */
AVFILTER_DEFINE_CLASS(vif);
/* 1-D kernel width used at each of the four scales (coarsest first). */
static const uint8_t vif_filter1d_width1[4] = { 17, 9, 5, 3 };

/* Symmetric normalized 1-D smoothing kernels, one row per scale; only the
 * first vif_filter1d_width1[scale] entries of each row are used and each
 * used prefix sums to ~1.0. */
static const float vif_filter1d_table[4][17] =
{
    {
        0.00745626912, 0.0142655009, 0.0250313189, 0.0402820669, 0.0594526194,
        0.0804751068, 0.0999041125, 0.113746084, 0.118773937, 0.113746084,
        0.0999041125, 0.0804751068, 0.0594526194, 0.0402820669, 0.0250313189,
        0.0142655009, 0.00745626912
    },
    {
        0.0189780835, 0.0558981746, 0.120920904, 0.192116052, 0.224173605,
        0.192116052, 0.120920904, 0.0558981746, 0.0189780835
    },
    {
        0.054488685, 0.244201347, 0.402619958, 0.244201347, 0.054488685
    },
    {
        0.166378498, 0.667243004, 0.166378498
    }
};
89 
/* Per-job arguments for the threaded separable convolution (vif_filter1d).
 * NOTE(review): src_stride/dst_stride/filter_width were dropped by the docs
 * extractor and reconstructed from the symbol index (vf_vif.c:95-97). */
typedef struct ThreadData {
    const float *filter;  // 1-D kernel; filter_width taps are read
    const float *src;
    float *dst;
    int w, h;
    int src_stride;       // strides are in floats, not bytes
    int dst_stride;
    int filter_width;
    float **temp;         // per-job scratch row, at least w floats each
} ThreadData;
100 
/**
 * Decimate a plane by 2 in both dimensions by keeping every even-indexed
 * sample (plain subsampling; any low-pass filtering happens beforehand).
 * Strides are in floats; the destination stride is halved to match the
 * downscaled width.
 */
static void vif_dec2(const float *src, float *dst, int w, int h,
                     int src_stride, int dst_stride)
{
    const int out_stride = dst_stride / 2;
    const int out_w = w / 2;
    const int out_h = h / 2;

    for (int y = 0; y < out_h; y++) {
        const float *in_row = src + (2 * y) * src_stride;
        float *out_row = dst + y * out_stride;

        for (int x = 0; x < out_w; x++)
            out_row[x] = in_row[2 * x];
    }
}
111 
/**
 * Core per-scale VIF statistic.
 *
 * From the filtered first moments (mu1_sq, mu2_sq, mu1_mu2) and raw second
 * moments (xx_filt, yy_filt, xy_filt) derive per-pixel local variances and
 * covariance, fit the distortion gain g = sigma12 / sigma1_sq, and sum the
 * log2 information terms over the whole plane.
 *
 * num[0] and den[0] receive the accumulated numerator (distorted-channel
 * information) and denominator (reference-channel information) of the VIF
 * ratio for this scale.
 *
 * NOTE: float accumulation order matters for bit-exact output; the
 * row-local inner accumulators must be kept as-is.
 */
static void vif_statistic(const float *mu1_sq, const float *mu2_sq,
                          const float *mu1_mu2, const float *xx_filt,
                          const float *yy_filt, const float *xy_filt,
                          float *num, float *den, int w, int h)
{
    static const float sigma_nsq = 2; /* variance of the modeled HVS internal noise */
    float mu1_sq_val, mu2_sq_val, mu1_mu2_val, xx_filt_val, yy_filt_val, xy_filt_val;
    float sigma1_sq, sigma2_sq, sigma12, g, sv_sq, eps = 1.0e-10f;
    float gain_limit = 100.f;
    float num_val, den_val;
    float accum_num = 0.0f;
    float accum_den = 0.0f;

    for (int i = 0; i < h; i++) {
        float accum_inner_num = 0.f;
        float accum_inner_den = 0.f;

        for (int j = 0; j < w; j++) {
            mu1_sq_val = mu1_sq[i * w + j];
            mu2_sq_val = mu2_sq[i * w + j];
            mu1_mu2_val = mu1_mu2[i * w + j];
            xx_filt_val = xx_filt[i * w + j];
            yy_filt_val = yy_filt[i * w + j];
            xy_filt_val = xy_filt[i * w + j];

            /* local (co)variances: E[x^2] - E[x]^2 etc. */
            sigma1_sq = xx_filt_val - mu1_sq_val;
            sigma2_sq = yy_filt_val - mu2_sq_val;
            sigma12 = xy_filt_val - mu1_mu2_val;

            /* clamp negative values caused by float rounding */
            sigma1_sq = FFMAX(sigma1_sq, 0.0f);
            sigma2_sq = FFMAX(sigma2_sq, 0.0f);
            sigma12 = FFMAX(sigma12, 0.0f);

            g = sigma12 / (sigma1_sq + eps);
            sv_sq = sigma2_sq - g * sigma12;

            /* degenerate cases: (near-)constant reference or distorted patch */
            if (sigma1_sq < eps) {
                g = 0.0f;
                sv_sq = sigma2_sq;
                sigma1_sq = 0.0f;
            }

            if (sigma2_sq < eps) {
                g = 0.0f;
                sv_sq = 0.0f;
            }

            if (g < 0.0f) {
                sv_sq = sigma2_sq;
                g = 0.0f;
            }
            sv_sq = FFMAX(sv_sq, eps);

            g = FFMIN(g, gain_limit);

            num_val = log2f(1.0f + g * g * sigma1_sq / (sv_sq + sigma_nsq));
            den_val = log2f(1.0f + sigma1_sq / sigma_nsq);

            if (isnan(den_val))
                num_val = den_val = 1.f;

            accum_inner_num += num_val;
            accum_inner_den += den_val;
        }

        accum_num += accum_inner_num;
        accum_den += accum_inner_den;
    }

    num[0] = accum_num;
    den[0] = accum_den;
}
184 
/**
 * Element-wise products of two w*h planes: xx = x*x, yy = y*y, xy = x*y.
 * All planes are tightly packed with stride == w, so the 2-D walk collapses
 * to a single linear pass.
 */
static void vif_xx_yy_xy(const float *x, const float *y, float *xx, float *yy,
                         float *xy, int w, int h)
{
    const int n = w * h;

    for (int k = 0; k < n; k++) {
        const float a = x[k];
        const float b = y[k];

        xx[k] = a * a;
        yy[k] = b * b;
        xy[k] = a * b;
    }
}
208 
/**
 * Slice-threaded separable 2-D convolution: a vertical pass into the
 * per-job scratch row, then a horizontal pass into the destination.
 * Rows [slice_start, slice_end) of the output are produced by job jobnr.
 * Out-of-range taps are handled by mirror reflection at the borders.
 */
static int vif_filter1d(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ThreadData *td = arg;
    const float *filter = td->filter;
    const float *src = td->src;
    float *dst = td->dst;
    int w = td->w;
    int h = td->h;
    int src_stride = td->src_stride;
    int dst_stride = td->dst_stride;
    int filt_w = td->filter_width;
    float *temp = td->temp[jobnr];
    const int slice_start = (h * jobnr) / nb_jobs;
    const int slice_end = (h * (jobnr+1)) / nb_jobs;

    for (int i = slice_start; i < slice_end; i++) {
        /** Vertical pass. */
        for (int j = 0; j < w; j++) {
            float sum = 0.f;

            if (i >= filt_w / 2 && i < h - filt_w / 2 - 1) {
                /* fast path: all taps in range, no border checks needed */
                for (int filt_i = 0; filt_i < filt_w; filt_i++) {
                    const float filt_coeff = filter[filt_i];
                    float img_coeff;
                    int ii = i - filt_w / 2 + filt_i;

                    img_coeff = src[ii * src_stride + j];
                    sum += filt_coeff * img_coeff;
                }
            } else {
                for (int filt_i = 0; filt_i < filt_w; filt_i++) {
                    const float filt_coeff = filter[filt_i];
                    int ii = i - filt_w / 2 + filt_i;
                    float img_coeff;

                    /* mirror-reflect row index at both plane edges */
                    ii = ii < 0 ? -ii : (ii >= h ? 2 * h - ii - 1 : ii);

                    img_coeff = src[ii * src_stride + j];
                    sum += filt_coeff * img_coeff;
                }
            }

            temp[j] = sum;
        }

        /** Horizontal pass. */
        for (int j = 0; j < w; j++) {
            float sum = 0.f;

            if (j >= filt_w / 2 && j < w - filt_w / 2 - 1) {
                /* fast path: all taps in range */
                for (int filt_j = 0; filt_j < filt_w; filt_j++) {
                    const float filt_coeff = filter[filt_j];
                    int jj = j - filt_w / 2 + filt_j;
                    float img_coeff;

                    img_coeff = temp[jj];
                    sum += filt_coeff * img_coeff;
                }
            } else {
                for (int filt_j = 0; filt_j < filt_w; filt_j++) {
                    const float filt_coeff = filter[filt_j];
                    int jj = j - filt_w / 2 + filt_j;
                    float img_coeff;

                    /* mirror-reflect column index at both plane edges */
                    jj = jj < 0 ? -jj : (jj >= w ? 2 * w - jj - 1 : jj);

                    img_coeff = temp[jj];
                    sum += filt_coeff * img_coeff;
                }
            }

            dst[i * dst_stride + j] = sum;
        }
    }

    return 0;
}
286 
/**
 * Compute the four-scale VIF score between a reference and a distorted
 * float plane. For scales > 0 the planes are first smoothed and decimated
 * by 2; at each scale the filtered moments are fed to vif_statistic() and
 * score[scale] receives num/den (or 1.0 when den is ~0).
 *
 * NOTE(review): the opening signature line was dropped by the docs
 * extractor and reconstructed from the symbol index -- confirm against the
 * repository copy.
 */
static int compute_vif2(AVFilterContext *ctx,
                        const float *ref, const float *main, int w, int h,
                        int ref_stride, int main_stride, float *score,
                        float *const data_buf[NUM_DATA_BUFS], float **temp,
                        int gnb_threads)
{
    ThreadData td;
    float *ref_scale = data_buf[0];
    float *main_scale = data_buf[1];
    float *ref_sq = data_buf[2];
    float *main_sq = data_buf[3];
    float *ref_main = data_buf[4];
    float *mu1 = data_buf[5];
    float *mu2 = data_buf[6];
    float *mu1_sq = data_buf[7];
    float *mu2_sq = data_buf[8];
    float *mu1_mu2 = data_buf[9];
    float *ref_sq_filt = data_buf[10];
    float *main_sq_filt = data_buf[11];
    float *ref_main_filt = data_buf[12];

    /* current-scale plane pointers; start at the caller's full-res planes */
    float *curr_ref_scale = (float *)ref;
    float *curr_main_scale = (float *)main;
    int curr_ref_stride = ref_stride;
    int curr_main_stride = main_stride;

    float num = 0.f;
    float den = 0.f;

    for (int scale = 0; scale < 4; scale++) {
        const float *filter = vif_filter1d_table[scale];
        int filter_width = vif_filter1d_width1[scale];
        const int nb_threads = FFMIN(h, gnb_threads);
        int buf_valid_w = w;
        int buf_valid_h = h;

        td.filter = filter;
        td.filter_width = filter_width;

        if (scale > 0) {
            /* smooth the previous scale, then subsample by 2 into the
             * ref_scale/main_scale buffers that become the current scale */
            td.src = curr_ref_scale;
            td.dst = mu1;
            td.w = w;
            td.h = h;
            td.src_stride = curr_ref_stride;
            td.dst_stride = w;
            td.temp = temp;
            ff_filter_execute(ctx, vif_filter1d, &td, NULL, nb_threads);

            td.src = curr_main_scale;
            td.dst = mu2;
            td.src_stride = curr_main_stride;
            ff_filter_execute(ctx, vif_filter1d, &td, NULL, nb_threads);

            vif_dec2(mu1, ref_scale, buf_valid_w, buf_valid_h, w, w);
            vif_dec2(mu2, main_scale, buf_valid_w, buf_valid_h, w, w);

            w = buf_valid_w / 2;
            h = buf_valid_h / 2;

            buf_valid_w = w;
            buf_valid_h = h;

            curr_ref_scale = ref_scale;
            curr_main_scale = main_scale;

            curr_ref_stride = w;
            curr_main_stride = w;
        }

        /* local means of reference and distorted plane at this scale */
        td.src = curr_ref_scale;
        td.dst = mu1;
        td.w = w;
        td.h = h;
        td.src_stride = curr_ref_stride;
        td.dst_stride = w;
        td.temp = temp;
        ff_filter_execute(ctx, vif_filter1d, &td, NULL, nb_threads);

        td.src = curr_main_scale;
        td.dst = mu2;
        td.src_stride = curr_main_stride;
        ff_filter_execute(ctx, vif_filter1d, &td, NULL, nb_threads);

        /* squared means / mean product, and raw per-pixel products */
        vif_xx_yy_xy(mu1, mu2, mu1_sq, mu2_sq, mu1_mu2, w, h);

        vif_xx_yy_xy(curr_ref_scale, curr_main_scale, ref_sq, main_sq, ref_main, w, h);

        /* filtered second moments */
        td.src = ref_sq;
        td.dst = ref_sq_filt;
        td.src_stride = w;
        ff_filter_execute(ctx, vif_filter1d, &td, NULL, nb_threads);

        td.src = main_sq;
        td.dst = main_sq_filt;
        td.src_stride = w;
        ff_filter_execute(ctx, vif_filter1d, &td, NULL, nb_threads);

        td.src = ref_main;
        td.dst = ref_main_filt;
        ff_filter_execute(ctx, vif_filter1d, &td, NULL, nb_threads);

        vif_statistic(mu1_sq, mu2_sq, mu1_mu2, ref_sq_filt, main_sq_filt,
                      ref_main_filt, &num, &den, w, h);

        /* guard against division by ~0 for degenerate (flat) planes */
        score[scale] = den <= FLT_EPSILON ? 1.f : num / den;
    }

    return 0;
}
397 
/**
 * Generate offset_8bit()/offset_16bit(): convert plane 0 of the reference
 * and main frames from @p type samples to float, scaling by s->factor
 * (maps >8-bit depths back to 8-bit range) and subtracting the 128 offset.
 * Results are written to the tightly packed s->ref_data / s->main_data
 * planes (stride == width). Frame linesizes are in bytes, hence the
 * division by sizeof(type).
 */
#define offset_fn(type, bits) \
static void offset_##bits##bit(VIFContext *s, \
                               const AVFrame *ref, \
                               AVFrame *main, int stride)\
{ \
    int w = s->width; \
    int h = s->height; \
    \
    int ref_stride = ref->linesize[0]; \
    int main_stride = main->linesize[0]; \
    \
    const type *ref_ptr = (const type *) ref->data[0]; \
    const type *main_ptr = (const type *) main->data[0]; \
    \
    const float factor = s->factor; \
    \
    float *ref_ptr_data = s->ref_data; \
    float *main_ptr_data = s->main_data; \
    \
    for (int i = 0; i < h; i++) { \
        for (int j = 0; j < w; j++) { \
            ref_ptr_data[j] = ref_ptr[j] * factor - 128.f; \
            main_ptr_data[j] = main_ptr[j] * factor - 128.f; \
        } \
        ref_ptr += ref_stride / sizeof(type); \
        ref_ptr_data += w; \
        main_ptr += main_stride / sizeof(type); \
        main_ptr_data += w; \
    } \
}

offset_fn(uint8_t, 8)
offset_fn(uint16_t, 16)
431 
432 static void set_meta(AVDictionary **metadata, const char *key, float d)
433 {
434  char value[257];
435  snprintf(value, sizeof(value), "%f", d);
436  av_dict_set(metadata, key, value, 0);
437 }
438 
/**
 * Score one synchronized frame pair. Converts plane 0 of both frames to
 * offset floats, runs the four-scale VIF computation, attaches the scores
 * as "lavfi.vif.scale.N" metadata on the main frame and updates the
 * running min/max/sum statistics. Returns the (annotated) main frame.
 *
 * NOTE(review): the signature line was dropped by the docs extractor and
 * reconstructed from the symbol index -- confirm against the repository.
 */
static AVFrame *do_vif(AVFilterContext *ctx, AVFrame *main, const AVFrame *ref)
{
    VIFContext *s = ctx->priv;
    AVDictionary **metadata = &main->metadata;
    float score[4];

    /* map >8-bit sample ranges back into the 8-bit range before -128 offset */
    s->factor = 1.f / (1 << (s->desc->comp[0].depth - 8));
    if (s->desc->comp[0].depth <= 8) {
        offset_8bit(s, ref, main, s->width);
    } else {
        offset_16bit(s, ref, main, s->width);
    }

    compute_vif2(ctx, s->ref_data, s->main_data,
                 s->width, s->height, s->width, s->width,
                 score, s->data_buf, s->temp, s->nb_threads);

    set_meta(metadata, "lavfi.vif.scale.0", score[0]);
    set_meta(metadata, "lavfi.vif.scale.1", score[1]);
    set_meta(metadata, "lavfi.vif.scale.2", score[2]);
    set_meta(metadata, "lavfi.vif.scale.3", score[3]);

    for (int i = 0; i < 4; i++) {
        s->vif_min[i] = FFMIN(s->vif_min[i], score[i]);
        s->vif_max[i] = FFMAX(s->vif_max[i], score[i]);
        s->vif_sum[i] += score[i];
    }

    s->nb_frames++;

    return main;
}
471 
/**
 * Advertise the supported pixel formats: 8-bit gray/YUV plus the higher
 * bit-depth YUV variants generated by the PF() macro.
 *
 * NOTE(review): the docs extractor dropped the signature, most of the
 * pix_fmts entries and the return statement; they were reconstructed from
 * the symbol index entries -- confirm against the repository copy.
 */
static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY9, AV_PIX_FMT_GRAY10,
        AV_PIX_FMT_GRAY12, AV_PIX_FMT_GRAY14, AV_PIX_FMT_GRAY16,
        AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV420P,
        AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV444P,
        AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P,
        AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ444P,
#define PF(suf) AV_PIX_FMT_YUV420##suf, AV_PIX_FMT_YUV422##suf, AV_PIX_FMT_YUV444##suf
        PF(P9), PF(P10), PF(P12), PF(P14), PF(P16),
        AV_PIX_FMT_NONE
    };

    return ff_set_common_formats_from_list(ctx, pix_fmts);
}
488 
/**
 * Configure the reference input: validate that both inputs match in size
 * and pixel format, then allocate all per-scale work buffers and the
 * per-thread scratch rows. Everything allocated here is freed in uninit(),
 * which is also the cleanup path when an allocation fails.
 *
 * NOTE(review): the signature line was dropped by the docs extractor and
 * reconstructed from the symbol index -- confirm against the repository.
 */
static int config_input_ref(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    VIFContext *s = ctx->priv;

    if (ctx->inputs[0]->w != ctx->inputs[1]->w ||
        ctx->inputs[0]->h != ctx->inputs[1]->h) {
        av_log(ctx, AV_LOG_ERROR, "Width and height of input videos must be same.\n");
        return AVERROR(EINVAL);
    }
    if (ctx->inputs[0]->format != ctx->inputs[1]->format) {
        av_log(ctx, AV_LOG_ERROR, "Inputs must be of same pixel format.\n");
        return AVERROR(EINVAL);
    }

    s->desc = av_pix_fmt_desc_get(inlink->format);
    s->width = ctx->inputs[0]->w;
    s->height = ctx->inputs[0]->h;
    s->nb_threads = ff_filter_get_nb_threads(ctx);

    /* start min/max at the extremes so the first frame always updates them */
    for (int i = 0; i < 4; i++) {
        s->vif_min[i] = DBL_MAX;
        s->vif_max[i] = -DBL_MAX;
    }

    for (int i = 0; i < NUM_DATA_BUFS; i++) {
        if (!(s->data_buf[i] = av_calloc(s->width, s->height * sizeof(float))))
            return AVERROR(ENOMEM);
    }

    if (!(s->ref_data = av_calloc(s->width, s->height * sizeof(float))))
        return AVERROR(ENOMEM);

    if (!(s->main_data = av_calloc(s->width, s->height * sizeof(float))))
        return AVERROR(ENOMEM);

    if (!(s->temp = av_calloc(s->nb_threads, sizeof(s->temp[0]))))
        return AVERROR(ENOMEM);

    /* one scratch row per slice-threading job (see vif_filter1d) */
    for (int i = 0; i < s->nb_threads; i++) {
        if (!(s->temp[i] = av_calloc(s->width, sizeof(float))))
            return AVERROR(ENOMEM);
    }

    return 0;
}
535 
/**
 * Framesync event callback: fetch the paired main/reference frames, score
 * them with do_vif() unless the filter is disabled (or no reference is
 * available), and push the main frame downstream.
 *
 * NOTE(review): the signature line was dropped by the docs extractor and
 * reconstructed from the symbol index -- confirm against the repository.
 */
static int process_frame(FFFrameSync *fs)
{
    AVFilterContext *ctx = fs->parent;
    VIFContext *s = fs->opaque;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out_frame, *main_frame = NULL, *ref_frame = NULL;
    int ret;

    ret = ff_framesync_dualinput_get(fs, &main_frame, &ref_frame);
    if (ret < 0)
        return ret;

    if (ctx->is_disabled || !ref_frame) {
        out_frame = main_frame;
    } else {
        out_frame = do_vif(ctx, main_frame, ref_frame);
    }

    /* rebase the sync timestamp to the output link's time base */
    out_frame->pts = av_rescale_q(s->fs.pts, s->fs.time_base, outlink->time_base);

    return ff_filter_frame(outlink, out_frame);
}
558 
559 
560 static int config_output(AVFilterLink *outlink)
561 {
562  AVFilterContext *ctx = outlink->src;
563  VIFContext *s = ctx->priv;
564  AVFilterLink *mainlink = ctx->inputs[0];
565  FFFrameSyncIn *in;
566  int ret;
567 
568  outlink->w = mainlink->w;
569  outlink->h = mainlink->h;
570  outlink->time_base = mainlink->time_base;
571  outlink->sample_aspect_ratio = mainlink->sample_aspect_ratio;
572  outlink->frame_rate = mainlink->frame_rate;
573  if ((ret = ff_framesync_init(&s->fs, ctx, 2)) < 0)
574  return ret;
575 
576  in = s->fs.in;
577  in[0].time_base = mainlink->time_base;
578  in[1].time_base = ctx->inputs[1]->time_base;
579  in[0].sync = 2;
580  in[0].before = EXT_STOP;
581  in[0].after = EXT_STOP;
582  in[1].sync = 1;
583  in[1].before = EXT_STOP;
584  in[1].after = EXT_STOP;
585  s->fs.opaque = s;
586  s->fs.on_event = process_frame;
587 
588  return ff_framesync_configure(&s->fs);
589 }
590 
/**
 * Activation callback: delegate all scheduling to the frame synchronizer.
 *
 * NOTE(review): the signature line was dropped by the docs extractor and
 * reconstructed from the symbol index -- confirm against the repository.
 */
static int activate(AVFilterContext *ctx)
{
    VIFContext *s = ctx->priv;
    return ff_framesync_activate(&s->fs);
}
596 
/**
 * Log the aggregate per-scale VIF statistics (average/min/max over all
 * scored frames) and release every buffer allocated in config_input_ref().
 *
 * NOTE(review): the signature line was dropped by the docs extractor and
 * reconstructed from the symbol index -- confirm against the repository.
 */
static av_cold void uninit(AVFilterContext *ctx)
{
    VIFContext *s = ctx->priv;

    if (s->nb_frames > 0) {
        for (int i = 0; i < 4; i++)
            av_log(ctx, AV_LOG_INFO, "VIF scale=%d average:%f min:%f: max:%f\n",
                   i, s->vif_sum[i] / s->nb_frames, s->vif_min[i], s->vif_max[i]);
    }

    for (int i = 0; i < NUM_DATA_BUFS; i++)
        av_freep(&s->data_buf[i]);

    av_freep(&s->ref_data);
    av_freep(&s->main_data);

    /* s->temp may be NULL if config_input_ref() never ran or failed early */
    for (int i = 0; i < s->nb_threads && s->temp; i++)
        av_freep(&s->temp[i]);

    av_freep(&s->temp);

    ff_framesync_uninit(&s->fs);
}
620 
/* Two inputs: pad 0 is the distorted ("main") stream that is passed through;
 * pad 1 is the pristine reference, whose config callback does the setup. */
static const AVFilterPad vif_inputs[] = {
    {
        .name = "main",
        .type = AVMEDIA_TYPE_VIDEO,
    },{
        .name = "reference",
        .type = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input_ref,
    },
};
631 
/* Single video output carrying the main stream with score metadata. */
static const AVFilterPad vif_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
        .config_props = config_output,
    },
};
639 
/* NOTE(review): the struct opener, FILTER_INPUTS/FILTER_OUTPUTS lines and
 * the .flags initializer were dropped by the docs extractor and
 * reconstructed from the symbol index (which references both
 * AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL and AVFILTER_FLAG_SLICE_THREADS)
 * -- confirm against the repository copy. */
const AVFilter ff_vf_vif = {
    .name          = "vif",
    .description   = NULL_IF_CONFIG_SMALL("Calculate the VIF between two video streams."),
    .uninit        = uninit,
    .query_formats = query_formats,
    .priv_size     = sizeof(VIFContext),
    .priv_class    = &vif_class,
    .activate      = activate,
    FILTER_INPUTS(vif_inputs),
    FILTER_OUTPUTS(vif_outputs),
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL |
                     AVFILTER_FLAG_SLICE_THREADS,
};
VIFContext::vif_sum
double vif_sum[4]
Definition: vf_vif.c:54
ThreadData::src_stride
int src_stride
Definition: vf_vif.c:95
vif_outputs
static const AVFilterPad vif_outputs[]
Definition: vf_vif.c:632
FFFrameSyncIn::time_base
AVRational time_base
Time base for the incoming frames.
Definition: framesync.h:96
ff_framesync_configure
int ff_framesync_configure(FFFrameSync *fs)
Configure a frame sync structure.
Definition: framesync.c:124
td
#define td
Definition: regdef.h:70
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
set_meta
static void set_meta(AVDictionary **metadata, int chan, const char *key, const char *fmt, double val)
Definition: af_astats.c:401
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
ThreadData::filter_width
int filter_width
Definition: vf_vif.c:97
ff_framesync_uninit
void ff_framesync_uninit(FFFrameSync *fs)
Free all memory currently allocated.
Definition: framesync.c:290
VIFContext::vif_max
double vif_max[4]
Definition: vf_vif.c:56
VIFContext::fs
FFFrameSync fs
Definition: vf_vif.c:44
ff_filter_frame
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1019
log2f
#define log2f(x)
Definition: libm.h:409
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2540
ThreadData::dst_stride
int dst_stride
Definition: vf_vif.c:96
process_frame
static int process_frame(FFFrameSync *fs)
Definition: vf_vif.c:536
VIFContext::nb_threads
int nb_threads
Definition: vf_vif.c:48
inlink
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
Definition: filter_design.txt:212
offset_fn
#define offset_fn(type, bits)
Definition: vf_vif.c:398
vif_xx_yy_xy
static void vif_xx_yy_xy(const float *x, const float *y, float *xx, float *yy, float *xy, int w, int h)
Definition: vf_vif.c:185
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:303
pixdesc.h
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:396
uninit
static av_cold void uninit(AVFilterContext *ctx)
Definition: vf_vif.c:597
w
uint8_t w
Definition: llviddspenc.c:38
AVOption
AVOption.
Definition: opt.h:247
VIFContext::height
int height
Definition: vf_vif.c:47
vif_filter1d_table
static const float vif_filter1d_table[4][17]
Definition: vf_vif.c:70
float.h
ThreadData::w
int w
Definition: vf_blend.c:59
AV_PIX_FMT_YUV440P
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:99
filter
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce then the filter should push the output frames on the output link immediately As an exception to the previous rule if the input frame is enough to produce several output frames then the filter needs output only at least one per link The additional frames can be left buffered in the filter
Definition: filter_design.txt:228
AVDictionary
Definition: dict.c:30
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
query_formats
static int query_formats(AVFilterContext *ctx)
Definition: vf_vif.c:472
AVFilter::name
const char * name
Filter name.
Definition: avfilter.h:153
FFFrameSync
Frame sync structure.
Definition: framesync.h:146
vif_filter1d
static int vif_filter1d(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_vif.c:209
vif_filter1d_width1
static const uint8_t vif_filter1d_width1[4]
Definition: vf_vif.c:68
video.h
AV_PIX_FMT_GRAY9
#define AV_PIX_FMT_GRAY9
Definition: pixfmt.h:369
VIFContext::desc
const AVPixFmtDescriptor * desc
Definition: vf_vif.c:45
formats.h
EXT_STOP
@ EXT_STOP
Completely stop all streams with this one.
Definition: framesync.h:65
FFFrameSyncIn
Input stream structure.
Definition: framesync.h:81
VIFContext::main_data
float * main_data
Definition: vf_vif.c:53
vif_dec2
static void vif_dec2(const float *src, float *dst, int w, int h, int src_stride, int dst_stride)
Definition: vf_vif.c:101
AV_PIX_FMT_GRAY16
#define AV_PIX_FMT_GRAY16
Definition: pixfmt.h:373
FFFrameSyncIn::sync
unsigned sync
Synchronization level: frames on input at the highest sync level will generate output frame events.
Definition: framesync.h:139
AVFilterPad
A filter pad used for either input or output.
Definition: internal.h:50
AV_PIX_FMT_YUVJ411P
@ AV_PIX_FMT_YUVJ411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor ...
Definition: pixfmt.h:248
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
av_cold
#define av_cold
Definition: attributes.h:90
ff_vf_vif
const AVFilter ff_vf_vif
Definition: vf_vif.c:640
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:79
s
#define s(width, name)
Definition: cbs_vp9.c:257
g
const char * g
Definition: vf_curves.c:117
slice_end
static int slice_end(AVCodecContext *avctx, AVFrame *pict)
Handle slice ends.
Definition: mpeg12dec.c:2041
ff_set_common_formats_from_list
int ff_set_common_formats_from_list(AVFilterContext *ctx, const int *fmts)
Equivalent to ff_set_common_formats(ctx, ff_make_format_list(fmts))
Definition: formats.c:703
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:290
ctx
AVFormatContext * ctx
Definition: movenc.c:48
AV_PIX_FMT_GRAY14
#define AV_PIX_FMT_GRAY14
Definition: pixfmt.h:372
av_rescale_q
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:141
do_vif
static AVFrame * do_vif(AVFilterContext *ctx, AVFrame *main, const AVFrame *ref)
Definition: vf_vif.c:439
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
key
const char * key
Definition: hwcontext_opencl.c:168
f
#define f(width, name)
Definition: cbs_vp9.c:255
FILTER_INPUTS
#define FILTER_INPUTS(array)
Definition: internal.h:152
ThreadData::h
int h
Definition: vf_blend.c:59
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:80
arg
const char * arg
Definition: jacosubdec.c:67
AV_PIX_FMT_GRAY10
#define AV_PIX_FMT_GRAY10
Definition: pixfmt.h:370
ThreadData::dst
AVFrame * dst
Definition: vf_blend.c:56
main
int main(int argc, char *argv[])
Definition: avio_list_dir.c:112
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
NULL
#define NULL
Definition: coverity.c:32
fs
#define fs(width, name, subs,...)
Definition: cbs_vp9.c:259
isnan
#define isnan(x)
Definition: libm.h:340
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:78
VIFContext::vif_min
double vif_min[4]
Definition: vf_vif.c:55
src
#define src
Definition: vp8dsp.c:255
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:74
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:117
ThreadData::src
const float * src
Definition: vf_vif.c:92
ThreadData::temp
float ** temp
Definition: vf_vif.c:98
VIFContext::factor
float factor
Definition: vf_vif.c:49
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:191
internal.h
compute_vif2
static int compute_vif2(AVFilterContext *ctx, const float *ref, const float *main, int w, int h, int ref_stride, int main_stride, float *score, float *const data_buf[NUM_DATA_BUFS], float **temp, int gnb_threads)
Definition: vf_vif.c:287
VIFContext::width
int width
Definition: vf_vif.c:46
i
int i
Definition: input.c:406
ff_filter_get_nb_threads
int ff_filter_get_nb_threads(AVFilterContext *ctx)
Get number of threads for current filter instance.
Definition: avfilter.c:804
ThreadData
Used for passing data between threads.
Definition: dsddec.c:67
value
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default value
Definition: writing_filters.txt:86
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
AV_PIX_FMT_YUVJ440P
@ AV_PIX_FMT_YUVJ440P
planar YUV 4:4:0 full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV440P and setting color_range
Definition: pixfmt.h:100
NUM_DATA_BUFS
#define NUM_DATA_BUFS
Definition: vf_vif.c:40
AVFilterPad::name
const char * name
Pad name.
Definition: internal.h:56
config_output
static int config_output(AVFilterLink *outlink)
Definition: vf_vif.c:560
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:271
vif_statistic
static void vif_statistic(const float *mu1_sq, const float *mu2_sq, const float *mu1_mu2, const float *xx_filt, const float *yy_filt, const float *xy_filt, float *num, float *den, int w, int h)
Definition: vf_vif.c:112
AVFilter
Filter definition.
Definition: avfilter.h:149
ret
ret
Definition: filter_design.txt:187
VIFContext::nb_frames
uint64_t nb_frames
Definition: vf_vif.c:57
ff_framesync_init
int ff_framesync_init(FFFrameSync *fs, AVFilterContext *parent, unsigned nb_in)
Initialize a frame sync structure.
Definition: framesync.c:84
VIFContext::ref_data
float * ref_data
Definition: vf_vif.c:52
FFFrameSyncIn::before
enum FFFrameSyncExtMode before
Extrapolation mode for timestamps before the first frame.
Definition: framesync.h:86
VIFContext::temp
float ** temp
Definition: vf_vif.c:51
PF
#define PF(suf)
framesync.h
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
avfilter.h
vif_inputs
static const AVFilterPad vif_inputs[]
Definition: vf_vif.c:621
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:107
temp
else temp
Definition: vf_mcdeint.c:256
AVFILTER_DEFINE_CLASS
AVFILTER_DEFINE_CLASS(vif)
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
VIFContext::data_buf
float * data_buf[NUM_DATA_BUFS]
Definition: vf_vif.c:50
AVFilterContext
An instance of a filter.
Definition: avfilter.h:346
activate
static int activate(AVFilterContext *ctx)
Definition: vf_vif.c:591
AVFILTER_FLAG_SLICE_THREADS
#define AVFILTER_FLAG_SLICE_THREADS
The filter supports multithreading by splitting frames into multiple parts and processing them concur...
Definition: avfilter.h:121
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
FILTER_OUTPUTS
#define FILTER_OUTPUTS(array)
Definition: internal.h:153
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
av_dict_set
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:70
AV_PIX_FMT_YUV411P
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
Definition: pixfmt.h:73
d
d
Definition: ffmpeg_filter.c:156
AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL
#define AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL
Same as AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC, except that the filter will have its filter_frame() c...
Definition: avfilter.h:138
AV_PIX_FMT_YUV410P
@ AV_PIX_FMT_YUV410P
planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
Definition: pixfmt.h:72
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
FFFrameSyncIn::after
enum FFFrameSyncExtMode after
Extrapolation mode for timestamps after the last frame.
Definition: framesync.h:91
h
h
Definition: vp9dsp_template.c:2038
ff_framesync_activate
int ff_framesync_activate(FFFrameSync *fs)
Examine the frames in the filter's input and try to produce output.
Definition: framesync.c:341
avstring.h
ff_framesync_dualinput_get
int ff_framesync_dualinput_get(FFFrameSync *fs, AVFrame **f0, AVFrame **f1)
Definition: framesync.c:376
vif_options
static const AVOption vif_options[]
Definition: vf_vif.c:62
AV_PIX_FMT_GRAY12
#define AV_PIX_FMT_GRAY12
Definition: pixfmt.h:371
drawutils.h
ff_filter_execute
static av_always_inline int ff_filter_execute(AVFilterContext *ctx, avfilter_action_func *func, void *arg, int *ret, int nb_jobs)
Definition: internal.h:143
ThreadData::filter
const float * filter
Definition: vf_vif.c:91
snprintf
#define snprintf
Definition: snprintf.h:34
VIFContext
Definition: vf_vif.c:42
config_input_ref
static int config_input_ref(AVFilterLink *inlink)
Definition: vf_vif.c:489