vf_vif.c
1 /*
2  * Copyright (c) 2017 Ronald S. Bultje <rsbultje@gmail.com>
3  * Copyright (c) 2017 Ashish Pratap Singh <ashk43712@gmail.com>
4  * Copyright (c) 2021 Paul B Mahol
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 /**
24  * @file
25  * Calculate the VIF (Visual Information Fidelity) between two input videos.
26  */
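This is the libavfilter "vif" filter. It takes the distorted clip on its first input pad ("main") and the reference clip on its second ("reference"), attaches the per-scale scores to each output frame as the metadata keys lavfi.vif.scale.0 through lavfi.vif.scale.3, and logs the per-scale averages when the filter is freed. A typical invocation, with placeholder file names, looks something like:

  ffmpeg -i distorted.mp4 -i reference.mp4 -lavfi vif -f null -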
27 
28 #include <float.h>
29 
30 #include "libavutil/avstring.h"
31 #include "libavutil/opt.h"
32 #include "libavutil/pixdesc.h"
33 #include "avfilter.h"
34 #include "framesync.h"
35 #include "drawutils.h"
36 #include "formats.h"
37 #include "internal.h"
38 #include "video.h"
39 
40 #define NUM_DATA_BUFS 13
41 
42 typedef struct VIFContext {
43  const AVClass *class;
44  FFFrameSync fs;
45  const AVPixFmtDescriptor *desc;
46  int width;
47  int height;
48  int nb_threads;
49  float factor;
50  float *data_buf[NUM_DATA_BUFS];
51  float **temp;
52  float *ref_data;
53  float *main_data;
54  double vif_sum[4];
55  double vif_min[4];
56  double vif_max[4];
57  uint64_t nb_frames;
58 } VIFContext;
59 
60 #define OFFSET(x) offsetof(VIFContext, x)
61 
62 static const AVOption vif_options[] = {
63  { NULL }
64 };
65 
66 AVFILTER_DEFINE_CLASS(vif);
67 
68 static const uint8_t vif_filter1d_width1[4] = { 17, 9, 5, 3 };
69 
70 static const float vif_filter1d_table[4][17] =
71 {
72  {
73  0.00745626912, 0.0142655009, 0.0250313189, 0.0402820669, 0.0594526194,
74  0.0804751068, 0.0999041125, 0.113746084, 0.118773937, 0.113746084,
75  0.0999041125, 0.0804751068, 0.0594526194, 0.0402820669, 0.0250313189,
76  0.0142655009, 0.00745626912
77  },
78  {
79  0.0189780835, 0.0558981746, 0.120920904, 0.192116052, 0.224173605,
80  0.192116052, 0.120920904, 0.0558981746, 0.0189780835
81  },
82  {
83  0.054488685, 0.244201347, 0.402619958, 0.244201347, 0.054488685
84  },
85  {
86  0.166378498, 0.667243004, 0.166378498
87  }
88 };
89 
90 typedef struct ThreadData {
91  const float *filter;
92  const float *src;
93  float *dst;
94  int w, h;
95  int src_stride;
96  int dst_stride;
97  int filter_width;
98  float **temp;
99 } ThreadData;
100 
101 static void vif_dec2(const float *src, float *dst, int w, int h,
102  int src_stride, int dst_stride)
103 {
104  const int dst_px_stride = dst_stride / 2;
105 
106  for (int i = 0; i < h / 2; i++) {
107  for (int j = 0; j < w / 2; j++)
108  dst[i * dst_px_stride + j] = src[(i * 2) * src_stride + (j * 2)];
109  }
110 }
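vif_dec2() above downsamples by two in each direction simply by keeping every second sample; the caller is expected to have low-pass filtered the plane first. A minimal sketch of the access pattern, assuming a 4x4 input with src_stride = dst_stride = 4 (so dst_px_stride = 2):

  src:  a b c d      dst:  a c
        e f g h            i k
        i j k l
        m n o p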
111 
112 static void vif_statistic(const float *mu1_sq, const float *mu2_sq,
113  const float *mu1_mu2, const float *xx_filt,
114  const float *yy_filt, const float *xy_filt,
115  float *num, float *den, int w, int h)
116 {
117  static const float sigma_nsq = 2;
118  float mu1_sq_val, mu2_sq_val, mu1_mu2_val, xx_filt_val, yy_filt_val, xy_filt_val;
119  float sigma1_sq, sigma2_sq, sigma12, g, sv_sq, eps = 1.0e-10f;
120  float gain_limit = 100.f;
121  float num_val, den_val;
122  float accum_num = 0.0f;
123  float accum_den = 0.0f;
124 
125  for (int i = 0; i < h; i++) {
126  float accum_inner_num = 0.f;
127  float accum_inner_den = 0.f;
128 
129  for (int j = 0; j < w; j++) {
130  mu1_sq_val = mu1_sq[i * w + j];
131  mu2_sq_val = mu2_sq[i * w + j];
132  mu1_mu2_val = mu1_mu2[i * w + j];
133  xx_filt_val = xx_filt[i * w + j];
134  yy_filt_val = yy_filt[i * w + j];
135  xy_filt_val = xy_filt[i * w + j];
136 
137  sigma1_sq = xx_filt_val - mu1_sq_val;
138  sigma2_sq = yy_filt_val - mu2_sq_val;
139  sigma12 = xy_filt_val - mu1_mu2_val;
140 
141  sigma1_sq = FFMAX(sigma1_sq, 0.0f);
142  sigma2_sq = FFMAX(sigma2_sq, 0.0f);
143  sigma12 = FFMAX(sigma12, 0.0f);
144 
145  g = sigma12 / (sigma1_sq + eps);
146  sv_sq = sigma2_sq - g * sigma12;
147 
148  if (sigma1_sq < eps) {
149  g = 0.0f;
150  sv_sq = sigma2_sq;
151  sigma1_sq = 0.0f;
152  }
153 
154  if (sigma2_sq < eps) {
155  g = 0.0f;
156  sv_sq = 0.0f;
157  }
158 
159  if (g < 0.0f) {
160  sv_sq = sigma2_sq;
161  g = 0.0f;
162  }
163  sv_sq = FFMAX(sv_sq, eps);
164 
165  g = FFMIN(g, gain_limit);
166 
167  num_val = log2f(1.0f + g * g * sigma1_sq / (sv_sq + sigma_nsq));
168  den_val = log2f(1.0f + sigma1_sq / sigma_nsq);
169 
170  if (isnan(den_val))
171  num_val = den_val = 1.f;
172 
173  accum_inner_num += num_val;
174  accum_inner_den += den_val;
175  }
176 
177  accum_num += accum_inner_num;
178  accum_den += accum_inner_den;
179  }
180 
181  num[0] = accum_num;
182  den[0] = accum_den;
183 }
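In equation form, the loop above accumulates per pixel, with sigma_nsq = 2 and eps = 1e-10:

  g     = sigma12 / (sigma1_sq + eps)          (clamped to [0, 100])
  sv_sq = sigma2_sq - g * sigma12              (clamped to at least eps)
  num  += log2(1 + g^2 * sigma1_sq / (sv_sq + sigma_nsq))
  den  += log2(1 + sigma1_sq / sigma_nsq)

The caller then takes num / den as the VIF score for one scale.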
184 
185 static void vif_xx_yy_xy(const float *x, const float *y, float *xx, float *yy,
186  float *xy, int w, int h)
187 {
188  for (int i = 0; i < h; i++) {
189  for (int j = 0; j < w; j++) {
190  float xval = x[j];
191  float yval = y[j];
192  float xxval = xval * xval;
193  float yyval = yval * yval;
194  float xyval = xval * yval;
195 
196  xx[j] = xxval;
197  yy[j] = yyval;
198  xy[j] = xyval;
199  }
200 
201  xx += w;
202  yy += w;
203  xy += w;
204  x += w;
205  y += w;
206  }
207 }
208 
209 static int vif_filter1d(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
210 {
211  ThreadData *td = arg;
212  const float *filter = td->filter;
213  const float *src = td->src;
214  float *dst = td->dst;
215  int w = td->w;
216  int h = td->h;
217  int src_stride = td->src_stride;
218  int dst_stride = td->dst_stride;
219  int filt_w = td->filter_width;
220  float *temp = td->temp[jobnr];
221  const int slice_start = (h * jobnr) / nb_jobs;
222  const int slice_end = (h * (jobnr+1)) / nb_jobs;
223 
224  for (int i = slice_start; i < slice_end; i++) {
225  /** Vertical pass. */
226  for (int j = 0; j < w; j++) {
227  float sum = 0.f;
228 
229  if (i >= filt_w / 2 && i < h - filt_w / 2 - 1) {
230  for (int filt_i = 0; filt_i < filt_w; filt_i++) {
231  const float filt_coeff = filter[filt_i];
232  float img_coeff;
233  int ii = i - filt_w / 2 + filt_i;
234 
235  img_coeff = src[ii * src_stride + j];
236  sum += filt_coeff * img_coeff;
237  }
238  } else {
239  for (int filt_i = 0; filt_i < filt_w; filt_i++) {
240  const float filt_coeff = filter[filt_i];
241  int ii = i - filt_w / 2 + filt_i;
242  float img_coeff;
243 
244  ii = ii < 0 ? -ii : (ii >= h ? 2 * h - ii - 1 : ii);
245 
246  img_coeff = src[ii * src_stride + j];
247  sum += filt_coeff * img_coeff;
248  }
249  }
250 
251  temp[j] = sum;
252  }
253 
254  /** Horizontal pass. */
255  for (int j = 0; j < w; j++) {
256  float sum = 0.f;
257 
258  if (j >= filt_w / 2 && j < w - filt_w / 2 - 1) {
259  for (int filt_j = 0; filt_j < filt_w; filt_j++) {
260  const float filt_coeff = filter[filt_j];
261  int jj = j - filt_w / 2 + filt_j;
262  float img_coeff;
263 
264  img_coeff = temp[jj];
265  sum += filt_coeff * img_coeff;
266  }
267  } else {
268  for (int filt_j = 0; filt_j < filt_w; filt_j++) {
269  const float filt_coeff = filter[filt_j];
270  int jj = j - filt_w / 2 + filt_j;
271  float img_coeff;
272 
273  jj = jj < 0 ? -jj : (jj >= w ? 2 * w - jj - 1 : jj);
274 
275  img_coeff = temp[jj];
276  sum += filt_coeff * img_coeff;
277  }
278  }
279 
280  dst[i * dst_stride + j] = sum;
281  }
282  }
283 
284  return 0;
285 }
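vif_filter1d() applies the kernel separably: the vertical pass writes one filtered row into temp[], and the horizontal pass over temp[] produces the output row. Out-of-range indices near the borders are mirrored by the expression ii < 0 ? -ii : (ii >= h ? 2*h - ii - 1 : ii); for example, with h = 10, ii = -2 maps to 2 and ii = 11 maps to 8.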
286 
287 static int compute_vif2(AVFilterContext *ctx,
288  const float *ref, const float *main, int w, int h,
289  int ref_stride, int main_stride, float *score,
290  float *const data_buf[NUM_DATA_BUFS], float **temp,
291  int gnb_threads)
292 {
293  ThreadData td;
294  float *ref_scale = data_buf[0];
295  float *main_scale = data_buf[1];
296  float *ref_sq = data_buf[2];
297  float *main_sq = data_buf[3];
298  float *ref_main = data_buf[4];
299  float *mu1 = data_buf[5];
300  float *mu2 = data_buf[6];
301  float *mu1_sq = data_buf[7];
302  float *mu2_sq = data_buf[8];
303  float *mu1_mu2 = data_buf[9];
304  float *ref_sq_filt = data_buf[10];
305  float *main_sq_filt = data_buf[11];
306  float *ref_main_filt = data_buf[12];
307 
308  float *curr_ref_scale = (float *)ref;
309  float *curr_main_scale = (float *)main;
310  int curr_ref_stride = ref_stride;
311  int curr_main_stride = main_stride;
312 
313  float num = 0.f;
314  float den = 0.f;
315 
316  for (int scale = 0; scale < 4; scale++) {
317  const float *filter = vif_filter1d_table[scale];
318  int filter_width = vif_filter1d_width1[scale];
319  const int nb_threads = FFMIN(h, gnb_threads);
320  int buf_valid_w = w;
321  int buf_valid_h = h;
322 
323  td.filter = filter;
324  td.filter_width = filter_width;
325 
326  if (scale > 0) {
327  td.src = curr_ref_scale;
328  td.dst = mu1;
329  td.w = w;
330  td.h = h;
331  td.src_stride = curr_ref_stride;
332  td.dst_stride = w;
333  td.temp = temp;
334  ff_filter_execute(ctx, vif_filter1d, &td, NULL, nb_threads);
335 
336  td.src = curr_main_scale;
337  td.dst = mu2;
338  td.src_stride = curr_main_stride;
339  ff_filter_execute(ctx, vif_filter1d, &td, NULL, nb_threads);
340 
341  vif_dec2(mu1, ref_scale, buf_valid_w, buf_valid_h, w, w);
342  vif_dec2(mu2, main_scale, buf_valid_w, buf_valid_h, w, w);
343 
344  w = buf_valid_w / 2;
345  h = buf_valid_h / 2;
346 
347  buf_valid_w = w;
348  buf_valid_h = h;
349 
350  curr_ref_scale = ref_scale;
351  curr_main_scale = main_scale;
352 
353  curr_ref_stride = w;
354  curr_main_stride = w;
355  }
356 
357  td.src = curr_ref_scale;
358  td.dst = mu1;
359  td.w = w;
360  td.h = h;
361  td.src_stride = curr_ref_stride;
362  td.dst_stride = w;
363  td.temp = temp;
364  ff_filter_execute(ctx, vif_filter1d, &td, NULL, nb_threads);
365 
366  td.src = curr_main_scale;
367  td.dst = mu2;
368  td.src_stride = curr_main_stride;
369  ff_filter_execute(ctx, vif_filter1d, &td, NULL, nb_threads);
370 
371  vif_xx_yy_xy(mu1, mu2, mu1_sq, mu2_sq, mu1_mu2, w, h);
372 
373  vif_xx_yy_xy(curr_ref_scale, curr_main_scale, ref_sq, main_sq, ref_main, w, h);
374 
375  td.src = ref_sq;
376  td.dst = ref_sq_filt;
377  td.src_stride = w;
378  ff_filter_execute(ctx, vif_filter1d, &td, NULL, nb_threads);
379 
380  td.src = main_sq;
381  td.dst = main_sq_filt;
382  td.src_stride = w;
383  ff_filter_execute(ctx, vif_filter1d, &td, NULL, nb_threads);
384 
385  td.src = ref_main;
386  td.dst = ref_main_filt;
387  ff_filter_execute(ctx, vif_filter1d, &td, NULL, nb_threads);
388 
389  vif_statistic(mu1_sq, mu2_sq, mu1_mu2, ref_sq_filt, main_sq_filt,
390  ref_main_filt, &num, &den, w, h);
391 
392  score[scale] = den <= FLT_EPSILON ? 1.f : num / den;
393  }
394 
395  return 0;
396 }
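compute_vif2() evaluates the statistic at four scales: scale 0 works on the full-resolution planes, and each later scale filters the previous one with the next (shorter) kernel, decimates it with vif_dec2(), and halves both dimensions before recomputing the statistic. For a 1920x1080 input the four scales therefore operate on 1920x1080, 960x540, 480x270 and 240x135 samples, and score[scale] is num / den (or 1.0 when den is effectively zero).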
397 
398 #define offset_fn(type, bits) \
399 static void offset_##bits##bit(VIFContext *s, \
400  const AVFrame *ref, \
401  AVFrame *main, int stride)\
402 { \
403  int w = s->width; \
404  int h = s->height; \
405  \
406  int ref_stride = ref->linesize[0]; \
407  int main_stride = main->linesize[0]; \
408  \
409  const type *ref_ptr = (const type *) ref->data[0]; \
410  const type *main_ptr = (const type *) main->data[0]; \
411  \
412  const float factor = s->factor; \
413  \
414  float *ref_ptr_data = s->ref_data; \
415  float *main_ptr_data = s->main_data; \
416  \
417  for (int i = 0; i < h; i++) { \
418  for (int j = 0; j < w; j++) { \
419  ref_ptr_data[j] = ref_ptr[j] * factor - 128.f; \
420  main_ptr_data[j] = main_ptr[j] * factor - 128.f; \
421  } \
422  ref_ptr += ref_stride / sizeof(type); \
423  ref_ptr_data += w; \
424  main_ptr += main_stride / sizeof(type); \
425  main_ptr_data += w; \
426  } \
427 }
428 
429 offset_fn(uint8_t, 8)
430 offset_fn(uint16_t, 16)
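The offset_8bit()/offset_16bit() helpers convert the luma plane to float, rescale it to an 8-bit range with factor = 1 / 2^(depth - 8) (assigned in do_vif() below), and center it around zero by subtracting 128. For example, a 10-bit sample of 512 becomes 512 / 4 - 128 = 0, while an 8-bit sample of 255 becomes 127.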
431 
432 static void set_meta(AVDictionary **metadata, const char *key, float d)
433 {
434  char value[257];
435  snprintf(value, sizeof(value), "%f", d);
436  av_dict_set(metadata, key, value, 0);
437 }
438 
439 static AVFrame *do_vif(AVFilterContext *ctx, AVFrame *main, const AVFrame *ref)
440 {
441  VIFContext *s = ctx->priv;
442  AVDictionary **metadata = &main->metadata;
443  float score[4];
444 
445  s->factor = 1.f / (1 << (s->desc->comp[0].depth - 8));
446  if (s->desc->comp[0].depth <= 8) {
447  offset_8bit(s, ref, main, s->width);
448  } else {
449  offset_16bit(s, ref, main, s->width);
450  }
451 
452  compute_vif2(ctx, s->ref_data, s->main_data,
453  s->width, s->height, s->width, s->width,
454  score, s->data_buf, s->temp, s->nb_threads);
455 
456  set_meta(metadata, "lavfi.vif.scale.0", score[0]);
457  set_meta(metadata, "lavfi.vif.scale.1", score[1]);
458  set_meta(metadata, "lavfi.vif.scale.2", score[2]);
459  set_meta(metadata, "lavfi.vif.scale.3", score[3]);
460 
461  for (int i = 0; i < 4; i++) {
462  s->vif_min[i] = FFMIN(s->vif_min[i], score[i]);
463  s->vif_max[i] = FFMAX(s->vif_max[i], score[i]);
464  s->vif_sum[i] += score[i];
465  }
466 
467  s->nb_frames++;
468 
469  return main;
470 }
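do_vif() only attaches metadata; the frame data itself passes through untouched (hence AVFILTER_FLAG_METADATA_ONLY below). One way to inspect the per-frame keys, assuming the standard metadata filter is available in the build, is to chain it after vif, e.g. -lavfi "vif,metadata=mode=print".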
471 
472 static const enum AVPixelFormat pix_fmts[] = {
473  AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY9, AV_PIX_FMT_GRAY10,
474  AV_PIX_FMT_GRAY12, AV_PIX_FMT_GRAY14, AV_PIX_FMT_GRAY16,
475  AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV420P,
476  AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV444P,
477  AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P,
478  AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ444P,
479 #define PF(suf) AV_PIX_FMT_YUV420##suf, AV_PIX_FMT_YUV422##suf, AV_PIX_FMT_YUV444##suf
480  PF(P9), PF(P10), PF(P12), PF(P14), PF(P16),
481  AV_PIX_FMT_NONE
482 };
483 
484 static int config_input_ref(AVFilterLink *inlink)
485 {
486  AVFilterContext *ctx = inlink->dst;
487  VIFContext *s = ctx->priv;
488 
489  if (ctx->inputs[0]->w != ctx->inputs[1]->w ||
490  ctx->inputs[0]->h != ctx->inputs[1]->h) {
491  av_log(ctx, AV_LOG_ERROR, "Width and height of input videos must be the same.\n");
492  return AVERROR(EINVAL);
493  }
494 
495  s->desc = av_pix_fmt_desc_get(inlink->format);
496  s->width = ctx->inputs[0]->w;
497  s->height = ctx->inputs[0]->h;
498  s->nb_threads = ff_filter_get_nb_threads(ctx);
499 
500  for (int i = 0; i < 4; i++) {
501  s->vif_min[i] = DBL_MAX;
502  s->vif_max[i] = -DBL_MAX;
503  }
504 
505  for (int i = 0; i < NUM_DATA_BUFS; i++) {
506  if (!(s->data_buf[i] = av_calloc(s->width, s->height * sizeof(float))))
507  return AVERROR(ENOMEM);
508  }
509 
510  if (!(s->ref_data = av_calloc(s->width, s->height * sizeof(float))))
511  return AVERROR(ENOMEM);
512 
513  if (!(s->main_data = av_calloc(s->width, s->height * sizeof(float))))
514  return AVERROR(ENOMEM);
515 
516  if (!(s->temp = av_calloc(s->nb_threads, sizeof(s->temp[0]))))
517  return AVERROR(ENOMEM);
518 
519  for (int i = 0; i < s->nb_threads; i++) {
520  if (!(s->temp[i] = av_calloc(s->width, sizeof(float))))
521  return AVERROR(ENOMEM);
522  }
523 
524  return 0;
525 }
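config_input_ref() allocates 13 work buffers plus float copies of the reference and main planes, each width x height floats, plus one width-sized temp row per thread. As a rough sizing example, a 1920x1080 input needs about 15 * 1920 * 1080 * 4 bytes, i.e. roughly 119 MiB, before the per-thread rows.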
526 
527 static int process_frame(FFFrameSync *fs)
528 {
529  AVFilterContext *ctx = fs->parent;
530  VIFContext *s = fs->opaque;
531  AVFilterLink *outlink = ctx->outputs[0];
532  AVFrame *out_frame, *main_frame = NULL, *ref_frame = NULL;
533  int ret;
534 
535  ret = ff_framesync_dualinput_get(fs, &main_frame, &ref_frame);
536  if (ret < 0)
537  return ret;
538 
539  if (ctx->is_disabled || !ref_frame) {
540  out_frame = main_frame;
541  } else {
542  out_frame = do_vif(ctx, main_frame, ref_frame);
543  }
544 
545  out_frame->pts = av_rescale_q(s->fs.pts, s->fs.time_base, outlink->time_base);
546 
547  return ff_filter_frame(outlink, out_frame);
548 }
549 
550 
551 static int config_output(AVFilterLink *outlink)
552 {
553  AVFilterContext *ctx = outlink->src;
554  VIFContext *s = ctx->priv;
555  AVFilterLink *mainlink = ctx->inputs[0];
556  FFFrameSyncIn *in;
557  int ret;
558 
559  outlink->w = mainlink->w;
560  outlink->h = mainlink->h;
561  outlink->time_base = mainlink->time_base;
562  outlink->sample_aspect_ratio = mainlink->sample_aspect_ratio;
563  outlink->frame_rate = mainlink->frame_rate;
564  if ((ret = ff_framesync_init(&s->fs, ctx, 2)) < 0)
565  return ret;
566 
567  in = s->fs.in;
568  in[0].time_base = mainlink->time_base;
569  in[1].time_base = ctx->inputs[1]->time_base;
570  in[0].sync = 2;
571  in[0].before = EXT_STOP;
572  in[0].after = EXT_STOP;
573  in[1].sync = 1;
574  in[1].before = EXT_STOP;
575  in[1].after = EXT_STOP;
576  s->fs.opaque = s;
577  s->fs.on_event = process_frame;
578 
579  return ff_framesync_configure(&s->fs);
580 }
581 
582 static int activate(AVFilterContext *ctx)
583 {
584  VIFContext *s = ctx->priv;
585  return ff_framesync_activate(&s->fs);
586 }
587 
588 static av_cold void uninit(AVFilterContext *ctx)
589 {
590  VIFContext *s = ctx->priv;
591 
592  if (s->nb_frames > 0) {
593  for (int i = 0; i < 4; i++)
594  av_log(ctx, AV_LOG_INFO, "VIF scale=%d average:%f min:%f max:%f\n",
595  i, s->vif_sum[i] / s->nb_frames, s->vif_min[i], s->vif_max[i]);
596  }
597 
598  for (int i = 0; i < NUM_DATA_BUFS; i++)
599  av_freep(&s->data_buf[i]);
600 
601  av_freep(&s->ref_data);
602  av_freep(&s->main_data);
603 
604  for (int i = 0; i < s->nb_threads && s->temp; i++)
605  av_freep(&s->temp[i]);
606 
607  av_freep(&s->temp);
608 
609  ff_framesync_uninit(&s->fs);
610 }
611 
612 static const AVFilterPad vif_inputs[] = {
613  {
614  .name = "main",
615  .type = AVMEDIA_TYPE_VIDEO,
616  },{
617  .name = "reference",
618  .type = AVMEDIA_TYPE_VIDEO,
619  .config_props = config_input_ref,
620  },
621 };
622 
623 static const AVFilterPad vif_outputs[] = {
624  {
625  .name = "default",
626  .type = AVMEDIA_TYPE_VIDEO,
627  .config_props = config_output,
628  },
629 };
630 
631 const AVFilter ff_vf_vif = {
632  .name = "vif",
633  .description = NULL_IF_CONFIG_SMALL("Calculate the VIF between two video streams."),
634  .uninit = uninit,
635  .priv_size = sizeof(VIFContext),
636  .priv_class = &vif_class,
637  .activate = activate,
638  FILTER_INPUTS(vif_inputs),
639  FILTER_OUTPUTS(vif_outputs),
640  FILTER_PIXFMTS_ARRAY(pix_fmts),
641  .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL |
642  AVFILTER_FLAG_SLICE_THREADS |
643  AVFILTER_FLAG_METADATA_ONLY,
644 };