FFmpeg
vf_libvmaf.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2017 Ronald S. Bultje <rsbultje@gmail.com>
3  * Copyright (c) 2017 Ashish Pratap Singh <ashk43712@gmail.com>
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * Calculate the VMAF between two input videos.
25  */
26 
27 #include <libvmaf.h>
28 
29 #include "libavutil/avstring.h"
30 #include "libavutil/opt.h"
31 #include "libavutil/pixdesc.h"
32 #include "avfilter.h"
33 #include "drawutils.h"
34 #include "formats.h"
35 #include "framesync.h"
36 #include "internal.h"
37 #include "video.h"
38 
39 typedef struct LIBVMAFContext {
/* Private state for the libvmaf filter instance. */
 40  const AVClass *class;
/* NOTE(review): this extraction is missing several member lines (e.g. the
 * FFFrameSync member and the deprecated enable_transform / phone_model /
 * n_subsample / enable_conf_interval fields referenced by the option table
 * below, plus the closing brace) -- confirm against the full source. */
 42  char *model_path;   /* deprecated: path passed via the old model_path option */
 43  char *log_path;     /* where vmaf_write_output() writes the per-frame log */
 44  char *log_fmt;      /* log format name: csv, json, xml or sub */
 47  int psnr;           /* deprecated feature toggles */
 48  int ssim;
 49  int ms_ssim;
 50  char *pool;         /* pooling method name, mapped by pool_method_map() */
 51  int n_threads;      /* passed to VmafConfiguration.n_threads */
 54  char *model_cfg;    /* '|'-delimited model spec parsed by parse_models() */
 55  char *feature_cfg;  /* '|'-delimited feature spec parsed by parse_features() */
 56  VmafContext *vmaf;  /* owned libvmaf context, closed in uninit() */
 57  VmafModel **model;  /* array of model_cnt loaded models */
 58  unsigned model_cnt;
 59  unsigned frame_cnt; /* frames pushed via vmaf_read_pictures() */
 60  unsigned bpc;       /* input bit depth, set in config_input_ref() */
 62 
63 #define OFFSET(x) offsetof(LIBVMAFContext, x)
64 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
65 
66 static const AVOption libvmaf_options[] = {
/* AVOption table: the *_DEPRECATED entries are kept only for backward
 * compatibility; their help strings point users at the replacement
 * "model"/"feature" option syntax. */
 67  {"model_path", "use model='path=...'.", OFFSET(model_path), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 1, FLAGS|AV_OPT_FLAG_DEPRECATED},
 68  {"log_path", "Set the file path to be used to write log.", OFFSET(log_path), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 1, FLAGS},
 69  {"log_fmt", "Set the format of the log (csv, json, xml, or sub).", OFFSET(log_fmt), AV_OPT_TYPE_STRING, {.str="xml"}, 0, 1, FLAGS},
 70  {"enable_transform", "use model='enable_transform=true'.", OFFSET(enable_transform), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS|AV_OPT_FLAG_DEPRECATED},
 71  {"phone_model", "use model='enable_transform=true'.", OFFSET(phone_model), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS|AV_OPT_FLAG_DEPRECATED},
 72  {"psnr", "use feature='name=psnr'.", OFFSET(psnr), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS|AV_OPT_FLAG_DEPRECATED},
 73  {"ssim", "use feature='name=float_ssim'.", OFFSET(ssim), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS|AV_OPT_FLAG_DEPRECATED},
 74  {"ms_ssim", "use feature='name=float_ms_ssim'.", OFFSET(ms_ssim), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS|AV_OPT_FLAG_DEPRECATED},
 75  {"pool", "Set the pool method to be used for computing vmaf.", OFFSET(pool), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 1, FLAGS},
 76  {"n_threads", "Set number of threads to be used when computing vmaf.", OFFSET(n_threads), AV_OPT_TYPE_INT, {.i64=0}, 0, UINT_MAX, FLAGS},
 77  {"n_subsample", "Set interval for frame subsampling used when computing vmaf.", OFFSET(n_subsample), AV_OPT_TYPE_INT, {.i64=1}, 1, UINT_MAX, FLAGS},
 78  {"enable_conf_interval", "model='enable_conf_interval=true'.", OFFSET(enable_conf_interval), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS|AV_OPT_FLAG_DEPRECATED},
 79  {"model", "Set the model to be used for computing vmaf.", OFFSET(model_cfg), AV_OPT_TYPE_STRING, {.str="version=vmaf_v0.6.1"}, 0, 1, FLAGS},
 80  {"feature", "Set the feature to be used for computing vmaf.", OFFSET(feature_cfg), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 1, FLAGS},
 81  { NULL }
 82 };
83 
85 
86 static enum VmafPixelFormat pix_fmt_map(enum AVPixelFormat av_pix_fmt)
 87 {
/* Map an FFmpeg pixel format onto the corresponding libvmaf pixel format.
 * NOTE(review): the embedded line numbers jump (89->93, 94->98, 99->103),
 * so this extraction is missing the high-bit-depth case labels
 * (presumably AV_PIX_FMT_YUV42xP10/12/16LE falling through to the same
 * returns) -- confirm against the full source. */
 88  switch (av_pix_fmt) {
 89  case AV_PIX_FMT_YUV420P:
 93  return VMAF_PIX_FMT_YUV420P;
 94  case AV_PIX_FMT_YUV422P:
 98  return VMAF_PIX_FMT_YUV422P;
 99  case AV_PIX_FMT_YUV444P:
 103  return VMAF_PIX_FMT_YUV444P;
 104  default:
/* Anything unmapped is reported as unknown; vmaf_picture_alloc() will fail. */
 105  return VMAF_PIX_FMT_UNKNOWN;
 106  }
 107 }
108 
109 static int copy_picture_data(AVFrame *src, VmafPicture *dst, unsigned bpc)
110 {
111  const int bytes_per_value = bpc > 8 ? 2 : 1;
112  int err = vmaf_picture_alloc(dst, pix_fmt_map(src->format), bpc,
113  src->width, src->height);
114  if (err)
115  return AVERROR(ENOMEM);
116 
117  for (unsigned i = 0; i < 3; i++) {
118  uint8_t *src_data = src->data[i];
119  uint8_t *dst_data = dst->data[i];
120  for (unsigned j = 0; j < dst->h[i]; j++) {
121  memcpy(dst_data, src_data, bytes_per_value * dst->w[i]);
122  src_data += src->linesize[i];
123  dst_data += dst->stride[i];
124  }
125  }
126 
127  return 0;
128 }
129 
130 static int do_vmaf(FFFrameSync *fs)
131 {
132  AVFilterContext *ctx = fs->parent;
133  LIBVMAFContext *s = ctx->priv;
134  VmafPicture pic_ref, pic_dist;
135  AVFrame *ref, *dist;
136  int err = 0;
137 
138  int ret = ff_framesync_dualinput_get(fs, &dist, &ref);
139  if (ret < 0)
140  return ret;
141  if (ctx->is_disabled || !ref)
142  return ff_filter_frame(ctx->outputs[0], dist);
143 
144  err = copy_picture_data(ref, &pic_ref, s->bpc);
145  if (err) {
146  av_log(s, AV_LOG_ERROR, "problem during vmaf_picture_alloc.\n");
147  return AVERROR(ENOMEM);
148  }
149 
150  err = copy_picture_data(dist, &pic_dist, s->bpc);
151  if (err) {
152  av_log(s, AV_LOG_ERROR, "problem during vmaf_picture_alloc.\n");
153  vmaf_picture_unref(&pic_ref);
154  return AVERROR(ENOMEM);
155  }
156 
157  err = vmaf_read_pictures(s->vmaf, &pic_ref, &pic_dist, s->frame_cnt++);
158  if (err) {
159  av_log(s, AV_LOG_ERROR, "problem during vmaf_read_pictures.\n");
160  return AVERROR(EINVAL);
161  }
162 
163  return ff_filter_frame(ctx->outputs[0], dist);
164 }
165 
166 
167 static AVDictionary **delimited_dict_parse(char *str, unsigned *cnt)
168 {
169  AVDictionary **dict = NULL;
170  char *str_copy = NULL;
171  char *saveptr = NULL;
172  unsigned cnt2;
173  int err = 0;
174 
175  if (!str)
176  return NULL;
177 
178  cnt2 = 1;
179  for (char *p = str; *p; p++) {
180  if (*p == '|')
181  cnt2++;
182  }
183 
184  dict = av_calloc(cnt2, sizeof(*dict));
185  if (!dict)
186  goto fail;
187 
188  str_copy = av_strdup(str);
189  if (!str_copy)
190  goto fail;
191 
192  *cnt = 0;
193  for (unsigned i = 0; i < cnt2; i++) {
194  char *s = av_strtok(i == 0 ? str_copy : NULL, "|", &saveptr);
195  if (!s)
196  continue;
197  err = av_dict_parse_string(&dict[(*cnt)++], s, "=", ":", 0);
198  if (err)
199  goto fail;
200  }
201 
202  av_free(str_copy);
203  return dict;
204 
205 fail:
206  if (dict) {
207  for (unsigned i = 0; i < *cnt; i++) {
208  if (dict[i])
209  av_dict_free(&dict[i]);
210  }
211  av_free(dict);
212  }
213 
214  av_free(str_copy);
215  *cnt = 0;
216  return NULL;
217 }
218 
/* Parse the "feature" option ('|'-delimited dictionaries) and register each
 * named feature extractor with the libvmaf context.
 * NOTE(review): this extraction is missing the function signature line
 * (per the index: static int parse_features(AVFilterContext *ctx)) and the
 * av_log( openers of several error messages -- confirm against the full
 * source. */
220 {
 221  LIBVMAFContext *s = ctx->priv;
 222  AVDictionary **dict = NULL;
 223  unsigned dict_cnt;
 224  int err = 0;
 225 
 226  if (!s->feature_cfg)
 227  return 0;
 228 
 229  dict = delimited_dict_parse(s->feature_cfg, &dict_cnt);
 230  if (!dict) {
 232  "could not parse feature config: %s\n", s->feature_cfg);
 233  return AVERROR(EINVAL);
 234  }
 235 
 236  for (unsigned i = 0; i < dict_cnt; i++) {
 237  char *feature_name = NULL;
 238  VmafFeatureDictionary *feature_opts_dict = NULL;
 239  const AVDictionaryEntry *e = NULL;
 240 
/* The "name" entry selects the feature; every other key/value pair becomes
 * an option in the feature's dictionary. */
 241  while (e = av_dict_iterate(dict[i], e)) {
 242  if (av_stristr(e->key, "name")) {
 243  feature_name = e->value;
 244  continue;
 245  }
 246 
 247  err = vmaf_feature_dictionary_set(&feature_opts_dict, e->key,
 248  e->value);
 249  if (err) {
 251  "could not set feature option: %s.%s=%s\n",
 252  feature_name, e->key, e->value);
 253  goto exit;
 254  }
 255  }
 256 
 257  err = vmaf_use_feature(s->vmaf, feature_name, feature_opts_dict);
 258  if (err) {
 260  "problem during vmaf_use_feature: %s\n", feature_name);
 261  goto exit;
 262  }
 263  }
 264 
/* Shared cleanup for both success and failure paths. */
 265 exit:
 266  for (unsigned i = 0; i < dict_cnt; i++) {
 267  if (dict[i])
 268  av_dict_free(&dict[i]);
 269  }
 270  av_free(dict);
 271  return err;
 272 }
273 
/* Parse the "model" option ('|'-delimited dictionaries), load each model
 * (by version or by path), apply per-model feature overloads, then register
 * all models' feature extractors with the libvmaf context.
 * NOTE(review): this extraction is missing the function signature line
 * (per the index: static int parse_models(AVFilterContext *ctx)) and the
 * av_log( openers of several error messages -- confirm against the full
 * source. */
275 {
 276  LIBVMAFContext *s = ctx->priv;
 277  AVDictionary **dict;
 278  unsigned dict_cnt;
 279  int err = 0;
 280 
 281  if (!s->model_cfg) return 0;
 282 
 283  dict_cnt = 0;
 284  dict = delimited_dict_parse(s->model_cfg, &dict_cnt);
 285  if (!dict) {
 287  "could not parse model config: %s\n", s->model_cfg);
 288  return AVERROR(EINVAL);
 289  }
 290 
 291  s->model_cnt = dict_cnt;
 292  s->model = av_calloc(s->model_cnt, sizeof(*s->model));
 293  if (!s->model)
/* NOTE(review): this early return leaks dict[] (the exit: cleanup below is
 * bypassed) -- flag for a follow-up fix. */
 294  return AVERROR(ENOMEM);
 295 
 296  for (unsigned i = 0; i < dict_cnt; i++) {
 297  VmafModelConfig model_cfg = { 0 };
 298  const AVDictionaryEntry *e = NULL;
 299  char *version = NULL;
 300  char *path = NULL;
 301 
/* First pass over the entries: collect model flags, name, version, path. */
 302  while (e = av_dict_iterate(dict[i], e)) {
 303  if (av_stristr(e->key, "disable_clip")) {
 304  model_cfg.flags |= av_stristr(e->value, "true") ?
 305  VMAF_MODEL_FLAG_DISABLE_CLIP : 0;
 306  continue;
 307  }
 308 
 309  if (av_stristr(e->key, "enable_transform")) {
 310  model_cfg.flags |= av_stristr(e->value, "true") ?
 311  VMAF_MODEL_FLAG_ENABLE_TRANSFORM : 0;
 312  continue;
 313  }
 314 
 315  if (av_stristr(e->key, "name")) {
 316  model_cfg.name = e->value;
 317  continue;
 318  }
 319 
 320  if (av_stristr(e->key, "version")) {
 321  version = e->value;
 322  continue;
 323  }
 324 
 325  if (av_stristr(e->key, "path")) {
 326  path = e->value;
 327  continue;
 328  }
 329  }
 330 
/* Built-in model version takes precedence; a path is tried only if no
 * model was loaded by version. */
 331  if (version) {
 332  err = vmaf_model_load(&s->model[i], &model_cfg, version);
 333  if (err) {
 335  "could not load libvmaf model with version: %s\n",
 336  version);
 337  goto exit;
 338  }
 339  }
 340 
 341  if (path && !s->model[i]) {
 342  err = vmaf_model_load_from_path(&s->model[i], &model_cfg, path);
 343  if (err) {
 345  "could not load libvmaf model with path: %s\n",
 346  path);
 347  goto exit;
 348  }
 349  }
 350 
 351  if (!s->model[i]) {
 353  "could not load libvmaf model with config: %s\n",
 354  s->model_cfg);
/* NOTE(review): err may still be 0 here (neither version nor path given),
 * so this failure path can return success -- confirm and flag. */
 355  goto exit;
 356  }
 357 
/* Second pass: "feature.option=value" keys become per-model feature
 * overloads (key split on '.' by av_strtok). */
 358  while (e = av_dict_iterate(dict[i], e)) {
 359  VmafFeatureDictionary *feature_opts_dict = NULL;
 360  char *feature_opt = NULL;
 361 
 362  char *feature_name = av_strtok(e->key, ".", &feature_opt);
 363  if (!feature_opt)
 364  continue;
 365 
 366  err = vmaf_feature_dictionary_set(&feature_opts_dict,
 367  feature_opt, e->value);
 368  if (err) {
 370  "could not set feature option: %s.%s=%s\n",
 371  feature_name, feature_opt, e->value);
 372  err = AVERROR(EINVAL);
 373  goto exit;
 374  }
 375 
 376  err = vmaf_model_feature_overload(s->model[i], feature_name,
 377  feature_opts_dict);
 378  if (err) {
 380  "could not overload feature: %s\n", feature_name);
 381  err = AVERROR(EINVAL);
 382  goto exit;
 383  }
 384  }
 385  }
 386 
 387  for (unsigned i = 0; i < s->model_cnt; i++) {
 388  err = vmaf_use_features_from_model(s->vmaf, s->model[i]);
 389  if (err) {
 391  "problem during vmaf_use_features_from_model\n");
 392  err = AVERROR(EINVAL);
 393  goto exit;
 394  }
 395  }
 396 
/* Shared cleanup for both success and failure paths. */
 397 exit:
 398  for (unsigned i = 0; i < dict_cnt; i++) {
 399  if (dict[i])
 400  av_dict_free(&dict[i]);
 401  }
 402  av_free(dict);
 403  return err;
 404 }
405 
406 static enum VmafLogLevel log_level_map(int log_level)
407 {
408  switch (log_level) {
409  case AV_LOG_QUIET:
410  return VMAF_LOG_LEVEL_NONE;
411  case AV_LOG_ERROR:
412  return VMAF_LOG_LEVEL_ERROR;
413  case AV_LOG_WARNING:
414  return VMAF_LOG_LEVEL_WARNING;
415  case AV_LOG_INFO:
416  return VMAF_LOG_LEVEL_INFO;
417  case AV_LOG_DEBUG:
418  return VMAF_LOG_LEVEL_DEBUG;
419  default:
420  return VMAF_LOG_LEVEL_INFO;
421  }
422 }
423 
/* Honor the deprecated options (model_path, enable_transform, phone_model,
 * psnr, ssim, ms_ssim, enable_conf_interval) by translating them into the
 * equivalent libvmaf model/feature registrations.
 * NOTE(review): this extraction is missing the function signature line
 * (per the index: static int parse_deprecated_options(AVFilterContext *ctx))
 * and the av_log( openers of several error messages -- confirm against the
 * full source. */
425 {
 426  LIBVMAFContext *s = ctx->priv;
 427  VmafModel *model = NULL;
 428  VmafModelCollection *model_collection = NULL;
 429  enum VmafModelFlags flags = VMAF_MODEL_FLAGS_DEFAULT;
 430  int err = 0;
 431 
 432  VmafModelConfig model_cfg = {
 433  .name = "vmaf",
 434  .flags = flags,
 435  };
 436 
/* NOTE(review): model_cfg.flags captured `flags` above, BEFORE this update,
 * so VMAF_MODEL_FLAG_ENABLE_TRANSFORM never reaches model_cfg -- the
 * enable_transform/phone_model options appear to be silently ignored.
 * Recommend computing `flags` before initializing model_cfg; confirm. */
 437  if (s->enable_transform || s->phone_model)
 438  flags |= VMAF_MODEL_FLAG_ENABLE_TRANSFORM;
 439 
 440  if (!s->model_path)
 441  goto extra_metrics_only;
 442 
/* enable_conf_interval selects a model collection (bootstrapped model)
 * instead of a single model. */
 443  if (s->enable_conf_interval) {
 444  err = vmaf_model_collection_load_from_path(&model, &model_collection,
 445  &model_cfg, s->model_path);
 446  if (err) {
 448  "problem loading model file: %s\n", s->model_path);
 449  goto exit;
 450  }
 451 
 452  err = vmaf_use_features_from_model_collection(s->vmaf, model_collection);
 453  if (err) {
 455  "problem loading feature extractors from model file: %s\n",
 456  s->model_path);
 457  goto exit;
 458  }
 459  } else {
 460  err = vmaf_model_load_from_path(&model, &model_cfg, s->model_path);
 461  if (err) {
 463  "problem loading model file: %s\n", s->model_path);
 464  goto exit;
 465  }
 466  err = vmaf_use_features_from_model(s->vmaf, model);
 467  if (err) {
 469  "problem loading feature extractors from model file: %s\n",
 470  s->model_path);
 471  goto exit;
 472  }
 473  }
 474 
/* The deprecated psnr/ssim/ms_ssim booleans map to extra feature
 * extractors registered alongside the model. */
 475 extra_metrics_only:
 476  if (s->psnr) {
 477  VmafFeatureDictionary *d = NULL;
 478  vmaf_feature_dictionary_set(&d, "enable_chroma", "false");
 479 
 480  err = vmaf_use_feature(s->vmaf, "psnr", d);
 481  if (err) {
 483  "problem loading feature extractor: psnr\n");
 484  goto exit;
 485  }
 486  }
 487 
 488  if (s->ssim) {
 489  err = vmaf_use_feature(s->vmaf, "float_ssim", NULL);
 490  if (err) {
 492  "problem loading feature extractor: ssim\n");
 493  goto exit;
 494  }
 495  }
 496 
 497  if (s->ms_ssim) {
 498  err = vmaf_use_feature(s->vmaf, "float_ms_ssim", NULL);
 499  if (err) {
 501  "problem loading feature extractor: ms_ssim\n");
 502  goto exit;
 503  }
 504  }
 505 
 506 exit:
 507  return err;
 508 }
509 
/* Filter init: create the libvmaf context from the user options, then run
 * the deprecated-option, model and feature parsers, and install the
 * framesync callback.
 * NOTE(review): this extraction is missing the signature line (per the
 * index: static av_cold int init(AVFilterContext *ctx)) and the call whose
 * result is checked at the first bare `if (err)` below (per the index,
 * presumably err = parse_deprecated_options(ctx);) -- confirm. */
511 {
 512  LIBVMAFContext *s = ctx->priv;
 513  int err = 0;
 514 
 515  VmafConfiguration cfg = {
 516  .log_level = log_level_map(av_log_get_level()),
 517  .n_subsample = s->n_subsample,
 518  .n_threads = s->n_threads,
 519  };
 520 
 521  err = vmaf_init(&s->vmaf, cfg);
 522  if (err)
 523  return AVERROR(EINVAL);
 524 
 526  if (err)
 527  return err;
 528 
 529  err = parse_models(ctx);
 530  if (err)
 531  return err;
 532 
 533  err = parse_features(ctx);
 534  if (err)
 535  return err;
 536 
/* Score frame pairs as framesync delivers them. */
 537  s->fs.on_event = do_vmaf;
 538  return 0;
 539 }
540 
/* Supported input pixel formats.
 * NOTE(review): the array entries (lines 542-544) are missing from this
 * extraction; per the index they are the planar YUV 420/422/444 formats at
 * 8/10/12/16-bit LE plus the AV_PIX_FMT_NONE terminator -- confirm. */
541 static const enum AVPixelFormat pix_fmts[] = {
 545 };
546 
/* Reference-input config: require both inputs to match in width, height and
 * pixel format, then record the input bit depth for picture copies.
 * NOTE(review): this extraction is missing the signature line (per the
 * index: static int config_input_ref(AVFilterLink *inlink)) -- confirm. */
548 {
 549  AVFilterContext *ctx = inlink->dst;
 550  LIBVMAFContext *s = ctx->priv;
 551  const AVPixFmtDescriptor *desc;
 552  int err = 0;
 553 
/* Accumulate all mismatch errors so every problem is reported at once. */
 554  if (ctx->inputs[0]->w != ctx->inputs[1]->w) {
 555  av_log(ctx, AV_LOG_ERROR, "input width must match.\n");
 556  err |= AVERROR(EINVAL);
 557  }
 558 
 559  if (ctx->inputs[0]->h != ctx->inputs[1]->h) {
 560  av_log(ctx, AV_LOG_ERROR, "input height must match.\n");
 561  err |= AVERROR(EINVAL);
 562  }
 563 
 564  if (ctx->inputs[0]->format != ctx->inputs[1]->format) {
 565  av_log(ctx, AV_LOG_ERROR, "input pix_fmt must match.\n");
 566  err |= AVERROR(EINVAL);
 567  }
 568 
 569  if (err)
 570  return err;
 571 
/* Bit depth of the first component drives the 1- vs 2-byte sample copies
 * in copy_picture_data(). */
 572  desc = av_pix_fmt_desc_get(inlink->format);
 573  s->bpc = desc->comp[0].depth;
 574 
 575  return 0;
 576 }
577 
578 static int config_output(AVFilterLink *outlink)
 579 {
/* Output config: mirror the main input's properties onto the output link
 * and configure framesync.
 * NOTE(review): the line assigning `ret` before the first check (orig. 585)
 * is missing from this extraction; per the index it is presumably
 * ret = ff_framesync_init_dualinput(&s->fs, ctx); -- confirm. */
 580  AVFilterContext *ctx = outlink->src;
 581  LIBVMAFContext *s = ctx->priv;
 582  AVFilterLink *mainlink = ctx->inputs[0];
 583  int ret;
 584 
 586  if (ret < 0)
 587  return ret;
 588  outlink->w = mainlink->w;
 589  outlink->h = mainlink->h;
 590  outlink->time_base = mainlink->time_base;
 591  outlink->sample_aspect_ratio = mainlink->sample_aspect_ratio;
 592  outlink->frame_rate = mainlink->frame_rate;
 593  if ((ret = ff_framesync_configure(&s->fs)) < 0)
 594  return ret;
 595 
 596  return 0;
 597 }
598 
/* Activate callback: defer all input/output scheduling to framesync.
 * NOTE(review): the signature line is missing from this extraction (per
 * the index: static int activate(AVFilterContext *ctx)) -- confirm. */
600 {
 601  LIBVMAFContext *s = ctx->priv;
 602  return ff_framesync_activate(&s->fs);
 603 }
604 
605 static enum VmafOutputFormat log_fmt_map(const char *log_fmt)
606 {
607  if (log_fmt) {
608  if (av_stristr(log_fmt, "xml"))
609  return VMAF_OUTPUT_FORMAT_XML;
610  if (av_stristr(log_fmt, "json"))
611  return VMAF_OUTPUT_FORMAT_JSON;
612  if (av_stristr(log_fmt, "csv"))
613  return VMAF_OUTPUT_FORMAT_CSV;
614  if (av_stristr(log_fmt, "sub"))
615  return VMAF_OUTPUT_FORMAT_SUB;
616  }
617 
618  return VMAF_OUTPUT_FORMAT_XML;
619 }
620 
621 static enum VmafPoolingMethod pool_method_map(const char *pool_method)
622 {
623  if (pool_method) {
624  if (av_stristr(pool_method, "min"))
625  return VMAF_POOL_METHOD_MIN;
626  if (av_stristr(pool_method, "mean"))
627  return VMAF_POOL_METHOD_MEAN;
628  if (av_stristr(pool_method, "harmonic_mean"))
629  return VMAF_POOL_METHOD_HARMONIC_MEAN;
630  }
631 
632  return VMAF_POOL_METHOD_MEAN;
633 }
634 
/* Filter uninit: flush libvmaf, report pooled per-model scores, write the
 * optional log file, then free models and close the libvmaf context.
 * NOTE(review): this extraction is missing the signature line (per the
 * index: static av_cold void uninit(AVFilterContext *ctx)) and the av_log(
 * openers of two error messages -- confirm against the full source. */
636 {
 637  LIBVMAFContext *s = ctx->priv;
 638  int err = 0;
 639 
 640  ff_framesync_uninit(&s->fs);
 641 
/* Nothing was scored: skip flushing/pooling and go straight to cleanup. */
 642  if (!s->frame_cnt)
 643  goto clean_up;
 644 
/* NULL pictures signal end-of-stream to libvmaf. */
 645  err = vmaf_read_pictures(s->vmaf, NULL, NULL, 0);
 646  if (err) {
 648  "problem flushing libvmaf context.\n");
 649  }
 650 
 651  for (unsigned i = 0; i < s->model_cnt; i++) {
 652  double vmaf_score;
 653  err = vmaf_score_pooled(s->vmaf, s->model[i], pool_method_map(s->pool),
 654  &vmaf_score, 0, s->frame_cnt - 1);
 655  if (err) {
 657  "problem getting pooled vmaf score.\n");
 658  }
 659 
/* NOTE(review): vmaf_score is logged even when vmaf_score_pooled()
 * failed, i.e. possibly uninitialized -- flag for a follow-up fix. */
 660  av_log(ctx, AV_LOG_INFO, "VMAF score: %f\n", vmaf_score);
 661  }
 662 
 663  if (s->vmaf) {
 664  if (s->log_path && !err)
 665  vmaf_write_output(s->vmaf, s->log_path, log_fmt_map(s->log_fmt));
 666  }
 667 
 668 clean_up:
 669  if (s->model) {
 670  for (unsigned i = 0; i < s->model_cnt; i++) {
 671  if (s->model[i])
 672  vmaf_model_destroy(s->model[i]);
 673  }
 674  av_free(s->model);
 675  }
 676 
 677  if (s->vmaf)
 678  vmaf_close(s->vmaf);
 679 }
680 
681 static const AVFilterPad libvmaf_inputs[] = {
/* Two video inputs: the distorted ("main") stream and the pristine
 * "reference" stream; only the reference pad validates link properties. */
 682  {
 683  .name = "main",
 684  .type = AVMEDIA_TYPE_VIDEO,
 685  },{
 686  .name = "reference",
 687  .type = AVMEDIA_TYPE_VIDEO,
 688  .config_props = config_input_ref,
 689  },
 690 };
691 
692 static const AVFilterPad libvmaf_outputs[] = {
/* Single video output carrying the (unmodified) main-input frames. */
 693  {
 694  .name = "default",
 695  .type = AVMEDIA_TYPE_VIDEO,
 696  .config_props = config_output,
 697  },
 698 };
699 
/* Filter definition.
 * NOTE(review): the opening line (per the index: const AVFilter
 * ff_vf_libvmaf = {) and the trailing FILTER_INPUTS/FILTER_OUTPUTS/
 * FILTER_PIXFMTS_ARRAY entries (orig. lines 709-711) are missing from
 * this extraction -- confirm against the full source. */
701  .name = "libvmaf",
 702  .description = NULL_IF_CONFIG_SMALL("Calculate the VMAF between two video streams."),
 703  .preinit = libvmaf_framesync_preinit,
 704  .init = init,
 705  .uninit = uninit,
 706  .activate = activate,
 707  .priv_size = sizeof(LIBVMAFContext),
 708  .priv_class = &libvmaf_class,
 712 };
ff_framesync_configure
int ff_framesync_configure(FFFrameSync *fs)
Configure a frame sync structure.
Definition: framesync.c:134
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
psnr
static double psnr(double d)
Definition: ffmpeg.c:751
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
libvmaf_inputs
static const AVFilterPad libvmaf_inputs[]
Definition: vf_libvmaf.c:681
ff_framesync_uninit
void ff_framesync_uninit(FFFrameSync *fs)
Free all memory currently allocated.
Definition: framesync.c:304
AV_LOG_QUIET
#define AV_LOG_QUIET
Print no output.
Definition: log.h:162
av_stristr
char * av_stristr(const char *s1, const char *s2)
Locate the first case-independent occurrence in the string haystack of the string needle.
Definition: avstring.c:59
ff_filter_frame
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:978
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2888
FILTER_PIXFMTS_ARRAY
#define FILTER_PIXFMTS_ARRAY(array)
Definition: internal.h:170
inlink
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
Definition: filter_design.txt:212
LIBVMAFContext
Definition: vf_libvmaf.c:39
LIBVMAFContext::enable_conf_interval
int enable_conf_interval
Definition: vf_libvmaf.c:53
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:330
pixdesc.h
log_fmt_map
static enum VmafOutputFormat log_fmt_map(const char *log_fmt)
Definition: vf_libvmaf.c:605
AVOption
AVOption.
Definition: opt.h:251
AV_PIX_FMT_YUV420P16LE
@ AV_PIX_FMT_YUV420P16LE
planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
Definition: pixfmt.h:121
AVDictionary
Definition: dict.c:32
AVFilter::name
const char * name
Filter name.
Definition: avfilter.h:175
FFFrameSync
Frame sync structure.
Definition: framesync.h:168
video.h
AV_PIX_FMT_YUV444P16LE
@ AV_PIX_FMT_YUV444P16LE
planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
Definition: pixfmt.h:125
formats.h
AV_PIX_FMT_YUV420P12LE
@ AV_PIX_FMT_YUV420P12LE
planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
Definition: pixfmt.h:261
fail
#define fail()
Definition: checkasm.h:134
LIBVMAFContext::fs
FFFrameSync fs
Definition: vf_libvmaf.c:41
LIBVMAFContext::n_subsample
int n_subsample
Definition: vf_libvmaf.c:52
AVFilterPad
A filter pad used for either input or output.
Definition: internal.h:49
AV_PIX_FMT_YUV420P10LE
@ AV_PIX_FMT_YUV420P10LE
planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
Definition: pixfmt.h:149
AV_PIX_FMT_YUV444P12LE
@ AV_PIX_FMT_YUV444P12LE
planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
Definition: pixfmt.h:269
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
av_cold
#define av_cold
Definition: attributes.h:90
s
#define s(width, name)
Definition: cbs_vp9.c:256
LIBVMAFContext::model
VmafModel ** model
Definition: vf_libvmaf.c:57
ff_vf_libvmaf
const AVFilter ff_vf_libvmaf
Definition: vf_libvmaf.c:700
config_input_ref
static int config_input_ref(AVFilterLink *inlink)
Definition: vf_libvmaf.c:547
AVDictionaryEntry::key
char * key
Definition: dict.h:90
LIBVMAFContext::ssim
int ssim
Definition: vf_libvmaf.c:48
av_strtok
char * av_strtok(char *s, const char *delim, char **saveptr)
Split the string into several tokens which can be accessed by successive calls to av_strtok().
Definition: avstring.c:189
copy_picture_data
static int copy_picture_data(AVFrame *src, VmafPicture *dst, unsigned bpc)
Definition: vf_libvmaf.c:109
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
ctx
AVFormatContext * ctx
Definition: movenc.c:48
LIBVMAFContext::model_cnt
unsigned model_cnt
Definition: vf_libvmaf.c:58
LIBVMAFContext::frame_cnt
unsigned frame_cnt
Definition: vf_libvmaf.c:59
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
FRAMESYNC_DEFINE_CLASS
FRAMESYNC_DEFINE_CLASS(libvmaf, LIBVMAFContext, fs)
FILTER_INPUTS
#define FILTER_INPUTS(array)
Definition: internal.h:190
AV_PIX_FMT_YUV444P10LE
@ AV_PIX_FMT_YUV444P10LE
planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
Definition: pixfmt.h:155
LIBVMAFContext::vmaf
VmafContext * vmaf
Definition: vf_libvmaf.c:56
activate
static int activate(AVFilterContext *ctx)
Definition: vf_libvmaf.c:599
LIBVMAFContext::enable_transform
int enable_transform
Definition: vf_libvmaf.c:45
av_log_get_level
int av_log_get_level(void)
Get the current log level.
Definition: log.c:437
AV_PIX_FMT_YUV422P16LE
@ AV_PIX_FMT_YUV422P16LE
planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
Definition: pixfmt.h:123
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
NULL
#define NULL
Definition: coverity.c:32
do_vmaf
static int do_vmaf(FFFrameSync *fs)
Definition: vf_libvmaf.c:130
fs
#define fs(width, name, subs,...)
Definition: cbs_vp9.c:258
LIBVMAFContext::log_fmt
char * log_fmt
Definition: vf_libvmaf.c:44
uninit
static av_cold void uninit(AVFilterContext *ctx)
Definition: vf_libvmaf.c:635
LIBVMAFContext::feature_cfg
char * feature_cfg
Definition: vf_libvmaf.c:55
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: vf_libvmaf.c:541
pool_method_map
static enum VmafPoolingMethod pool_method_map(const char *pool_method)
Definition: vf_libvmaf.c:621
config_output
static int config_output(AVFilterLink *outlink)
Definition: vf_libvmaf.c:578
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:115
ff_framesync_init_dualinput
int ff_framesync_init_dualinput(FFFrameSync *fs, AVFilterContext *parent)
Initialize a frame sync structure for dualinput.
Definition: framesync.c:372
AV_PIX_FMT_YUV422P10LE
@ AV_PIX_FMT_YUV422P10LE
planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
Definition: pixfmt.h:151
LIBVMAFContext::n_threads
int n_threads
Definition: vf_libvmaf.c:51
LIBVMAFContext::log_path
char * log_path
Definition: vf_libvmaf.c:43
av_dict_free
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
Definition: dict.c:223
LIBVMAFContext::model_cfg
char * model_cfg
Definition: vf_libvmaf.c:54
version
version
Definition: libkvazaar.c:313
LIBVMAFContext::bpc
unsigned bpc
Definition: vf_libvmaf.c:60
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:191
delimited_dict_parse
static AVDictionary ** delimited_dict_parse(char *str, unsigned *cnt)
Definition: vf_libvmaf.c:167
LIBVMAFContext::ms_ssim
int ms_ssim
Definition: vf_libvmaf.c:49
internal.h
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:269
AVFilterPad::name
const char * name
Pad name.
Definition: internal.h:55
LIBVMAFContext::psnr
int psnr
Definition: vf_libvmaf.c:47
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:272
AVFilter
Filter definition.
Definition: avfilter.h:171
ret
ret
Definition: filter_design.txt:187
LIBVMAFContext::model_path
char * model_path
Definition: vf_libvmaf.c:42
libvmaf_outputs
static const AVFilterPad libvmaf_outputs[]
Definition: vf_libvmaf.c:692
pix_fmt_map
static enum VmafPixelFormat pix_fmt_map(enum AVPixelFormat av_pix_fmt)
Definition: vf_libvmaf.c:86
framesync.h
libvmaf_options
static const AVOption libvmaf_options[]
Definition: vf_libvmaf.c:66
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:225
avfilter.h
av_dict_parse_string
int av_dict_parse_string(AVDictionary **pm, const char *str, const char *key_val_sep, const char *pairs_sep, int flags)
Parse the key/value pairs list and add the parsed entries to a dictionary.
Definition: dict.c:200
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:112
LIBVMAFContext::pool
char * pool
Definition: vf_libvmaf.c:50
OFFSET
#define OFFSET(x)
Definition: vf_libvmaf.c:63
FLAGS
#define FLAGS
Definition: vf_libvmaf.c:64
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
AVFilterContext
An instance of a filter.
Definition: avfilter.h:402
av_strdup
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:280
desc
const char * desc
Definition: libsvtav1.c:83
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
init
static av_cold int init(AVFilterContext *ctx)
Definition: vf_libvmaf.c:510
av_free
#define av_free(p)
Definition: tableprint_vlc.h:33
AVDictionaryEntry
Definition: dict.h:89
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:244
FILTER_OUTPUTS
#define FILTER_OUTPUTS(array)
Definition: internal.h:191
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
log_level_map
static enum VmafLogLevel log_level_map(int log_level)
Definition: vf_libvmaf.c:406
d
d
Definition: ffmpeg_filter.c:156
convert_header.str
string str
Definition: convert_header.py:20
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:561
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
parse_models
static int parse_models(AVFilterContext *ctx)
Definition: vf_libvmaf.c:274
AVDictionaryEntry::value
char * value
Definition: dict.h:91
ff_framesync_activate
int ff_framesync_activate(FFFrameSync *fs)
Examine the frames in the filter's input and try to produce output.
Definition: framesync.c:355
LIBVMAFContext::phone_model
int phone_model
Definition: vf_libvmaf.c:46
avstring.h
ff_framesync_dualinput_get
int ff_framesync_dualinput_get(FFFrameSync *fs, AVFrame **f0, AVFrame **f1)
Definition: framesync.c:390
AV_OPT_TYPE_STRING
@ AV_OPT_TYPE_STRING
Definition: opt.h:229
drawutils.h
parse_features
static int parse_features(AVFilterContext *ctx)
Definition: vf_libvmaf.c:219
AV_PIX_FMT_YUV422P12LE
@ AV_PIX_FMT_YUV422P12LE
planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
Definition: pixfmt.h:265
parse_deprecated_options
static int parse_deprecated_options(AVFilterContext *ctx)
Definition: vf_libvmaf.c:424
av_dict_iterate
const AVDictionaryEntry * av_dict_iterate(const AVDictionary *m, const AVDictionaryEntry *prev)
Iterate over a dictionary.
Definition: dict.c:42
AV_OPT_FLAG_DEPRECATED
#define AV_OPT_FLAG_DEPRECATED
set if option is deprecated, users should refer to AVOption.help text for more information
Definition: opt.h:298