[FFmpeg-devel] [PATCH] avfilter: add VMAF filter
Ashish Pratap Singh
ashk43712 at gmail.com
Sat Jun 24 23:31:21 EEST 2017
Hi, here is the pull request to Netflix's vmaf for the library integration:
https://github.com/Netflix/vmaf/pull/90
On Sun, Jun 25, 2017 at 1:20 AM, Ashish Singh <ashk43712 at gmail.com> wrote:
> This is the VMAF filter, which requires Netflix's vmaf library to be
> installed. There is currently no library implementation in Netflix's vmaf;
> I will open a pull request to Netflix's vmaf for the library integration
> code soon and provide a link in this thread.
> After installing the vmaf library, pass --enable-libvmaf at configure time.
> The filter can then be run with:
> ffmpeg -i main -i ref -lavfi vmaf="model_path=model_path" -f null -
>
> ---
> configure | 5 +
> libavfilter/Makefile | 1 +
> libavfilter/allfilters.c | 1 +
>  libavfilter/vf_vmaf.c    | 429 +++++++++++++++++++++++++++++++++++++++++++++++
> 4 files changed, 436 insertions(+)
> create mode 100644 libavfilter/vf_vmaf.c
>
> diff --git a/configure b/configure
> index 5ae5227..faaf236 100755
> --- a/configure
> +++ b/configure
> @@ -259,6 +259,7 @@ External library support:
> --enable-libtwolame enable MP2 encoding via libtwolame [no]
> --enable-libv4l2 enable libv4l2/v4l-utils [no]
> --enable-libvidstab enable video stabilization using vid.stab [no]
> + --enable-libvmaf enable vmaf filter via libvmaf [no]
> --enable-libvo-amrwbenc enable AMR-WB encoding via libvo-amrwbenc [no]
> --enable-libvorbis enable Vorbis en/decoding via libvorbis,
> native implementation exists [no]
> @@ -1569,6 +1570,7 @@ EXTERNAL_LIBRARY_LIST="
> libtheora
> libtwolame
> libv4l2
> + libvmaf
> libvorbis
> libvpx
> libwavpack
> @@ -2878,6 +2880,7 @@ libspeex_encoder_deps="libspeex"
> libspeex_encoder_select="audio_frame_queue"
> libtheora_encoder_deps="libtheora"
> libtwolame_encoder_deps="libtwolame"
> +libvmaf_filter_deps="libvmaf"
> libvo_amrwbenc_encoder_deps="libvo_amrwbenc"
> libvorbis_decoder_deps="libvorbis"
> libvorbis_encoder_deps="libvorbis"
> @@ -5845,6 +5848,8 @@ enabled libtwolame        && require libtwolame twolame.h twolame_init -ltwolame
>                                 die "ERROR: libtwolame must be installed and version must be >= 0.3.10"; }
>  enabled libv4l2           && require_pkg_config libv4l2 libv4l2.h v4l2_ioctl
>  enabled libvidstab        && require_pkg_config "vidstab >= 0.98" vid.stab/libvidstab.h vsMotionDetectInit
> +enabled libvmaf           && { check_lib libvmaf "libvmaf.h" "compute_vmaf" -lvmaf -lstdc++ -lpthread -lm ||
> +                               die "ERROR: libvmaf must be installed"; }
>  enabled libvo_amrwbenc    && require libvo_amrwbenc vo-amrwbenc/enc_if.h E_IF_init -lvo-amrwbenc
>  enabled libvorbis         && require libvorbis vorbis/vorbisenc.h vorbis_info_init -lvorbisenc -lvorbis -logg
>
> diff --git a/libavfilter/Makefile b/libavfilter/Makefile
> index f7dfe8a..1c4bd56 100644
> --- a/libavfilter/Makefile
> +++ b/libavfilter/Makefile
> @@ -314,6 +314,7 @@ OBJS-$(CONFIG_VFLIP_FILTER)                  += vf_vflip.o
>  OBJS-$(CONFIG_VIDSTABDETECT_FILTER)          += vidstabutils.o vf_vidstabdetect.o
>  OBJS-$(CONFIG_VIDSTABTRANSFORM_FILTER)       += vidstabutils.o vf_vidstabtransform.o
>  OBJS-$(CONFIG_VIGNETTE_FILTER)               += vf_vignette.o
> +OBJS-$(CONFIG_VMAF_FILTER)                   += vf_vmaf.o dualinput.o framesync.o
> OBJS-$(CONFIG_VSTACK_FILTER) += vf_stack.o framesync.o
> OBJS-$(CONFIG_W3FDIF_FILTER) += vf_w3fdif.o
> OBJS-$(CONFIG_WAVEFORM_FILTER) += vf_waveform.o
> diff --git a/libavfilter/allfilters.c b/libavfilter/allfilters.c
> index cd35ae4..6894a6f 100644
> --- a/libavfilter/allfilters.c
> +++ b/libavfilter/allfilters.c
> @@ -325,6 +325,7 @@ static void register_all(void)
> REGISTER_FILTER(VIDSTABDETECT, vidstabdetect, vf);
> REGISTER_FILTER(VIDSTABTRANSFORM, vidstabtransform, vf);
> REGISTER_FILTER(VIGNETTE, vignette, vf);
> + REGISTER_FILTER(VMAF, vmaf, vf);
> REGISTER_FILTER(VSTACK, vstack, vf);
> REGISTER_FILTER(W3FDIF, w3fdif, vf);
> REGISTER_FILTER(WAVEFORM, waveform, vf);
> diff --git a/libavfilter/vf_vmaf.c b/libavfilter/vf_vmaf.c
> new file mode 100644
> index 0000000..12aaede
> --- /dev/null
> +++ b/libavfilter/vf_vmaf.c
> @@ -0,0 +1,429 @@
> +/*
> + * Copyright (c) 2017 Ronald S. Bultje <rsbultje at gmail.com>
> + * Copyright (c) 2017 Ashish Pratap Singh <ashk43712 at gmail.com>
> + *
> + * This file is part of FFmpeg.
> + *
> + * FFmpeg is free software; you can redistribute it and/or
> + * modify it under the terms of the GNU Lesser General Public
> + * License as published by the Free Software Foundation; either
> + * version 2.1 of the License, or (at your option) any later version.
> + *
> + * FFmpeg is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
> + * Lesser General Public License for more details.
> + *
> + * You should have received a copy of the GNU Lesser General Public
> + * License along with FFmpeg; if not, write to the Free Software
> + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
> + */
> +
> +/**
> + * @file
> + * Calculate the VMAF between two input videos.
> + */
> +
> +#include <inttypes.h>
> +#include <pthread.h>
> +#include <string.h>
> +#include <libvmaf.h>
> +#include "libavutil/avstring.h"
> +#include "libavutil/opt.h"
> +#include "libavutil/pixdesc.h"
> +#include "avfilter.h"
> +#include "dualinput.h"
> +#include "drawutils.h"
> +#include "formats.h"
> +#include "internal.h"
> +#include "video.h"
> +
> +typedef struct VMAFContext {
> + const AVClass *class;
> + FFDualInputContext dinput;
> + char *format;
> + int width;
> + int height;
> + double curr_vmaf_score;
> + double vmaf_score;
> + uint64_t nb_frames;
> + pthread_t vmaf_thread;
> + pthread_attr_t attr;
> + pthread_mutex_t lock;
> + pthread_cond_t cond;
> + int eof;
> + AVFrame *gmain;
> + AVFrame *gref;
> + int frame_set;
> + char *model_path;
> + char *log_path;
> + char *log_fmt;
> + int disable_clip;
> + int disable_avx;
> + int enable_transform;
> + int phone_model;
> + int psnr;
> + int ssim;
> + int ms_ssim;
> + char *pool;
> + FILE *stats_file;
> + char *stats_file_str;
> + int stats_version;
> + int stats_header_written;
> + int stats_add_max;
> + int nb_components;
> +} VMAFContext;
> +
> +#define OFFSET(x) offsetof(VMAFContext, x)
> +#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
> +
> +static const AVOption vmaf_options[] = {
> +    {"stats_file", "Set file where to store per-frame difference information", OFFSET(stats_file_str), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, FLAGS },
> +    {"f", "Set file where to store per-frame difference information", OFFSET(stats_file_str), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, FLAGS },
> +    {"stats_version", "Set the format version for the stats file.", OFFSET(stats_version), AV_OPT_TYPE_INT, {.i64=1}, 1, 2, FLAGS },
> +    {"output_max", "Add raw stats (max values) to the output log.", OFFSET(stats_add_max), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
> +    {"model_path", "Set the model to be used for computing vmaf.", OFFSET(model_path), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 1, FLAGS},
> +    {"log_path", "Set the file path to be used to store logs.", OFFSET(log_path), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 1, FLAGS},
> +    {"log_fmt", "Set the format of the log (xml or json).", OFFSET(log_fmt), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 1, FLAGS},
> +    {"disable_clip", "Disables clip for computing vmaf.", OFFSET(disable_clip), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
> +    {"disable_avx", "Disables avx for computing vmaf.", OFFSET(disable_avx), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
> +    {"enable_transform", "Enables transform for computing vmaf.", OFFSET(enable_transform), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
> +    {"phone_model", "Invokes the phone model that will generate higher VMAF scores.", OFFSET(phone_model), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
> +    {"psnr", "Enables computing psnr along with vmaf.", OFFSET(psnr), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
> +    {"ssim", "Enables computing ssim along with vmaf.", OFFSET(ssim), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
> +    {"ms_ssim", "Enables computing ms-ssim along with vmaf.", OFFSET(ms_ssim), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
> +    {"pool", "Set the pool method to be used for computing vmaf.", OFFSET(pool), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 1, FLAGS},
> +    { NULL }
> +};
> +
> +AVFILTER_DEFINE_CLASS(vmaf);
> +
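> +/* Frame-read callback handed to libvmaf: it runs on the worker thread,
> + * blocks until do_vmaf() has published a frame pair, converts the luma
> + * plane of the reference and main frames to float at the requested
> + * stride, then clears frame_set and signals the filter thread. A
> + * non-zero return after EOF tells the library that no more frames
> + * follow. */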
> +static int read_frame_8bit(float *ref_data, float *main_data, int stride, double *score, void *ctx)
> +{
> +    VMAFContext *s = (VMAFContext *)ctx;
> +
> +    if (s->eof == 1) {
> +        s->eof++;
> +    } else if (s->eof == 2) {
> +        return s->eof;
> +    }
> +
> + pthread_mutex_lock(&s->lock);
> +
> + while (s->frame_set == 0) {
> + pthread_cond_wait(&s->cond, &s->lock);
> + }
> +
> + int ref_stride = s->gref->linesize[0];
> + int main_stride = s->gmain->linesize[0];
> +
> + uint8_t *ptr = s->gref->data[0];
> + float *ptr1 = ref_data;
> +
> + int h = s->height;
> + int w = s->width;
> +
> + int i,j;
> +
> + for (i = 0; i < h; i++) {
> + for ( j = 0; j < w; j++) {
> + ptr1[j] = (float)ptr[j];
> + }
> + ptr += ref_stride/sizeof(*ptr);
> + ptr1 += stride/sizeof(*ptr1);
> + }
> +
> + ptr = s->gmain->data[0];
> + ptr1 = main_data;
> +
> + for (i = 0; i < h; i++) {
> + for (j = 0; j < w; j++) {
> + ptr1[j] = (float)ptr[j];
> + }
> + ptr += main_stride/sizeof(*ptr);
> + ptr1 += stride/sizeof(*ptr1);
> + }
> +
> + s->frame_set = 0;
> +
> + pthread_cond_signal(&s->cond);
> + pthread_mutex_unlock(&s->lock);
> +
> + return 0;
> +}
> +
> +static int read_frame_10bit(float *ref_data, float *main_data, int stride, double *score, void *ctx)
> +{
> +    VMAFContext *s = (VMAFContext *)ctx;
> +
> +    if (s->eof == 1) {
> +        s->eof++;
> +    } else if (s->eof == 2) {
> +        return s->eof;
> +    }
> +
> + pthread_mutex_lock(&s->lock);
> +
> + while (s->frame_set == 0) {
> + pthread_cond_wait(&s->cond, &s->lock);
> + }
> +
> + int ref_stride = s->gref->linesize[0];
> + int main_stride = s->gmain->linesize[0];
> +
> +    uint16_t *ptr = (uint16_t *)s->gref->data[0];
> + float *ptr1 = ref_data;
> +
> + int h = s->height;
> + int w = s->width;
> +
> + int i,j;
> +
> + for (i = 0; i < h; i++) {
> + for (j = 0; j < w; j++) {
> + ptr1[j] = (float)ptr[j];
> + }
> + ptr += ref_stride / sizeof(*ptr);
> + ptr1 += stride / sizeof(*ptr1);
> + }
> +
> +    ptr = (uint16_t *)s->gmain->data[0];
> + ptr1 = main_data;
> +
> + for (i = 0; i < h; i++) {
> + for (j = 0; j < w; j++) {
> + ptr1[j] = (float)ptr[j];
> + }
> + ptr += main_stride / sizeof(*ptr);
> + ptr1 += stride / sizeof(*ptr1);
> + }
> +
> + s->frame_set = 0;
> +
> + pthread_cond_signal(&s->cond);
> + pthread_mutex_unlock(&s->lock);
> +
> + return 0;
> +}
> +
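> +/* Runs on the worker thread: select the 8- or 10-bit reader based on the
> + * input pixel format and pass it, together with the filter options, to
> + * libvmaf's compute_vmaf(), which returns the pooled score once all
> + * frames have been read. */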
> +static void compute_vmaf_score(VMAFContext *s)
> +{
> +    int (*read_frame)(float *ref_data, float *main_data, int stride,
> +                      double *score, void *ctx);
> +
> +    if (!strcmp(s->format, "yuv420p") || !strcmp(s->format, "yuv422p") ||
> +        !strcmp(s->format, "yuv444p")) {
> +        read_frame = read_frame_8bit;
> +    } else {
> +        read_frame = read_frame_10bit;
> +    }
> +
> +    s->vmaf_score = compute_vmaf(s->format, s->width, s->height, read_frame,
> +                                 s->model_path, s->log_path, s->log_fmt,
> +                                 s->disable_clip, s->disable_avx,
> +                                 s->enable_transform, s->phone_model,
> +                                 s->psnr, s->ssim, s->ms_ssim, s->pool, s);
> +}
> +
> +static void *call_vmaf(void *ctx)
> +{
> + VMAFContext *s = (VMAFContext *)ctx;
> + compute_vmaf_score(s);
> + pthread_exit(NULL);
> +}
> +
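> +/* Called on the filter thread for every aligned frame pair: wait until the
> + * worker thread has consumed the previous pair, publish references to the
> + * new pair in gref/gmain, set frame_set and wake the worker. */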
> +static AVFrame *do_vmaf(AVFilterContext *ctx, AVFrame *main, const AVFrame *ref)
> +{
> + VMAFContext *s = ctx->priv;
> +
> + pthread_mutex_lock(&s->lock);
> +
> + while (s->frame_set != 0) {
> + pthread_cond_wait(&s->cond, &s->lock);
> + }
> +
> +    av_frame_unref(s->gref);
> +    av_frame_unref(s->gmain);
> +    av_frame_ref(s->gref, ref);
> +    av_frame_ref(s->gmain, main);
> +
> + s->frame_set = 1;
> +
> + pthread_cond_signal(&s->cond);
> + pthread_mutex_unlock(&s->lock);
> +
> + return main;
> +}
> +
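> +/* Open the optional stats file, allocate the frames used to hand data to
> + * the worker thread, initialize the mutex/condition pair and register
> + * do_vmaf() as the dual-input processing callback. */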
> +static av_cold int init(AVFilterContext *ctx)
> +{
> + VMAFContext *s = ctx->priv;
> +
> + if (s->stats_file_str) {
> + if (s->stats_version < 2 && s->stats_add_max) {
> +            av_log(ctx, AV_LOG_ERROR,
> +                   "stats_add_max was specified but stats_version < 2.\n");
> + return AVERROR(EINVAL);
> + }
> + if (!strcmp(s->stats_file_str, "-")) {
> + s->stats_file = stdout;
> + } else {
> + s->stats_file = fopen(s->stats_file_str, "w");
> + if (!s->stats_file) {
> + int err = AVERROR(errno);
> + char buf[128];
> + av_strerror(err, buf, sizeof(buf));
> + av_log(ctx, AV_LOG_ERROR, "Could not open stats file %s:
> %s\n",
> + s->stats_file_str, buf);
> + return err;
> + }
> + }
> + }
> +
> + s->gref = av_frame_alloc();
> + s->gmain = av_frame_alloc();
> +
> + pthread_mutex_init(&s->lock, NULL);
> + pthread_cond_init (&s->cond, NULL);
> +
> + pthread_attr_init(&s->attr);
> +
> + s->dinput.process = do_vmaf;
> + return 0;
> +}
> +
> +static int query_formats(AVFilterContext *ctx)
> +{
> + static const enum AVPixelFormat pix_fmts[] = {
> + AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P,
> +        AV_PIX_FMT_YUV444P10LE, AV_PIX_FMT_YUV422P10LE, AV_PIX_FMT_YUV420P10LE,
> + AV_PIX_FMT_NONE
> + };
> +
> + AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
> + if (!fmts_list)
> + return AVERROR(ENOMEM);
> + return ff_set_common_formats(ctx, fmts_list);
> +}
> +
> +
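> +/* Configured on the reference input: both inputs must match in size and
> + * pixel format and a model file must be given; once the link parameters
> + * are known, start the worker thread that drives libvmaf. */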
> +static int config_input_ref(AVFilterLink *inlink)
> +{
> + const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
> + AVFilterContext *ctx = inlink->dst;
> + VMAFContext *s = ctx->priv;
> + s->nb_components = desc->nb_components;
> + if (ctx->inputs[0]->w != ctx->inputs[1]->w ||
> + ctx->inputs[0]->h != ctx->inputs[1]->h) {
> + av_log(ctx, AV_LOG_ERROR, "Width and height of input videos must
> be same.\n");
> + return AVERROR(EINVAL);
> + }
> + if (ctx->inputs[0]->format != ctx->inputs[1]->format) {
> + av_log(ctx, AV_LOG_ERROR, "Inputs must be of same pixel
> format.\n");
> + return AVERROR(EINVAL);
> + }
> + if (!(s->model_path)) {
> + av_log(ctx, AV_LOG_ERROR, "No model specified.\n");
> + return AVERROR(EINVAL);
> + }
> +
> + s->format = av_get_pix_fmt_name(ctx->inputs[0]->format);
> + s->width = ctx->inputs[0]->w;
> + s->height = ctx->inputs[0]->h;
> +
> +    int rc = pthread_create(&s->vmaf_thread, &s->attr, call_vmaf, (void *)s);
> + if (rc) {
> + av_log(ctx, AV_LOG_ERROR, "Thread creation failed.\n");
> + return AVERROR(EINVAL);
> + }
> +
> + return 0;
> +}
> +
> +
> +static int config_output(AVFilterLink *outlink)
> +{
> + AVFilterContext *ctx = outlink->src;
> + VMAFContext *s = ctx->priv;
> + AVFilterLink *mainlink = ctx->inputs[0];
> + int ret;
> +
> + outlink->w = mainlink->w;
> + outlink->h = mainlink->h;
> + outlink->time_base = mainlink->time_base;
> + outlink->sample_aspect_ratio = mainlink->sample_aspect_ratio;
> + outlink->frame_rate = mainlink->frame_rate;
> + if ((ret = ff_dualinput_init(ctx, &s->dinput)) < 0)
> + return ret;
> +
> + return 0;
> +}
> +
> +static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
> +{
> + VMAFContext *s = inlink->dst->priv;
> + return ff_dualinput_filter_frame(&s->dinput, inlink, inpicref);
> +}
> +
> +static int request_frame(AVFilterLink *outlink)
> +{
> + VMAFContext *s = outlink->src->priv;
> + return ff_dualinput_request_frame(&s->dinput, outlink);
> +}
> +
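> +/* Signal EOF to the read callback so the worker thread can finish, join
> + * it, release the handoff frames and report the final VMAF score. */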
> +static av_cold void uninit(AVFilterContext *ctx)
> +{
> + VMAFContext *s = ctx->priv;
> +
> + ff_dualinput_uninit(&s->dinput);
> +
> + if (s->stats_file && s->stats_file != stdout)
> + fclose(s->stats_file);
> +
> + pthread_mutex_lock(&s->lock);
> + s->eof = 1;
> + pthread_cond_signal(&s->cond);
> + pthread_mutex_unlock(&s->lock);
> +
> + pthread_join(s->vmaf_thread, NULL);
> +
> + av_frame_free(&s->gref);
> + av_frame_free(&s->gmain);
> +
> + av_log(ctx, AV_LOG_INFO, "VMAF score: %f\n",s->vmaf_score);
> +
> +}
> +
> +static const AVFilterPad vmaf_inputs[] = {
> + {
> + .name = "main",
> + .type = AVMEDIA_TYPE_VIDEO,
> + .filter_frame = filter_frame,
> + },{
> + .name = "reference",
> + .type = AVMEDIA_TYPE_VIDEO,
> + .filter_frame = filter_frame,
> + .config_props = config_input_ref,
> + },
> + { NULL }
> +};
> +
> +static const AVFilterPad vmaf_outputs[] = {
> + {
> + .name = "default",
> + .type = AVMEDIA_TYPE_VIDEO,
> + .config_props = config_output,
> + .request_frame = request_frame,
> + },
> + { NULL }
> +};
> +
> +AVFilter ff_vf_vmaf = {
> + .name = "vmaf",
> + .description = NULL_IF_CONFIG_SMALL("Calculate the VMAF between
> two videos."),
> + .init = init,
> + .uninit = uninit,
> + .query_formats = query_formats,
> + .priv_size = sizeof(VMAFContext),
> + .priv_class = &vmaf_class,
> + .inputs = vmaf_inputs,
> + .outputs = vmaf_outputs,
> +};
> --
> 2.7.4
>
>