[FFmpeg-devel] [PATCH] lavfi: add splice filters
Nicolas George
nicolas.george at normalesup.org
Wed Apr 10 17:11:33 CEST 2013
On primidi, 21 Germinal, year CCXXI, Stefano Sabatini wrote:
> From f88d1af608f99ed4f4fe7e1f9e2489049ade04f8 Mon Sep 17 00:00:00 2001
> From: Stefano Sabatini <stefasab at gmail.com>
> Date: Mon, 8 Apr 2013 15:16:06 +0200
> Subject: [PATCH] lavfi: add splice filters
>
> TODO: bump minor, add changelog entry
> ---
> doc/filters.texi | 43 +++++++
> libavfilter/Makefile | 2 +
> libavfilter/allfilters.c | 2 +
> libavfilter/f_splice.c | 293 ++++++++++++++++++++++++++++++++++++++++++++++
> 4 files changed, 340 insertions(+)
> create mode 100644 libavfilter/f_splice.c
>
> diff --git a/doc/filters.texi b/doc/filters.texi
> index d01ae82..3b46150 100644
> --- a/doc/filters.texi
> +++ b/doc/filters.texi
> @@ -6792,6 +6792,49 @@ setpts='(RTCTIME - RTCSTART) / (TB * 1000000)'
> @end example
> @end itemize
>
> +@section asplice, splice
> +
> +Mux frames from several inputs together.
> +
> +@code{asplice} works on audio frames, @code{splice} on video frames.
> +
> +These filters read frames from several inputs and send the oldest
> +cached frame to the output.
AFAIK, a cache is data that is kept to increase future efficiency but can be
discarded if necessary. I believe "queued" would be more appropriate, here
and later.
> +
> +Input streams must have well defined, monotonically increasing frame
> +timestamps.
> +
> +In order to submit one frame to output, these filters need to cache at
> +least one frame for each input, so they cannot work in case one input
> +is not terminated and will not receive incoming frames. Also,
I do not get that: either request_frame() succeeds and we get a frame, or
it returns EOF and the input is terminated.
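To put it another way, from the filter's point of view, pulling on an input
can only end in one of two ways (minimal sketch, mirroring what
request_frame() below already does):

    for (;;) {
        int ret = ff_request_frame(ctx->inputs[i]);
        if (ret == AVERROR_EOF)
            break;              /* input terminated, nothing more will come */
        if (ret < 0)
            return ret;         /* a real error */
        if (splice->queues[i].available)
            break;              /* filter_frame() has queued something */
    }

so an input that "is not terminated and will not receive incoming frames"
is not a situation the filter can actually observe.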
> +depending on how the inputs are synchronized, the filters may drop frames
> +if one input receives more frames than the other ones while the cache is
> +already full.
> +
> +These filters accept parameters as a list of @var{key}=@var{value}
> +pairs, separated by ":". They accept the following options.
> +
> +@table @option
> +@item nb_inputs, n
> +Set the number of different inputs. Default value is 2.
> +@end table
> +
> +@subsection Examples
> +
> +@itemize
> +@item
> +Interleave frames belonging to different streams using @command{ffmpeg}:
> +@example
> +ffmpeg -i bambi.avi -i pr0n.mkv -filter_complex "[0:v][1:v] splice" out.avi
> +@end example
> +
> +@item
> +Add flickering blur effect:
> +@example
> +select='gt(random(0), 0.2)':branch=1 [tmp], boxblur=2:2, [tmp] splice
> +@end example
> +@end itemize
> +
> @section ebur128
>
> EBU R128 scanner filter. This filter takes an audio stream as input and outputs
> diff --git a/libavfilter/Makefile b/libavfilter/Makefile
> index e1eb35d..a315e17 100644
> --- a/libavfilter/Makefile
> +++ b/libavfilter/Makefile
> @@ -66,6 +66,7 @@ OBJS-$(CONFIG_ASETNSAMPLES_FILTER) += af_asetnsamples.o
> OBJS-$(CONFIG_ASETPTS_FILTER) += f_setpts.o
> OBJS-$(CONFIG_ASETTB_FILTER) += f_settb.o
> OBJS-$(CONFIG_ASHOWINFO_FILTER) += af_ashowinfo.o
> +OBJS-$(CONFIG_ASPLICE_FILTER) += f_splice.o
> OBJS-$(CONFIG_ASPLIT_FILTER) += split.o
> OBJS-$(CONFIG_ASTREAMSYNC_FILTER) += af_astreamsync.o
> OBJS-$(CONFIG_ASYNCTS_FILTER) += af_asyncts.o
> @@ -158,6 +159,7 @@ OBJS-$(CONFIG_SETSAR_FILTER) += vf_aspect.o
> OBJS-$(CONFIG_SETTB_FILTER) += f_settb.o
> OBJS-$(CONFIG_SHOWINFO_FILTER) += vf_showinfo.o
> OBJS-$(CONFIG_SMARTBLUR_FILTER) += vf_smartblur.o
> +OBJS-$(CONFIG_SPLICE_FILTER) += f_splice.o
> OBJS-$(CONFIG_SPLIT_FILTER) += split.o
> OBJS-$(CONFIG_STEREO3D_FILTER) += vf_stereo3d.o
> OBJS-$(CONFIG_SUBTITLES_FILTER) += vf_subtitles.o
> diff --git a/libavfilter/allfilters.c b/libavfilter/allfilters.c
> index 4972322..4ab7167 100644
> --- a/libavfilter/allfilters.c
> +++ b/libavfilter/allfilters.c
> @@ -62,6 +62,7 @@ void avfilter_register_all(void)
> REGISTER_FILTER(ASETPTS, asetpts, af);
> REGISTER_FILTER(ASETTB, asettb, af);
> REGISTER_FILTER(ASHOWINFO, ashowinfo, af);
> + REGISTER_FILTER(ASPLICE, asplice, af);
> REGISTER_FILTER(ASPLIT, asplit, af);
> REGISTER_FILTER(ASTREAMSYNC, astreamsync, af);
> REGISTER_FILTER(ASYNCTS, asyncts, af);
> @@ -153,6 +154,7 @@ void avfilter_register_all(void)
> REGISTER_FILTER(SETTB, settb, vf);
> REGISTER_FILTER(SHOWINFO, showinfo, vf);
> REGISTER_FILTER(SMARTBLUR, smartblur, vf);
> + REGISTER_FILTER(SPLICE, splice, vf);
> REGISTER_FILTER(SPLIT, split, vf);
> REGISTER_FILTER(STEREO3D, stereo3d, vf);
> REGISTER_FILTER(SUBTITLES, subtitles, vf);
> diff --git a/libavfilter/f_splice.c b/libavfilter/f_splice.c
> new file mode 100644
> index 0000000..95bb455
> --- /dev/null
> +++ b/libavfilter/f_splice.c
> @@ -0,0 +1,293 @@
> +/*
> + * Copyright (c) 2013 Stefano Sabatini
> + *
> + * This file is part of FFmpeg.
> + *
> + * FFmpeg is free software; you can redistribute it and/or
> + * modify it under the terms of the GNU Lesser General Public
> + * License as published by the Free Software Foundation; either
> + * version 2.1 of the License, or (at your option) any later version.
> + *
> + * FFmpeg is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
> + * Lesser General Public License for more details.
> + *
> + * You should have received a copy of the GNU Lesser General Public
> + * License along with FFmpeg; if not, write to the Free Software
> + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
> + */
> +
> +/**
> + * @file
> + * audio and video splicer
> + */
> +
> +#include "libavutil/avassert.h"
> +#include "libavutil/opt.h"
> +#include "avfilter.h"
> +#include "bufferqueue.h"
> +#include "formats.h"
> +#include "internal.h"
> +#include "audio.h"
> +#include "video.h"
> +
> +typedef struct {
> +    const AVClass *class;
> +    int nb_inputs;
> +    struct FFBufQueue *queues;
> +    int req_fulfilled;
> +} SpliceContext;
> +
> +#define OFFSET(x) offsetof(SpliceContext, x)
> +#define FLAGS AV_OPT_FLAG_FILTERING_PARAM
Unused macro.
> +
> +#define DEFINE_OPTIONS(filt_name, filt_type) \
> +static const AVOption filt_name##_options[] = { \
> +    { "nb_inputs", "set number of inputs", OFFSET(nb_inputs), AV_OPT_TYPE_INT, {.i64 = 2}, 1, INT_MAX, \
> +      .flags = AV_OPT_FLAG_##filt_type##_PARAM|AV_OPT_FLAG_FILTERING_PARAM }, \
> +    { "n", "set number of inputs", OFFSET(nb_inputs), AV_OPT_TYPE_INT, {.i64 = 2}, 1, INT_MAX, \
> +      .flags = AV_OPT_FLAG_##filt_type##_PARAM|AV_OPT_FLAG_FILTERING_PARAM }, \
> +    { NULL }, \
> +}
> +
> +inline static int push_frame(AVFilterContext *ctx, int queue_idx)
> +{
> +    SpliceContext *splice = ctx->priv;
> +    AVFrame *frame = ff_bufqueue_get(&splice->queues[queue_idx]);
> +    int ret;
> +
> +    av_log(ctx, AV_LOG_DEBUG, "queue:%d -> frame time:%f\n",
> +           queue_idx, frame->pts * av_q2d(AV_TIME_BASE_Q));
> +    ret = ff_filter_frame(ctx->outputs[0], frame);
> +    if (ret >= 0)
> +        splice->req_fulfilled = 1;
You can probably omit all that by setting FF_LINK_FLAG_REQUEST_LOOP on the
output. See the recent "simplify request_frame loop" commits.
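If I remember the mechanics correctly, it is just one line in
config_output() (sketch from memory, check the actual commits for the exact
field and place):

    /* assumption: the flag is ORed into the output link's flags so that
       the framework repeats the request until a frame has been filtered */
    outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;

and then the req_fulfilled field and its logic can go away entirely.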
> +    return ret;
> +}
> +
> +static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
> +{
> +    AVFilterContext *ctx = inlink->dst;
> +    SpliceContext *splice = ctx->priv;
> +    int i, queue_idx = -1;
> +    int64_t pts_min = INT64_MAX;
> +
> +    if (frame->pts == AV_NOPTS_VALUE) {
> +        av_log(ctx, AV_LOG_WARNING,
> +               "NOPTS value for input frame cannot be accepted, frame discarded\n");
> +        av_frame_free(&frame);
> +        return 0;
An error would seem more appropriate.
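Something like this, maybe (just a sketch; AVERROR(EINVAL) is my arbitrary
pick for the error code):

    if (frame->pts == AV_NOPTS_VALUE) {
        av_log(ctx, AV_LOG_ERROR,
               "Frames without timestamps cannot be interleaved\n");
        av_frame_free(&frame);
        return AVERROR(EINVAL);
    }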
> +    }
> +
> +    /* queue frame */
> +    for (i = 0; i < ctx->nb_inputs; i++) {
> +        if (inlink == ctx->inputs[i]) {
FF_INLINK_IDX
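That is, something like (untested):

    /* index of the input pad this link is connected to */
    int in_no = FF_INLINK_IDX(inlink);

    frame->pts = av_rescale_q(frame->pts, inlink->time_base, AV_TIME_BASE_Q);
    ff_bufqueue_add(ctx, &splice->queues[in_no], frame);

instead of looping over all the inputs to find the one that matches.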
> +            frame->pts = av_rescale_q(frame->pts,
> +                                      inlink->time_base, AV_TIME_BASE_Q);
> +            av_log(ctx, AV_LOG_DEBUG, "frame pts:%f -> queue idx:%d available:%d\n",
> +                   frame->pts * av_q2d(AV_TIME_BASE_Q), i, splice->queues[i].available);
> +            ff_bufqueue_add(ctx, &splice->queues[i], frame);
> +        }
> +    }
> +
> +    /* check if all the active queues have available frames */
> +    for (i = 0; i < ctx->nb_inputs; i++) {
> +        AVFrame *frame;
> +        struct FFBufQueue *q = &splice->queues[i];
> +
> +        if (!ctx->inputs[i]->closed && !q->available)
> +            return 0;
> +        if (q->available) {
> +            frame = ff_bufqueue_peek(q, 0);
> +            if (frame->pts < pts_min) {
> +                pts_min = frame->pts;
> +                queue_idx = i;
> +            }
> +        }
> +    }
> +    if (queue_idx < 0)
> +        return 0;
Do you see a situation where that can happen? If not, an assert would be
more appropriate.
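I.e., if the "all queues empty" case is indeed unreachable here (we just
queued a frame on this very input a few lines above):

    av_assert0(queue_idx >= 0);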
> +
> +    return push_frame(ctx, queue_idx);
> +}
> +
> +static int init(AVFilterContext *ctx, const char *args)
> +{
> +    SpliceContext *splice = ctx->priv;
> +    const AVFilterPad *outpad = &ctx->filter->outputs[0];
> +    int i;
> +
> +    splice->queues = av_calloc(splice->nb_inputs, sizeof(splice->queues[0]));
> +    if (!splice->queues)
> +        return AVERROR(ENOMEM);
> +
> +    for (i = 0; i < splice->nb_inputs; i++) {
> +        char name[32];
> +        AVFilterPad inpad = { 0 };
> +
> +        snprintf(name, sizeof(name), "input%d", i);
av_asprintf
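I.e.:

    inpad.name = av_asprintf("input%d", i);
    if (!inpad.name)
        return AVERROR(ENOMEM);

and the fixed-size buffer and the extra av_strdup() go away.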
> +        inpad.type = outpad->type;
> +        inpad.name = av_strdup(name);
> +        inpad.filter_frame = filter_frame;
> +        if (!inpad.name)
> +            return AVERROR(ENOMEM);
> +
> +        switch (outpad->type) {
> +        case AVMEDIA_TYPE_VIDEO:
> +            inpad.get_video_buffer = ff_null_get_video_buffer; break;
> +        case AVMEDIA_TYPE_AUDIO:
> +            inpad.get_audio_buffer = ff_null_get_audio_buffer; break;
> +        default:
> +            av_assert0(0);
> +        }
> +        ff_insert_inpad(ctx, i, &inpad);
> +    }
> +
> +    return 0;
> +}
> +
> +static void uninit(AVFilterContext *ctx)
> +{
> +    SpliceContext *splice = ctx->priv;
> +    int i;
> +
> +    for (i = 0; i < ctx->nb_inputs; i++) {
> +        ff_bufqueue_discard_all(&splice->queues[i]);
> +        av_freep(&ctx->input_pads[i].name);
> +    }
> +    av_freep(&splice->queues);
> +}
> +
> +static int config_output(AVFilterLink *outlink)
> +{
> +    AVFilterContext *ctx = outlink->src;
> +    AVFilterLink *inlink0 = ctx->inputs[0];
> +    int i;
> +
> +    if (outlink->type == AVMEDIA_TYPE_VIDEO) {
> +        outlink->time_base = AV_TIME_BASE_Q;
> +        outlink->w = inlink0->w;
> +        outlink->h = inlink0->h;
> +        outlink->sample_aspect_ratio = inlink0->sample_aspect_ratio;
> +        outlink->format = inlink0->format;
> +        outlink->frame_rate = (AVRational) {0, 0};
> +        for (i = 1; i < ctx->nb_inputs; i++) {
> +            AVFilterLink *inlink = ctx->inputs[i];
> +
> +            if (outlink->w != inlink->w ||
> +                outlink->h != inlink->h ||
> +                outlink->sample_aspect_ratio.num != inlink->sample_aspect_ratio.num ||
> +                outlink->sample_aspect_ratio.den != inlink->sample_aspect_ratio.den) {
> +                av_log(ctx, AV_LOG_ERROR, "Parameters for input link %s "
> +                       "(size %dx%d, SAR %d:%d) do not match the corresponding "
> +                       "output link parameters (%dx%d, SAR %d:%d)\n",
> +                       ctx->input_pads[i].name, inlink->w, inlink->h,
> +                       inlink->sample_aspect_ratio.num,
> +                       inlink->sample_aspect_ratio.den,
> +                       outlink->w, outlink->h,
> +                       outlink->sample_aspect_ratio.num,
> +                       outlink->sample_aspect_ratio.den);
> +                return AVERROR(EINVAL);
> +            }
> +        }
> +    }
> +
> +    return 0;
> +}
> +
> +static int request_frame(AVFilterLink *outlink)
> +{
> +    AVFilterContext *ctx = outlink->src;
> +    SpliceContext *splice = ctx->priv;
> +    int i, queue_idx = -1;
> +    int64_t pts_min = INT64_MAX;
> +    AVFrame *frame;
> +
> +    splice->req_fulfilled = 0;
> +    /* check if all the queues have available frames */
> +    for (i = 0; i < ctx->nb_inputs; i++) {
> +        struct FFBufQueue *q = &splice->queues[i];
> +
> +        while (!ctx->inputs[i]->closed && !q->available &&
> +               !splice->req_fulfilled) {
> +            int ret = ff_request_frame(ctx->inputs[i]);
> +            if (ret < 0 && ret != AVERROR_EOF)
> +                return ret;
> +        }
> +
> +        if (splice->req_fulfilled)
> +            return 0;
> +
> +        if (q->available) {
> +            frame = ff_bufqueue_peek(q, 0);
> +            if (frame->pts < pts_min) {
> +                pts_min = frame->pts;
> +                queue_idx = i;
> +            }
> +        }
> +    }
> +
> +    if (queue_idx < 0)
> +        return AVERROR_EOF;
> +
> +    /* send out oldest frame */
> +    return push_frame(ctx, queue_idx);
Suggestion: move the "find oldest input" loop into push_frame(), and let it
return, through an additional parameter, the empty input that is preventing
it from progressing. I suspect that would remove a lot of code duplication.
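Rough, untested sketch of what I have in mind (names are arbitrary):

    /* Send the oldest queued frame to the output.
     * Return 0 and set *empty_idx >= 0 if an open input with an empty
     * queue prevents us from choosing, AVERROR_EOF if everything is
     * closed and empty, the usual error codes otherwise. */
    static int push_oldest_frame(AVFilterContext *ctx, int *empty_idx)
    {
        SpliceContext *splice = ctx->priv;
        int64_t pts_min = INT64_MAX;
        int i, queue_idx = -1;

        *empty_idx = -1;
        for (i = 0; i < ctx->nb_inputs; i++) {
            struct FFBufQueue *q = &splice->queues[i];

            if (!q->available) {
                if (!ctx->inputs[i]->closed) {
                    *empty_idx = i;
                    return 0;
                }
                continue;
            }
            if (ff_bufqueue_peek(q, 0)->pts < pts_min) {
                pts_min = ff_bufqueue_peek(q, 0)->pts;
                queue_idx = i;
            }
        }
        if (queue_idx < 0)
            return AVERROR_EOF;
        return ff_filter_frame(ctx->outputs[0],
                               ff_bufqueue_get(&splice->queues[queue_idx]));
    }

filter_frame() then just calls it and ignores the "empty input" case, and
request_frame() keeps calling ff_request_frame() on *empty_idx until the
push succeeds.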
> +}
> +
> +static const char *const shorthand[] = { "n", NULL };
> +
> +#if CONFIG_SPLICE_FILTER
> +
> +DEFINE_OPTIONS(splice, VIDEO);
> +AVFILTER_DEFINE_CLASS(splice);
> +
> +static const AVFilterPad splice_outputs[] = {
> +    {
> +        .name          = "default",
> +        .type          = AVMEDIA_TYPE_VIDEO,
> +        .config_props  = config_output,
> +        .request_frame = request_frame,
> +    },
> +    { NULL }
> +};
> +
> +AVFilter avfilter_vf_splice = {
> +    .name        = "splice",
> +    .description = NULL_IF_CONFIG_SMALL("Temporally interleave several video inputs."),
> +    .priv_size   = sizeof(SpliceContext),
> +    .init        = init,
> +    .uninit      = uninit,
> +    .outputs     = splice_outputs,
> +    .priv_class  = &splice_class,
> +    .shorthand   = shorthand,
> +};
> +
> +#endif
> +
> +#if CONFIG_ASPLICE_FILTER
> +
> +DEFINE_OPTIONS(asplice, AUDIO);
> +AVFILTER_DEFINE_CLASS(asplice);
> +
> +static const AVFilterPad asplice_outputs[] = {
> +    {
> +        .name          = "default",
> +        .type          = AVMEDIA_TYPE_AUDIO,
> +        .config_props  = config_output,
> +        .request_frame = request_frame,
> +    },
> +    { NULL }
> +};
> +
> +AVFilter avfilter_af_asplice = {
> +    .name        = "asplice",
> +    .description = NULL_IF_CONFIG_SMALL("Mux several audio inputs together."),
> +    .priv_size   = sizeof(SpliceContext),
> +    .init        = init,
> +    .uninit      = uninit,
> +    .outputs     = asplice_outputs,
> +    .priv_class  = &asplice_class,
> +    .shorthand   = shorthand,
> +};
> +
> +#endif
Regards,
--
Nicolas George