vf_dejudder.c
/*
 * Copyright (c) 2014 Nicholas Robbins
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * remove judder in video stream
 *
 * Algorithm:
 * - If the old packets had PTS old_pts[i], replace these with new values
 * based on the running average of the last n=cycle frames (a worked
 * example follows this comment block). So
 *
 * new_pts[i] = Sum(k=i-n+1, i, old_pts[k])/n
 *            + (old_pts[i]-old_pts[i-n])*(n-1)/2n
 *
 * For any repeating judder pattern of length n this produces an even
 * progression of PTS's.
 *
 * - To avoid recalculating this sum every frame, a running tally is
 * maintained in ctx->new_pts. Each frame the newest term is added to the
 * sum, the oldest one is removed, and the offset terms (second line in
 * the formula above) are recalculated.
 *
 * - To aid in this, a ringbuffer of the last n+2 PTS's is maintained in
 * ctx->ringbuff, with the indices of the first two and last two entries
 * stored in i1, i2, i3, & i4.
 *
 * - To ensure that the new PTS's are integers, time_base is divided
 * by 2n. This removes the division in the new_pts calculation.
 *
 * - frame_rate is also multiplied by 2n to allow the frames to fall
 * where they may in what may now be a VFR output. In practice this
 * produces more even output than setting frame_rate=1/0.
 */
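
/*
 * Worked example (the PTS values are chosen purely for illustration):
 * with cycle = 4 and an input whose PTS's follow a repeating 2,3,2,3
 * judder pattern, old_pts = 0, 2, 5, 7, 10, 12, 15, 17, ..., the formula
 * above gives
 *
 *   new_pts[4] = (2+5+7+10)/4   + (10-0)*3/8 = 6.0  + 3.75 =  9.75
 *   new_pts[5] = (5+7+10+12)/4  + (12-2)*3/8 = 8.5  + 3.75 = 12.25
 *   new_pts[6] = (7+10+12+15)/4 + (15-5)*3/8 = 11.0 + 3.75 = 14.75
 *   new_pts[7] = (10+12+15+17)/4 + (17-7)*3/8 = 13.5 + 3.75 = 17.25
 *
 * i.e. an even progression in steps of 2.5. Because the output time_base
 * is divided by 2n = 8, the filter actually emits the integer timestamps
 * 78, 98, 118, 138.
 */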

#include "libavutil/opt.h"
#include "libavutil/mathematics.h"
#include "avfilter.h"
#include "internal.h"
#include "video.h"

typedef struct DejudderContext {
    const AVClass *class;
    int64_t *ringbuff;
    int i1, i2, i3, i4;
    int64_t new_pts;
    int start_count;

    /* options */
    int cycle;
} DejudderContext;

#define OFFSET(x) offsetof(DejudderContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_VIDEO_PARAM

static const AVOption dejudder_options[] = {
    {"cycle", "set the length of the cycle to use for dejuddering",
        OFFSET(cycle), AV_OPT_TYPE_INT, {.i64 = 4}, 2, 240, .flags = FLAGS},
    {NULL}
};

AVFILTER_DEFINE_CLASS(dejudder);
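
/*
 * Example use (illustrative; the file names are placeholders): the cycle
 * option selects the length of the repeating judder pattern, e.g. 4 for
 * content telecined from 24 to 30 fps, so a typical invocation could be
 *
 *   ffmpeg -i judder.mkv -vf dejudder=cycle=4 smooth.mkv
 */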

static int config_out_props(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    DejudderContext *s = ctx->priv;
    AVFilterLink *inlink = outlink->src->inputs[0];

    outlink->time_base = av_mul_q(inlink->time_base, av_make_q(1, 2 * s->cycle));
    outlink->frame_rate = av_mul_q(inlink->frame_rate, av_make_q(2 * s->cycle, 1));

    av_log(ctx, AV_LOG_VERBOSE, "cycle:%d\n", s->cycle);

    return 0;
}
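
/*
 * Worked numbers (hypothetical input properties, for illustration only):
 * with cycle = 4 and an NTSC-like input using time_base 1001/30000 and
 * frame_rate 30000/1001, the output gets time_base 1001/240000 and
 * frame_rate 240000/1001, so the averaged PTS values computed in
 * filter_frame stay integral.
 */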

static av_cold int dejudder_init(AVFilterContext *ctx)
{
    DejudderContext *s = ctx->priv;

    s->ringbuff = av_mallocz_array(s->cycle+2, sizeof(*s->ringbuff));
    if (!s->ringbuff)
        return AVERROR(ENOMEM);

    s->new_pts = 0;
    s->i1 = 0;
    s->i2 = 1;
    s->i3 = 2;
    s->i4 = 3;
    s->start_count = s->cycle + 2;

    return 0;
}
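
/*
 * Note: start_count is initialized to cycle + 2 because that is how many
 * frames it takes to fill the ring buffer; until then filter_frame simply
 * resets new_pts to the rescaled input PTS instead of applying the
 * running-average update.
 */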

static av_cold void dejudder_uninit(AVFilterContext *ctx)
{
    DejudderContext *s = ctx->priv;

    av_freep(&(s->ringbuff));
}

static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    int k;
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    DejudderContext *s = ctx->priv;
    int64_t *judbuff = s->ringbuff;
    int64_t next_pts = frame->pts;
    int64_t offset;

    /* frames without a PTS are passed through untouched */
    if (next_pts == AV_NOPTS_VALUE)
        return ff_filter_frame(outlink, frame);

    if (s->start_count) {
        /* ring buffer not yet full: just rescale the PTS */
        s->start_count--;
        s->new_pts = next_pts * 2 * s->cycle;
    } else {
        /* handle a backwards PTS jump by shifting the stored values */
        if (next_pts < judbuff[s->i2]) {
            offset = next_pts + judbuff[s->i3] - judbuff[s->i4] - judbuff[s->i1];
            for (k = 0; k < s->cycle + 2; k++)
                judbuff[k] += offset;
        }
        /* update the running tally; new_pts is kept scaled by 2*cycle */
        s->new_pts += (s->cycle - 1) * (judbuff[s->i3] - judbuff[s->i1])
                    + (s->cycle + 1) * (next_pts - judbuff[s->i4]);
    }

    /* store the current PTS and rotate the ring buffer indices */
    judbuff[s->i2] = next_pts;
    s->i1 = s->i2;
    s->i2 = s->i3;
    s->i3 = s->i4;
    s->i4 = (s->i4 + 1) % (s->cycle + 2);

    frame->pts = s->new_pts;

    for (k = 0; k < s->cycle + 2; k++)
        av_log(ctx, AV_LOG_DEBUG, "%"PRId64"\t", judbuff[k]);
    av_log(ctx, AV_LOG_DEBUG, "next=%"PRId64", new=%"PRId64"\n", next_pts, frame->pts);

    return ff_filter_frame(outlink, frame);
}
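
/*
 * Sketch of the invariant behind the update above, derived from the
 * cycle+2 buffer size and the index rotation: when a frame arrives after
 * the start-up period, judbuff[i1] holds the previous PTS, judbuff[i4]
 * the PTS from cycle frames back, and judbuff[i3] the one from cycle+1
 * frames back, so
 *
 *   (cycle-1)*(judbuff[i3]-judbuff[i1]) + (cycle+1)*(next_pts-judbuff[i4])
 *
 * is the change in 2*cycle*new_pts[i] given by the formula in the header
 * comment.
 */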

static const AVFilterPad dejudder_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad dejudder_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_out_props,
    },
    { NULL }
};

AVFilter ff_vf_dejudder = {
    .name        = "dejudder",
    .description = NULL_IF_CONFIG_SMALL("Remove judder produced by pullup."),
    .priv_size   = sizeof(DejudderContext),
    .priv_class  = &dejudder_class,
    .inputs      = dejudder_inputs,
    .outputs     = dejudder_outputs,
    .init        = dejudder_init,
    .uninit      = dejudder_uninit,
};