setpts.c
/*
 * Copyright (c) 2010 Stefano Sabatini
 * Copyright (c) 2008 Victor Paesa
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * video presentation timestamp (PTS) modification filter
 */
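
/*
 * Typical expressions, e.g. as given on the command line via -vf/-af
 * (evaluated once per frame using the variables listed in var_names[] below):
 *
 *   setpts=PTS-STARTPTS     start counting PTS from zero
 *   setpts=0.5*PTS          fast motion: halve every timestamp
 *   asetpts=N/SR/TB         generate audio PTS from the running sample count
 */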

#include <inttypes.h>

#include "libavutil/eval.h"
#include "libavutil/internal.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/time.h"
#include "audio.h"
#include "avfilter.h"
#include "internal.h"
#include "video.h"

static const char *const var_names[] = {
    "FRAME_RATE",  ///< defined only for constant frame-rate video
    "INTERLACED",  ///< whether the current frame is interlaced
    "N",           ///< frame / sample number (starting at zero)
    "NB_CONSUMED_SAMPLES", ///< number of samples consumed by the filter (only audio)
    "NB_SAMPLES",  ///< number of samples in the current frame (only audio)
    "POS",         ///< original position in the file of the frame
    "PREV_INPTS",  ///< previous input PTS
    "PREV_INT",    ///< previous input time in seconds
    "PREV_OUTPTS", ///< previous output PTS
    "PREV_OUTT",   ///< previous output time in seconds
    "PTS",         ///< original PTS in the file of the frame
    "SAMPLE_RATE", ///< sample rate (only audio)
    "STARTPTS",    ///< PTS at start of movie
    "STARTT",      ///< time at start of movie
    "T",           ///< original time in the file of the frame
    "TB",          ///< timebase
    "RTCTIME",     ///< wallclock (RTC) time in microseconds
    "RTCSTART",    ///< wallclock (RTC) time at the start of the movie in microseconds
    "S",           ///< number of samples in the current frame (only audio)
    "SR",          ///< audio sample rate
    "FR",          ///< defined only for constant frame-rate video
    NULL
};
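
/* The var_name enum below indexes var_values[] and must stay in the same
 * order as var_names[] above: av_expr_parse()/av_expr_eval() resolve each
 * constant by its position in this name list. */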
enum var_name {
    VAR_FRAME_RATE,
    VAR_INTERLACED,
    VAR_N,
    VAR_NB_CONSUMED_SAMPLES,
    VAR_NB_SAMPLES,
    VAR_POS,
    VAR_PREV_INPTS,
    VAR_PREV_INT,
    VAR_PREV_OUTPTS,
    VAR_PREV_OUTT,
    VAR_PTS,
    VAR_SAMPLE_RATE,
    VAR_STARTPTS,
    VAR_STARTT,
    VAR_T,
    VAR_TB,
    VAR_RTCTIME,
    VAR_RTCSTART,
    VAR_S,
    VAR_SR,
    VAR_FR,
    VAR_VARS_NB
};

typedef struct SetPTSContext {
    const AVClass *class;
    char *expr_str;
    AVExpr *expr;
    double var_values[VAR_VARS_NB];
    enum AVMediaType type;
} SetPTSContext;

static av_cold int init(AVFilterContext *ctx)
{
    SetPTSContext *setpts = ctx->priv;
    int ret;

    if ((ret = av_expr_parse(&setpts->expr, setpts->expr_str,
                             var_names, NULL, NULL, NULL, NULL, 0, ctx)) < 0) {
        av_log(ctx, AV_LOG_ERROR, "Error while parsing expression '%s'\n", setpts->expr_str);
        return ret;
    }

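    /* PREV_* and START* start out as NAN ("not yet known"); filter_frame()
     * detects the first frame by checking STARTPTS with isnan() and updates
     * these values as frames pass through. */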
    setpts->var_values[VAR_N]           = 0.0;
    setpts->var_values[VAR_S]           = 0.0;
    setpts->var_values[VAR_PREV_INPTS]  = NAN;
    setpts->var_values[VAR_PREV_INT]    = NAN;
    setpts->var_values[VAR_PREV_OUTPTS] = NAN;
    setpts->var_values[VAR_PREV_OUTT]   = NAN;
    setpts->var_values[VAR_STARTPTS]    = NAN;
    setpts->var_values[VAR_STARTT]      = NAN;
    return 0;
}

static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    SetPTSContext *setpts = ctx->priv;

    setpts->type = inlink->type;
    setpts->var_values[VAR_TB] = av_q2d(inlink->time_base);
    setpts->var_values[VAR_RTCSTART] = av_gettime();

    setpts->var_values[VAR_SR] =
    setpts->var_values[VAR_SAMPLE_RATE] =
        setpts->type == AVMEDIA_TYPE_AUDIO ? inlink->sample_rate : NAN;

    setpts->var_values[VAR_FRAME_RATE] =
    setpts->var_values[VAR_FR] = inlink->frame_rate.num &&
                                 inlink->frame_rate.den ?
                                     av_q2d(inlink->frame_rate) : NAN;

    av_log(inlink->src, AV_LOG_VERBOSE, "TB:%f FRAME_RATE:%f SAMPLE_RATE:%f\n",
           setpts->var_values[VAR_TB],
           setpts->var_values[VAR_FRAME_RATE],
           setpts->var_values[VAR_SAMPLE_RATE]);
    return 0;
}

#define D2TS(d)      (isnan(d) ? AV_NOPTS_VALUE : (int64_t)(d))
#define TS2D(ts)     ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts))
#define TS2T(ts, tb) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts)*av_q2d(tb))
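
/* Inside expressions, timestamps are carried as doubles: the "unset" value
 * AV_NOPTS_VALUE is mapped to NAN on the way in (TS2D/TS2T) and back to
 * AV_NOPTS_VALUE on the way out (D2TS), so unset PTS propagate through
 * the arithmetic. */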

#define BUF_SIZE 64

static inline char *double2int64str(char *buf, double v)
{
    if (isnan(v)) snprintf(buf, BUF_SIZE, "nan");
    else          snprintf(buf, BUF_SIZE, "%"PRId64, (int64_t)v);
    return buf;
}

#define d2istr(v) double2int64str((char[BUF_SIZE]){0}, v)

static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    SetPTSContext *setpts = inlink->dst->priv;
    int64_t in_pts = frame->pts;
    double d;

    if (isnan(setpts->var_values[VAR_STARTPTS])) {
        setpts->var_values[VAR_STARTPTS] = TS2D(frame->pts);
        setpts->var_values[VAR_STARTT  ] = TS2T(frame->pts, inlink->time_base);
    }
    setpts->var_values[VAR_PTS       ] = TS2D(frame->pts);
    setpts->var_values[VAR_T         ] = TS2T(frame->pts, inlink->time_base);
    setpts->var_values[VAR_POS       ] = frame->pkt_pos == -1 ? NAN : frame->pkt_pos;
    setpts->var_values[VAR_RTCTIME   ] = av_gettime();

    if (inlink->type == AVMEDIA_TYPE_VIDEO) {
        setpts->var_values[VAR_INTERLACED] = frame->interlaced_frame;
    } else if (inlink->type == AVMEDIA_TYPE_AUDIO) {
        setpts->var_values[VAR_S]          = frame->nb_samples;
        setpts->var_values[VAR_NB_SAMPLES] = frame->nb_samples;
    }

    d = av_expr_eval(setpts->expr, setpts->var_values, NULL);
    frame->pts = D2TS(d);

    av_log(inlink->dst, AV_LOG_TRACE,
           "N:%"PRId64" PTS:%s T:%f POS:%s",
           (int64_t)setpts->var_values[VAR_N],
           d2istr(setpts->var_values[VAR_PTS]),
           setpts->var_values[VAR_T],
           d2istr(setpts->var_values[VAR_POS]));
    switch (inlink->type) {
    case AVMEDIA_TYPE_VIDEO:
        av_log(inlink->dst, AV_LOG_TRACE, " INTERLACED:%"PRId64,
               (int64_t)setpts->var_values[VAR_INTERLACED]);
        break;
    case AVMEDIA_TYPE_AUDIO:
        av_log(inlink->dst, AV_LOG_TRACE, " NB_SAMPLES:%"PRId64" NB_CONSUMED_SAMPLES:%"PRId64,
               (int64_t)setpts->var_values[VAR_NB_SAMPLES],
               (int64_t)setpts->var_values[VAR_NB_CONSUMED_SAMPLES]);
        break;
    }
    av_log(inlink->dst, AV_LOG_TRACE, " -> PTS:%s T:%f\n", d2istr(d), TS2T(d, inlink->time_base));

    if (inlink->type == AVMEDIA_TYPE_VIDEO) {
        setpts->var_values[VAR_N] += 1.0;
    } else {
        setpts->var_values[VAR_N] += frame->nb_samples;
    }

    setpts->var_values[VAR_PREV_INPTS ] = TS2D(in_pts);
    setpts->var_values[VAR_PREV_INT   ] = TS2T(in_pts, inlink->time_base);
    setpts->var_values[VAR_PREV_OUTPTS] = TS2D(frame->pts);
    setpts->var_values[VAR_PREV_OUTT]   = TS2T(frame->pts, inlink->time_base);
    if (setpts->type == AVMEDIA_TYPE_AUDIO) {
        setpts->var_values[VAR_NB_CONSUMED_SAMPLES] += frame->nb_samples;
    }
    return ff_filter_frame(inlink->dst->outputs[0], frame);
}

static av_cold void uninit(AVFilterContext *ctx)
{
    SetPTSContext *setpts = ctx->priv;
    av_expr_free(setpts->expr);
    setpts->expr = NULL;
}

#define OFFSET(x) offsetof(SetPTSContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
static const AVOption options[] = {
    { "expr", "Expression determining the frame timestamp", OFFSET(expr_str), AV_OPT_TYPE_STRING, { .str = "PTS" }, .flags = FLAGS },
    { NULL }
};

#if CONFIG_SETPTS_FILTER
#define setpts_options options
AVFILTER_DEFINE_CLASS(setpts);

static const AVFilterPad avfilter_vf_setpts_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad avfilter_vf_setpts_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

AVFilter ff_vf_setpts = {
    .name        = "setpts",
    .description = NULL_IF_CONFIG_SMALL("Set PTS for the output video frame."),
    .init        = init,
    .uninit      = uninit,

    .priv_size   = sizeof(SetPTSContext),
    .priv_class  = &setpts_class,

    .inputs      = avfilter_vf_setpts_inputs,
    .outputs     = avfilter_vf_setpts_outputs,
};
#endif /* CONFIG_SETPTS_FILTER */

#if CONFIG_ASETPTS_FILTER

#define asetpts_options options
AVFILTER_DEFINE_CLASS(asetpts);

static const AVFilterPad asetpts_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_input,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad asetpts_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};

AVFilter ff_af_asetpts = {
    .name        = "asetpts",
    .description = NULL_IF_CONFIG_SMALL("Set PTS for the output audio frame."),
    .init        = init,
    .uninit      = uninit,
    .priv_size   = sizeof(SetPTSContext),
    .priv_class  = &asetpts_class,
    .inputs      = asetpts_inputs,
    .outputs     = asetpts_outputs,
};
#endif /* CONFIG_ASETPTS_FILTER */