FFmpeg
setpts.c
/*
 * Copyright (c) 2010 Stefano Sabatini
 * Copyright (c) 2008 Victor Paesa
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * video presentation timestamp (PTS) modification filter
 */
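
/*
 * Typical command-line usage (file names are placeholders; the expressions
 * are the documented setpts/asetpts examples):
 *
 *   ffmpeg -i in.mp4 -vf "setpts=PTS-STARTPTS" out.mp4    # start video PTS at zero
 *   ffmpeg -i in.wav -af "asetpts=N/SR/TB" out.wav        # rebuild audio PTS from the sample count
 *
 * The expression is re-evaluated for every frame using the variables listed
 * in var_names[] below.
 */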

#include "config_components.h"

#include <inttypes.h>

#include "libavutil/eval.h"
#include "libavutil/internal.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/time.h"
#include "audio.h"
#include "avfilter.h"
#include "filters.h"
#include "internal.h"
#include "video.h"

static const char *const var_names[] = {
    "FRAME_RATE",  ///< defined only for constant frame-rate video
    "INTERLACED",  ///< tell if the current frame is interlaced
    "N",           ///< frame / sample number (starting at zero)
    "NB_CONSUMED_SAMPLES", ///< number of samples consumed by the filter (only audio)
    "NB_SAMPLES",  ///< number of samples in the current frame (only audio)
    "POS",         ///< original position in the file of the frame
    "PREV_INPTS",  ///< previous input PTS
    "PREV_INT",    ///< previous input time in seconds
    "PREV_OUTPTS", ///< previous output PTS
    "PREV_OUTT",   ///< previous output time in seconds
    "PTS",         ///< original PTS in the file of the frame
    "SAMPLE_RATE", ///< sample rate (only audio)
    "STARTPTS",    ///< PTS at start of movie
    "STARTT",      ///< time at start of movie
    "T",           ///< original time in the file of the frame
    "TB",          ///< timebase
    "RTCTIME",     ///< wallclock (RTC) time in microseconds
    "RTCSTART",    ///< wallclock (RTC) time at the start of the movie in microseconds
    "S",           ///< number of samples in the current frame
    "SR",          ///< audio sample rate
    "FR",          ///< defined only for constant frame-rate video
    NULL
};
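
/* Index of each variable in var_values[]; the order must match var_names[]
 * above, because av_expr_parse() binds names to values positionally. */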
enum var_name {
    VAR_FRAME_RATE, VAR_INTERLACED,
    VAR_N, VAR_NB_CONSUMED_SAMPLES, VAR_NB_SAMPLES, VAR_POS,
    VAR_PREV_INPTS, VAR_PREV_INT, VAR_PREV_OUTPTS, VAR_PREV_OUTT,
    VAR_PTS, VAR_SAMPLE_RATE, VAR_STARTPTS, VAR_STARTT,
    VAR_T, VAR_TB, VAR_RTCTIME, VAR_RTCSTART,
    VAR_S, VAR_SR, VAR_FR,
    VAR_VARS_NB
};

typedef struct SetPTSContext {
    const AVClass *class;
    char *expr_str;
    AVExpr *expr;
    double var_values[VAR_VARS_NB];
    enum AVMediaType type;
} SetPTSContext;

static av_cold int init(AVFilterContext *ctx)
{
    SetPTSContext *setpts = ctx->priv;
    int ret;

    if ((ret = av_expr_parse(&setpts->expr, setpts->expr_str,
                             var_names, NULL, NULL, NULL, NULL, 0, ctx)) < 0) {
        av_log(ctx, AV_LOG_ERROR, "Error while parsing expression '%s'\n", setpts->expr_str);
        return ret;
    }

    setpts->var_values[VAR_N]           = 0.0;
    setpts->var_values[VAR_S]           = 0.0;
    setpts->var_values[VAR_PREV_INPTS]  = NAN;
    setpts->var_values[VAR_PREV_INT]    = NAN;
    setpts->var_values[VAR_PREV_OUTPTS] = NAN;
    setpts->var_values[VAR_PREV_OUTT]   = NAN;
    setpts->var_values[VAR_STARTPTS]    = NAN;
    setpts->var_values[VAR_STARTT]      = NAN;
    return 0;
}
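
/* Input link configuration: cache the link's time base, sample rate and
 * (if constant) frame rate as expression variables. */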
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    SetPTSContext *setpts = ctx->priv;

    setpts->type = inlink->type;
    setpts->var_values[VAR_TB] = av_q2d(inlink->time_base);
    setpts->var_values[VAR_RTCSTART] = av_gettime();

    setpts->var_values[VAR_SR] =
    setpts->var_values[VAR_SAMPLE_RATE] =
        setpts->type == AVMEDIA_TYPE_AUDIO ? inlink->sample_rate : NAN;

    setpts->var_values[VAR_FRAME_RATE] =
    setpts->var_values[VAR_FR] = inlink->frame_rate.num &&
                                 inlink->frame_rate.den ?
                                     av_q2d(inlink->frame_rate) : NAN;

    av_log(inlink->src, AV_LOG_VERBOSE, "TB:%f FRAME_RATE:%f SAMPLE_RATE:%f\n",
           setpts->var_values[VAR_TB],
           setpts->var_values[VAR_FRAME_RATE],
           setpts->var_values[VAR_SAMPLE_RATE]);
    return 0;
}
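
/* Logging helper: format a double timestamp as a truncated int64, or as
 * "nan" when it is unset. */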
#define BUF_SIZE 64

static inline char *double2int64str(char *buf, double v)
{
    if (isnan(v)) snprintf(buf, BUF_SIZE, "nan");
    else          snprintf(buf, BUF_SIZE, "%"PRId64, (int64_t)v);
    return buf;
}
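
/* Fill the per-frame variables and evaluate the user expression.
 * frame may be NULL when computing the timestamp forwarded at EOF. */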
static double eval_pts(SetPTSContext *setpts, AVFilterLink *inlink, AVFrame *frame, int64_t pts)
{
    if (isnan(setpts->var_values[VAR_STARTPTS])) {
        setpts->var_values[VAR_STARTPTS] = TS2D(pts);
        setpts->var_values[VAR_STARTT  ] = TS2T(pts, inlink->time_base);
    }
    setpts->var_values[VAR_PTS    ] = TS2D(pts);
    setpts->var_values[VAR_T      ] = TS2T(pts, inlink->time_base);
    setpts->var_values[VAR_POS    ] = !frame || frame->pkt_pos == -1 ? NAN : frame->pkt_pos;
    setpts->var_values[VAR_RTCTIME] = av_gettime();

    if (frame) {
        if (inlink->type == AVMEDIA_TYPE_VIDEO) {
            setpts->var_values[VAR_INTERLACED] = frame->interlaced_frame;
        } else if (inlink->type == AVMEDIA_TYPE_AUDIO) {
            setpts->var_values[VAR_S]          = frame->nb_samples;
            setpts->var_values[VAR_NB_SAMPLES] = frame->nb_samples;
        }
    }

    return av_expr_eval(setpts->expr, setpts->var_values, NULL);
}
#define d2istr(v) double2int64str((char[BUF_SIZE]){0}, v)
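
/* Rewrite the PTS of one frame with the evaluated expression, then update
 * N, the PREV_* variables and the consumed-sample count before forwarding. */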
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    SetPTSContext *setpts = inlink->dst->priv;
    int64_t in_pts = frame->pts;
    double d;

    d = eval_pts(setpts, inlink, frame, frame->pts);
    frame->pts = D2TS(d);

    av_log(inlink->dst, AV_LOG_TRACE,
           "N:%"PRId64" PTS:%s T:%f POS:%s",
           (int64_t)setpts->var_values[VAR_N],
           d2istr(setpts->var_values[VAR_PTS]),
           setpts->var_values[VAR_T],
           d2istr(setpts->var_values[VAR_POS]));
    switch (inlink->type) {
    case AVMEDIA_TYPE_VIDEO:
        av_log(inlink->dst, AV_LOG_TRACE, " INTERLACED:%"PRId64,
               (int64_t)setpts->var_values[VAR_INTERLACED]);
        break;
    case AVMEDIA_TYPE_AUDIO:
        av_log(inlink->dst, AV_LOG_TRACE, " NB_SAMPLES:%"PRId64" NB_CONSUMED_SAMPLES:%"PRId64,
               (int64_t)setpts->var_values[VAR_NB_SAMPLES],
               (int64_t)setpts->var_values[VAR_NB_CONSUMED_SAMPLES]);
        break;
    }
    av_log(inlink->dst, AV_LOG_TRACE, " -> PTS:%s T:%f\n", d2istr(d), TS2T(d, inlink->time_base));

    if (inlink->type == AVMEDIA_TYPE_VIDEO) {
        setpts->var_values[VAR_N] += 1.0;
    } else {
        setpts->var_values[VAR_N] += frame->nb_samples;
    }

    setpts->var_values[VAR_PREV_INPTS ] = TS2D(in_pts);
    setpts->var_values[VAR_PREV_INT   ] = TS2T(in_pts, inlink->time_base);
    setpts->var_values[VAR_PREV_OUTPTS] = TS2D(frame->pts);
    setpts->var_values[VAR_PREV_OUTT  ] = TS2T(frame->pts, inlink->time_base);
    if (setpts->type == AVMEDIA_TYPE_AUDIO) {
        setpts->var_values[VAR_NB_CONSUMED_SAMPLES] += frame->nb_samples;
    }
    return ff_filter_frame(inlink->dst->outputs[0], frame);
}
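
/* activate() callback: forward output status back to the input, filter one
 * queued frame if available, propagate EOF with a rewritten timestamp, or
 * request more input. */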
static int activate(AVFilterContext *ctx)
{
    SetPTSContext *setpts = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *in;
    int status;
    int64_t pts;
    int ret;

    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);

    ret = ff_inlink_consume_frame(inlink, &in);
    if (ret < 0)
        return ret;
    if (ret > 0)
        return filter_frame(inlink, in);

    if (ff_inlink_acknowledge_status(inlink, &status, &pts)) {
        double d = eval_pts(setpts, inlink, NULL, pts);

        av_log(ctx, AV_LOG_TRACE, "N:EOF PTS:%s T:%f POS:%s -> PTS:%s T:%f\n",
               d2istr(setpts->var_values[VAR_PTS]),
               setpts->var_values[VAR_T],
               d2istr(setpts->var_values[VAR_POS]),
               d2istr(d), TS2T(d, inlink->time_base));
        ff_outlink_set_status(outlink, status, D2TS(d));
        return 0;
    }

    FF_FILTER_FORWARD_WANTED(outlink, inlink);

    return FFERROR_NOT_READY;
}
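
/* Free the parsed expression on filter teardown. */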
static av_cold void uninit(AVFilterContext *ctx)
{
    SetPTSContext *setpts = ctx->priv;
    av_expr_free(setpts->expr);
    setpts->expr = NULL;
}

#define OFFSET(x) offsetof(SetPTSContext, x)
#define V AV_OPT_FLAG_VIDEO_PARAM
#define A AV_OPT_FLAG_AUDIO_PARAM
#define F AV_OPT_FLAG_FILTERING_PARAM
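
/* The video ("setpts") and audio ("asetpts") variants below share init(),
 * uninit() and activate(); they differ only in pad media type and option
 * flags. */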
#if CONFIG_SETPTS_FILTER
static const AVOption setpts_options[] = {
    { "expr", "Expression determining the frame timestamp", OFFSET(expr_str), AV_OPT_TYPE_STRING, { .str = "PTS" }, .flags = V|F },
    { NULL }
};
AVFILTER_DEFINE_CLASS(setpts);

static const AVFilterPad avfilter_vf_setpts_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input,
    },
};

static const AVFilterPad avfilter_vf_setpts_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
};

const AVFilter ff_vf_setpts = {
    .name        = "setpts",
    .description = NULL_IF_CONFIG_SMALL("Set PTS for the output video frame."),
    .init        = init,
    .activate    = activate,
    .uninit      = uninit,
    .flags       = AVFILTER_FLAG_METADATA_ONLY,

    .priv_size   = sizeof(SetPTSContext),
    .priv_class  = &setpts_class,

    FILTER_INPUTS(avfilter_vf_setpts_inputs),
    FILTER_OUTPUTS(avfilter_vf_setpts_outputs),
};
#endif /* CONFIG_SETPTS_FILTER */

#if CONFIG_ASETPTS_FILTER

static const AVOption asetpts_options[] = {
    { "expr", "Expression determining the frame timestamp", OFFSET(expr_str), AV_OPT_TYPE_STRING, { .str = "PTS" }, .flags = A|F },
    { NULL }
};
AVFILTER_DEFINE_CLASS(asetpts);

static const AVFilterPad asetpts_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_input,
    },
};

static const AVFilterPad asetpts_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
};

const AVFilter ff_af_asetpts = {
    .name        = "asetpts",
    .description = NULL_IF_CONFIG_SMALL("Set PTS for the output audio frame."),
    .init        = init,
    .activate    = activate,
    .uninit      = uninit,
    .priv_size   = sizeof(SetPTSContext),
    .priv_class  = &asetpts_class,
    .flags       = AVFILTER_FLAG_METADATA_ONLY,
    FILTER_INPUTS(asetpts_inputs),
    FILTER_OUTPUTS(asetpts_outputs),
};
#endif /* CONFIG_ASETPTS_FILTER */