FFmpeg
setpts.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2010 Stefano Sabatini
3  * Copyright (c) 2008 Victor Paesa
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * video presentation timestamp (PTS) modification filter
25  */
26 
27 #include <inttypes.h>
28 
29 #include "libavutil/eval.h"
30 #include "libavutil/internal.h"
31 #include "libavutil/mathematics.h"
32 #include "libavutil/opt.h"
33 #include "libavutil/time.h"
34 #include "audio.h"
35 #include "avfilter.h"
36 #include "filters.h"
37 #include "internal.h"
38 #include "video.h"
39 
/**
 * Names of the variables usable in the user-supplied expression.
 * The order must match enum var_name below; the list is NULL-terminated
 * as required by av_expr_parse().
 */
static const char *const var_names[] = {
    "FRAME_RATE",           ///< defined only for constant frame-rate video
    "INTERLACED",           ///< tell if the current frame is interlaced
    "N",                    ///< frame / sample number (starting at zero)
    "NB_CONSUMED_SAMPLES",  ///< number of samples consumed by the filter (only audio)
    "NB_SAMPLES",           ///< number of samples in the current frame (only audio)
    "POS",                  ///< original position in the file of the frame
    "PREV_INPTS",           ///< previous input PTS
    "PREV_INT",             ///< previous input time in seconds
    "PREV_OUTPTS",          ///< previous output PTS
    "PREV_OUTT",            ///< previous output time in seconds
    "PTS",                  ///< original pts in the file of the frame
    "SAMPLE_RATE",          ///< sample rate (only audio)
    "STARTPTS",             ///< PTS at start of movie
    "STARTT",               ///< time at start of movie
    "T",                    ///< original time in the file of the frame
    "TB",                   ///< timebase
    "RTCTIME",              ///< wallclock (RTC) time in microseconds
    "RTCSTART",             ///< wallclock (RTC) time at the start of the movie in microseconds
    "S",                    ///< number of samples in the current frame
    "SR",                   ///< audio sample rate
    "FR",                   ///< defined only for constant frame-rate video
    NULL
};
64 
/**
 * Indices into SetPTSContext.var_values; must stay in the exact order of
 * the corresponding strings in var_names[].
 * NOTE(review): the enumerator list was lost in extraction and is
 * reconstructed from var_names[] ordering and the VAR_* uses in this file.
 */
enum var_name {
    VAR_FRAME_RATE,
    VAR_INTERLACED,
    VAR_N,
    VAR_NB_CONSUMED_SAMPLES,
    VAR_NB_SAMPLES,
    VAR_POS,
    VAR_PREV_INPTS,
    VAR_PREV_INT,
    VAR_PREV_OUTPTS,
    VAR_PREV_OUTT,
    VAR_PTS,
    VAR_SAMPLE_RATE,
    VAR_STARTPTS,
    VAR_STARTT,
    VAR_T,
    VAR_TB,
    VAR_RTCTIME,
    VAR_RTCSTART,
    VAR_S,
    VAR_SR,
    VAR_FR,
    VAR_VARS_NB    ///< number of variables; sizes var_values[]
};
89 
90 typedef struct SetPTSContext {
91  const AVClass *class;
92  char *expr_str;
97 
99 {
100  SetPTSContext *setpts = ctx->priv;
101  int ret;
102 
103  if ((ret = av_expr_parse(&setpts->expr, setpts->expr_str,
104  var_names, NULL, NULL, NULL, NULL, 0, ctx)) < 0) {
105  av_log(ctx, AV_LOG_ERROR, "Error while parsing expression '%s'\n", setpts->expr_str);
106  return ret;
107  }
108 
109  setpts->var_values[VAR_N] = 0.0;
110  setpts->var_values[VAR_S] = 0.0;
111  setpts->var_values[VAR_PREV_INPTS] = NAN;
112  setpts->var_values[VAR_PREV_INT] = NAN;
113  setpts->var_values[VAR_PREV_OUTPTS] = NAN;
114  setpts->var_values[VAR_PREV_OUTT] = NAN;
115  setpts->var_values[VAR_STARTPTS] = NAN;
116  setpts->var_values[VAR_STARTT] = NAN;
117  return 0;
118 }
119 
121 {
122  AVFilterContext *ctx = inlink->dst;
123  SetPTSContext *setpts = ctx->priv;
124 
125  setpts->type = inlink->type;
126  setpts->var_values[VAR_TB] = av_q2d(inlink->time_base);
127  setpts->var_values[VAR_RTCSTART] = av_gettime();
128 
129  setpts->var_values[VAR_SR] =
130  setpts->var_values[VAR_SAMPLE_RATE] =
131  setpts->type == AVMEDIA_TYPE_AUDIO ? inlink->sample_rate : NAN;
132 
133  setpts->var_values[VAR_FRAME_RATE] =
134  setpts->var_values[VAR_FR] = inlink->frame_rate.num &&
135  inlink->frame_rate.den ?
136  av_q2d(inlink->frame_rate) : NAN;
137 
138  av_log(inlink->src, AV_LOG_VERBOSE, "TB:%f FRAME_RATE:%f SAMPLE_RATE:%f\n",
139  setpts->var_values[VAR_TB],
140  setpts->var_values[VAR_FRAME_RATE],
141  setpts->var_values[VAR_SAMPLE_RATE]);
142  return 0;
143 }
144 
/* Double -> timestamp: NAN maps to AV_NOPTS_VALUE. */
#define D2TS(d)      (isnan(d) ? AV_NOPTS_VALUE : (int64_t)(d))
/* Timestamp -> double: AV_NOPTS_VALUE maps to NAN. */
#define TS2D(ts)     ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts))
/* Timestamp -> seconds using timebase tb; AV_NOPTS_VALUE maps to NAN. */
#define TS2T(ts, tb) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts) * av_q2d(tb))

#define BUF_SIZE 64

/**
 * Format v as an integer timestamp string into buf (at least BUF_SIZE
 * bytes); NAN is printed as "nan".
 *
 * @return buf, for convenient inline use in log statements
 */
static inline char *double2int64str(char *buf, double v)
{
    if (isnan(v)) snprintf(buf, BUF_SIZE, "nan");
    else          snprintf(buf, BUF_SIZE, "%"PRId64, (int64_t)v);
    return buf;
}
157 
158 static double eval_pts(SetPTSContext *setpts, AVFilterLink *inlink, AVFrame *frame, int64_t pts)
159 {
160  if (isnan(setpts->var_values[VAR_STARTPTS])) {
161  setpts->var_values[VAR_STARTPTS] = TS2D(pts);
162  setpts->var_values[VAR_STARTT ] = TS2T(pts, inlink->time_base);
163  }
164  setpts->var_values[VAR_PTS ] = TS2D(pts);
165  setpts->var_values[VAR_T ] = TS2T(pts, inlink->time_base);
166  setpts->var_values[VAR_POS ] = !frame || frame->pkt_pos == -1 ? NAN : frame->pkt_pos;
167  setpts->var_values[VAR_RTCTIME ] = av_gettime();
168 
169  if (frame) {
170  if (inlink->type == AVMEDIA_TYPE_VIDEO) {
171  setpts->var_values[VAR_INTERLACED] = frame->interlaced_frame;
172  } else if (inlink->type == AVMEDIA_TYPE_AUDIO) {
173  setpts->var_values[VAR_S] = frame->nb_samples;
174  setpts->var_values[VAR_NB_SAMPLES] = frame->nb_samples;
175  }
176  }
177 
178  return av_expr_eval(setpts->expr, setpts->var_values, NULL);
179 }
180 #define d2istr(v) double2int64str((char[BUF_SIZE]){0}, v)
181 
183 {
184  SetPTSContext *setpts = inlink->dst->priv;
185  int64_t in_pts = frame->pts;
186  double d;
187 
188  d = eval_pts(setpts, inlink, frame, frame->pts);
189  frame->pts = D2TS(d);
190 
191  av_log(inlink->dst, AV_LOG_TRACE,
192  "N:%"PRId64" PTS:%s T:%f POS:%s",
193  (int64_t)setpts->var_values[VAR_N],
194  d2istr(setpts->var_values[VAR_PTS]),
195  setpts->var_values[VAR_T],
196  d2istr(setpts->var_values[VAR_POS]));
197  switch (inlink->type) {
198  case AVMEDIA_TYPE_VIDEO:
199  av_log(inlink->dst, AV_LOG_TRACE, " INTERLACED:%"PRId64,
200  (int64_t)setpts->var_values[VAR_INTERLACED]);
201  break;
202  case AVMEDIA_TYPE_AUDIO:
203  av_log(inlink->dst, AV_LOG_TRACE, " NB_SAMPLES:%"PRId64" NB_CONSUMED_SAMPLES:%"PRId64,
204  (int64_t)setpts->var_values[VAR_NB_SAMPLES],
205  (int64_t)setpts->var_values[VAR_NB_CONSUMED_SAMPLES]);
206  break;
207  }
208  av_log(inlink->dst, AV_LOG_TRACE, " -> PTS:%s T:%f\n", d2istr(d), TS2T(d, inlink->time_base));
209 
210  if (inlink->type == AVMEDIA_TYPE_VIDEO) {
211  setpts->var_values[VAR_N] += 1.0;
212  } else {
213  setpts->var_values[VAR_N] += frame->nb_samples;
214  }
215 
216  setpts->var_values[VAR_PREV_INPTS ] = TS2D(in_pts);
217  setpts->var_values[VAR_PREV_INT ] = TS2T(in_pts, inlink->time_base);
218  setpts->var_values[VAR_PREV_OUTPTS] = TS2D(frame->pts);
219  setpts->var_values[VAR_PREV_OUTT] = TS2T(frame->pts, inlink->time_base);
220  if (setpts->type == AVMEDIA_TYPE_AUDIO) {
221  setpts->var_values[VAR_NB_CONSUMED_SAMPLES] += frame->nb_samples;
222  }
223  return ff_filter_frame(inlink->dst->outputs[0], frame);
224 }
225 
227 {
228  SetPTSContext *setpts = ctx->priv;
229  AVFilterLink *inlink = ctx->inputs[0];
230  AVFilterLink *outlink = ctx->outputs[0];
231  AVFrame *in;
232  int status;
233  int64_t pts;
234  int ret;
235 
236  FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);
237 
238  ret = ff_inlink_consume_frame(inlink, &in);
239  if (ret < 0)
240  return ret;
241  if (ret > 0)
242  return filter_frame(inlink, in);
243 
244  if (ff_inlink_acknowledge_status(inlink, &status, &pts)) {
245  double d = eval_pts(setpts, inlink, NULL, pts);
246 
247  av_log(ctx, AV_LOG_TRACE, "N:EOF PTS:%s T:%f POS:%s -> PTS:%s T:%f\n",
248  d2istr(setpts->var_values[VAR_PTS]),
249  setpts->var_values[VAR_T],
250  d2istr(setpts->var_values[VAR_POS]),
251  d2istr(d), TS2T(d, inlink->time_base));
252  ff_outlink_set_status(outlink, status, D2TS(d));
253  return 0;
254  }
255 
256  FF_FILTER_FORWARD_WANTED(outlink, inlink);
257 
258  return FFERROR_NOT_READY;
259 }
260 
262 {
263  SetPTSContext *setpts = ctx->priv;
264  av_expr_free(setpts->expr);
265  setpts->expr = NULL;
266 }
267 
268 #define OFFSET(x) offsetof(SetPTSContext, x)
269 #define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
270 static const AVOption options[] = {
271  { "expr", "Expression determining the frame timestamp", OFFSET(expr_str), AV_OPT_TYPE_STRING, { .str = "PTS" }, .flags = FLAGS },
272  { NULL }
273 };
274 
#if CONFIG_SETPTS_FILTER
#define setpts_options options
AVFILTER_DEFINE_CLASS(setpts);

static const AVFilterPad avfilter_vf_setpts_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input,
    },
    { NULL }
};

static const AVFilterPad avfilter_vf_setpts_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

/* Video variant of the filter. */
AVFilter ff_vf_setpts = {
    .name        = "setpts",
    .description = NULL_IF_CONFIG_SMALL("Set PTS for the output video frame."),
    .init        = init,
    .activate    = activate,
    .uninit      = uninit,

    .priv_size   = sizeof(SetPTSContext),
    .priv_class  = &setpts_class,

    .inputs      = avfilter_vf_setpts_inputs,
    .outputs     = avfilter_vf_setpts_outputs,
};
#endif /* CONFIG_SETPTS_FILTER */
310 
#if CONFIG_ASETPTS_FILTER

#define asetpts_options options
AVFILTER_DEFINE_CLASS(asetpts);

static const AVFilterPad asetpts_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_input,
    },
    { NULL }
};

static const AVFilterPad asetpts_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};

/* Audio variant of the filter; shares all callbacks with setpts. */
AVFilter ff_af_asetpts = {
    .name        = "asetpts",
    .description = NULL_IF_CONFIG_SMALL("Set PTS for the output audio frame."),
    .init        = init,
    .activate    = activate,
    .uninit      = uninit,
    .priv_size   = sizeof(SetPTSContext),
    .priv_class  = &asetpts_class,
    .inputs      = asetpts_inputs,
    .outputs     = asetpts_outputs,
};
#endif /* CONFIG_ASETPTS_FILTER */
static const char *const var_names[]
Definition: setpts.c:40
int ff_inlink_consume_frame(AVFilterLink *link, AVFrame **rframe)
Take a frame from the link&#39;s FIFO and update the link&#39;s stats.
Definition: avfilter.c:1494
#define NULL
Definition: coverity.c:32
Definition: setpts.c:80
This structure describes decoded (raw) audio or video data.
Definition: frame.h:295
AVOption.
Definition: opt.h:246
int64_t pkt_pos
reordered pos from the last AVPacket that has been input into the decoder
Definition: frame.h:566
Main libavfilter public API header.
static int config_input(AVFilterLink *inlink)
Definition: setpts.c:120
int num
Numerator.
Definition: rational.h:59
Definition: setpts.c:86
enum AVMediaType type
Definition: setpts.c:95
double var_values[VAR_VARS_NB]
Definition: setpts.c:94
int av_expr_parse(AVExpr **expr, const char *s, const char *const *const_names, const char *const *func1_names, double(*const *funcs1)(void *, double), const char *const *func2_names, double(*const *funcs2)(void *, double, double), int log_offset, void *log_ctx)
Parse an expression.
Definition: eval.c:683
return FFERROR_NOT_READY
static void ff_outlink_set_status(AVFilterLink *link, int status, int64_t pts)
Set the status field of a link from the source filter.
Definition: filters.h:189
#define d2istr(v)
Definition: setpts.c:180
const char * name
Pad name.
Definition: internal.h:60
AVFilterLink ** inputs
array of pointers to input links
Definition: avfilter.h:346
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1093
AVFilter ff_af_asetpts
#define av_cold
Definition: attributes.h:82
AVOptions.
#define AV_LOG_TRACE
Extremely verbose debugging, useful for libav* development.
Definition: log.h:202
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:388
Definition: eval.c:157
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
Definition: setpts.c:71
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:192
int interlaced_frame
The content of the picture is interlaced.
Definition: frame.h:442
Definition: setpts.c:68
#define av_log(a,...)
#define FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink)
Forward the status on an output link to an input link.
Definition: filters.h:199
A filter pad used for either input or output.
Definition: internal.h:54
int ff_inlink_acknowledge_status(AVFilterLink *link, int *rstatus, int64_t *rpts)
Test and acknowledge the change of status on the link.
Definition: avfilter.c:1449
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
Definition: setpts.c:81
#define BUF_SIZE
Definition: setpts.c:149
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:186
void * priv
private data for use by the filter
Definition: avfilter.h:353
AVFilter ff_vf_setpts
var_name
Definition: aeval.c:46
common internal API header
static const AVOption options[]
Definition: setpts.c:270
#define TS2T(ts, tb)
Definition: setpts.c:147
Definition: setpts.c:84
#define NAN
Definition: mathematics.h:64
char * expr_str
Definition: setpts.c:92
AVFormatContext * ctx
Definition: movenc.c:48
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
static char * double2int64str(char *buf, double v)
Definition: setpts.c:151
#define D2TS(d)
Definition: setpts.c:145
static const AVFilterPad outputs[]
Definition: af_acontrast.c:203
static av_cold void uninit(AVFilterContext *ctx)
Definition: setpts.c:261
int64_t av_gettime(void)
Get the current time in microseconds.
Definition: time.c:39
void av_expr_free(AVExpr *e)
Free a parsed expression previously created with av_expr_parse().
Definition: eval.c:336
void * buf
Definition: avisynth_c.h:766
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(const int16_t *) pi >> 8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(const int32_t *) pi >> 24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31))))#define SET_CONV_FUNC_GROUP(ofmt, ifmt) static void set_generic_function(AudioConvert *ac){}void ff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, int sample_rate, 
int apply_map){AudioConvert *ac;int in_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) return NULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt) > 2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);return NULL;}return ac;}in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}else if(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;else ac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);return ac;}int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){int use_generic=1;int len=in->nb_samples;int p;if(ac->dc){av_log(ac->avr, AV_LOG_TRACE,"%d samples - audio_convert: %s to %s (dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));return ff_convert_dither(ac-> in
Describe the class of an AVClass context structure.
Definition: log.h:67
Filter definition.
Definition: avfilter.h:144
#define isnan(x)
Definition: libm.h:340
#define TS2D(ts)
Definition: setpts.c:146
they must not be accessed directly The fifo field contains the frames that are queued in the input for processing by the filter The status_in and status_out fields contains the queued status(EOF or error) of the link
AVMediaType
Definition: avutil.h:199
const char * name
Filter name.
Definition: avfilter.h:148
#define snprintf
Definition: snprintf.h:34
AVFilterLink ** outputs
array of pointers to output links
Definition: avfilter.h:350
static int64_t pts
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
Definition: setpts.c:85
static double eval_pts(SetPTSContext *setpts, AVFilterLink *inlink, AVFrame *frame, int64_t pts)
Definition: setpts.c:158
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
Definition: setpts.c:182
int den
Denominator.
Definition: rational.h:60
#define OFFSET(x)
Definition: setpts.c:268
FF_FILTER_FORWARD_WANTED(outlink, inlink)
static int activate(AVFilterContext *ctx)
Definition: setpts.c:226
double av_expr_eval(AVExpr *e, const double *const_values, void *opaque)
Evaluate a previously parsed expression.
Definition: eval.c:754
#define AVFILTER_DEFINE_CLASS(fname)
Definition: internal.h:334
An instance of a filter.
Definition: avfilter.h:338
AVExpr * expr
Definition: setpts.c:93
#define FLAGS
Definition: setpts.c:269
internal API functions
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:361
static av_cold int init(AVFilterContext *ctx)
Definition: setpts.c:98
Definition: setpts.c:76
simple arithmetic expression evaluator