FFmpeg
f_select.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2011 Stefano Sabatini
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * filter for selecting which frame passes in the filterchain
24  */
25 
26 #include "libavutil/avstring.h"
27 #include "libavutil/eval.h"
28 #include "libavutil/fifo.h"
29 #include "libavutil/internal.h"
30 #include "libavutil/opt.h"
31 #include "avfilter.h"
32 #include "audio.h"
33 #include "formats.h"
34 #include "internal.h"
35 #include "video.h"
36 #include "scene_sad.h"
37 
/**
 * Names of the variables usable in the select expression.
 * NOTE: the order must match the parallel enum var_name exactly,
 * since av_expr_eval() indexes var_values[] by enum position.
 */
static const char *const var_names[] = {
    "TB",                ///< timebase

    "pts",               ///< original pts in the file of the frame
    "start_pts",         ///< first PTS in the stream, expressed in TB units
    "prev_pts",          ///< previous frame PTS
    "prev_selected_pts", ///< previous selected frame PTS

    "t",                 ///< timestamp expressed in seconds
    "start_t",           ///< first PTS in the stream, expressed in seconds
    "prev_t",            ///< previous frame time
    "prev_selected_t",   ///< previously selected time

    "pict_type",         ///< the type of picture in the movie
    "I",
    "P",
    "B",
    "S",
    "SI",
    "SP",
    "BI",
    "PICT_TYPE_I",
    "PICT_TYPE_P",
    "PICT_TYPE_B",
    "PICT_TYPE_S",
    "PICT_TYPE_SI",
    "PICT_TYPE_SP",
    "PICT_TYPE_BI",

    "interlace_type",    ///< the frame interlace type
    "PROGRESSIVE",
    "TOPFIRST",
    "BOTTOMFIRST",

    "consumed_samples_n",///< number of samples consumed by the filter (only audio)
    "samples_n",         ///< number of samples in the current frame (only audio)
    "sample_rate",       ///< sample rate (only audio)

    "n",                 ///< frame number (starting from zero)
    "selected_n",        ///< selected frame number (starting from zero)
    "prev_selected_n",   ///< number of the last selected frame

    "key",               ///< tell if the frame is a key frame
    "pos",               ///< original position in the file of the frame

    "scene",

    "concatdec_select",  ///< frame is within the interval set by the concat demuxer

    NULL
};
89 
/*
 * Indices into SelectContext.var_values[]; MUST stay in the same order
 * as var_names[] above.
 * NOTE(review): the member names were lost in extraction and have been
 * reconstructed from var_names[] and from the VAR_* usages in the
 * functions below — verify against upstream f_select.c.
 */
enum var_name {
    VAR_TB,

    VAR_PTS,
    VAR_START_PTS,
    VAR_PREV_PTS,
    VAR_PREV_SELECTED_PTS,

    VAR_T,
    VAR_START_T,
    VAR_PREV_T,
    VAR_PREV_SELECTED_T,

    VAR_PICT_TYPE,
    VAR_I,
    VAR_P,
    VAR_B,
    VAR_S,
    VAR_SI,
    VAR_SP,
    VAR_BI,
    VAR_PICT_TYPE_I,
    VAR_PICT_TYPE_P,
    VAR_PICT_TYPE_B,
    VAR_PICT_TYPE_S,
    VAR_PICT_TYPE_SI,
    VAR_PICT_TYPE_SP,
    VAR_PICT_TYPE_BI,

    VAR_INTERLACE_TYPE,
    VAR_INTERLACE_TYPE_P,
    VAR_INTERLACE_TYPE_T,
    VAR_INTERLACE_TYPE_B,

    VAR_CONSUMED_SAMPLES_N,
    VAR_SAMPLES_N,
    VAR_SAMPLE_RATE,

    VAR_N,
    VAR_SELECTED_N,
    VAR_PREV_SELECTED_N,

    VAR_KEY,
    VAR_POS,

    VAR_SCENE,

    VAR_CONCATDEC_SELECT,

    VAR_VARS_NB  ///< number of variables == number of non-NULL var_names entries
};
141 
142 typedef struct SelectContext {
143  const AVClass *class;
144  char *expr_str;
147  int do_scene_detect; ///< 1 if the expression requires scene detection variables, 0 otherwise
148  ff_scene_sad_fn sad; ///< Sum of the absolute difference function (scene detect only)
149  double prev_mafd; ///< previous MAFD (scene detect only)
150  AVFrame *prev_picref; ///< previous frame (scene detect only)
151  double select;
152  int select_out; ///< mark the selected output pad index
154 } SelectContext;
155 
156 #define OFFSET(x) offsetof(SelectContext, x)
157 #define DEFINE_OPTIONS(filt_name, FLAGS) \
158 static const AVOption filt_name##_options[] = { \
159  { "expr", "set an expression to use for selecting frames", OFFSET(expr_str), AV_OPT_TYPE_STRING, { .str = "1" }, .flags=FLAGS }, \
160  { "e", "set an expression to use for selecting frames", OFFSET(expr_str), AV_OPT_TYPE_STRING, { .str = "1" }, .flags=FLAGS }, \
161  { "outputs", "set the number of outputs", OFFSET(nb_outputs), AV_OPT_TYPE_INT, {.i64 = 1}, 1, INT_MAX, .flags=FLAGS }, \
162  { "n", "set the number of outputs", OFFSET(nb_outputs), AV_OPT_TYPE_INT, {.i64 = 1}, 1, INT_MAX, .flags=FLAGS }, \
163  { NULL } \
164 }
165 
166 static int request_frame(AVFilterLink *outlink);
167 
169 {
170  SelectContext *select = ctx->priv;
171  int i, ret;
172 
173  if ((ret = av_expr_parse(&select->expr, select->expr_str,
174  var_names, NULL, NULL, NULL, NULL, 0, ctx)) < 0) {
175  av_log(ctx, AV_LOG_ERROR, "Error while parsing expression '%s'\n",
176  select->expr_str);
177  return ret;
178  }
179  select->do_scene_detect = !!strstr(select->expr_str, "scene");
180 
181  for (i = 0; i < select->nb_outputs; i++) {
182  AVFilterPad pad = { 0 };
183 
184  pad.name = av_asprintf("output%d", i);
185  if (!pad.name)
186  return AVERROR(ENOMEM);
187  pad.type = ctx->filter->inputs[0].type;
189  if ((ret = ff_insert_outpad(ctx, i, &pad)) < 0) {
190  av_freep(&pad.name);
191  return ret;
192  }
193  }
194 
195  return 0;
196 }
197 
/* Values taken by the "interlace_type" expression variable. */
#define INTERLACE_TYPE_P 0 ///< progressive
#define INTERLACE_TYPE_T 1 ///< interlaced, top field first
#define INTERLACE_TYPE_B 2 ///< interlaced, bottom field first
201 
203 {
204  SelectContext *select = inlink->dst->priv;
205 
206  select->var_values[VAR_N] = 0.0;
207  select->var_values[VAR_SELECTED_N] = 0.0;
208 
209  select->var_values[VAR_TB] = av_q2d(inlink->time_base);
210 
211  select->var_values[VAR_PREV_PTS] = NAN;
214  select->var_values[VAR_PREV_T] = NAN;
215  select->var_values[VAR_START_PTS] = NAN;
216  select->var_values[VAR_START_T] = NAN;
217 
230 
234 
235  select->var_values[VAR_PICT_TYPE] = NAN;
236  select->var_values[VAR_INTERLACE_TYPE] = NAN;
237  select->var_values[VAR_SCENE] = NAN;
239  select->var_values[VAR_SAMPLES_N] = NAN;
240 
241  select->var_values[VAR_SAMPLE_RATE] =
242  inlink->type == AVMEDIA_TYPE_AUDIO ? inlink->sample_rate : NAN;
243 
244  if (CONFIG_SELECT_FILTER && select->do_scene_detect) {
245  select->sad = ff_scene_sad_get_fn(8);
246  if (!select->sad)
247  return AVERROR(EINVAL);
248  }
249  return 0;
250 }
251 
253 {
254  double ret = 0;
255  SelectContext *select = ctx->priv;
256  AVFrame *prev_picref = select->prev_picref;
257 
258  if (prev_picref &&
259  frame->height == prev_picref->height &&
260  frame->width == prev_picref->width) {
261  uint64_t sad;
262  double mafd, diff;
263 
264  select->sad(prev_picref->data[0], prev_picref->linesize[0], frame->data[0], frame->linesize[0], frame->width * 3, frame->height, &sad);
265  emms_c();
266  mafd = (double)sad / (frame->width * 3 * frame->height);
267  diff = fabs(mafd - select->prev_mafd);
268  ret = av_clipf(FFMIN(mafd, diff) / 100., 0, 1);
269  select->prev_mafd = mafd;
270  av_frame_free(&prev_picref);
271  }
272  select->prev_picref = av_frame_clone(frame);
273  return ret;
274 }
275 
276 static double get_concatdec_select(AVFrame *frame, int64_t pts)
277 {
278  AVDictionary *metadata = frame->metadata;
279  AVDictionaryEntry *start_time_entry = av_dict_get(metadata, "lavf.concatdec.start_time", NULL, 0);
280  AVDictionaryEntry *duration_entry = av_dict_get(metadata, "lavf.concatdec.duration", NULL, 0);
281  if (start_time_entry) {
282  int64_t start_time = strtoll(start_time_entry->value, NULL, 10);
283  if (pts >= start_time) {
284  if (duration_entry) {
285  int64_t duration = strtoll(duration_entry->value, NULL, 10);
286  if (pts < start_time + duration)
287  return -1;
288  else
289  return 0;
290  }
291  return -1;
292  }
293  return 0;
294  }
295  return NAN;
296 }
297 
/* Convert between double-valued expression variables and integer
 * timestamps; NAN maps to/from AV_NOPTS_VALUE. */
#define D2TS(d)  (isnan(d) ? AV_NOPTS_VALUE : (int64_t)(d))
#define TS2D(ts) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts))
300 
302 {
303  SelectContext *select = ctx->priv;
304  AVFilterLink *inlink = ctx->inputs[0];
305  double res;
306 
307  if (isnan(select->var_values[VAR_START_PTS]))
308  select->var_values[VAR_START_PTS] = TS2D(frame->pts);
309  if (isnan(select->var_values[VAR_START_T]))
310  select->var_values[VAR_START_T] = TS2D(frame->pts) * av_q2d(inlink->time_base);
311 
312  select->var_values[VAR_N ] = inlink->frame_count_out;
313  select->var_values[VAR_PTS] = TS2D(frame->pts);
314  select->var_values[VAR_T ] = TS2D(frame->pts) * av_q2d(inlink->time_base);
315  select->var_values[VAR_POS] = frame->pkt_pos == -1 ? NAN : frame->pkt_pos;
316  select->var_values[VAR_KEY] = frame->key_frame;
318 
319  switch (inlink->type) {
320  case AVMEDIA_TYPE_AUDIO:
321  select->var_values[VAR_SAMPLES_N] = frame->nb_samples;
322  break;
323 
324  case AVMEDIA_TYPE_VIDEO:
325  select->var_values[VAR_INTERLACE_TYPE] =
328  select->var_values[VAR_PICT_TYPE] = frame->pict_type;
329  if (select->do_scene_detect) {
330  char buf[32];
331  select->var_values[VAR_SCENE] = get_scene_score(ctx, frame);
332  // TODO: document metadata
333  snprintf(buf, sizeof(buf), "%f", select->var_values[VAR_SCENE]);
334  av_dict_set(&frame->metadata, "lavfi.scene_score", buf, 0);
335  }
336  break;
337  }
338 
339  select->select = res = av_expr_eval(select->expr, select->var_values, NULL);
340  av_log(inlink->dst, AV_LOG_DEBUG,
341  "n:%f pts:%f t:%f key:%d",
342  select->var_values[VAR_N],
343  select->var_values[VAR_PTS],
344  select->var_values[VAR_T],
345  frame->key_frame);
346 
347  switch (inlink->type) {
348  case AVMEDIA_TYPE_VIDEO:
349  av_log(inlink->dst, AV_LOG_DEBUG, " interlace_type:%c pict_type:%c scene:%f",
350  (!frame->interlaced_frame) ? 'P' :
351  frame->top_field_first ? 'T' : 'B',
353  select->var_values[VAR_SCENE]);
354  break;
355  case AVMEDIA_TYPE_AUDIO:
356  av_log(inlink->dst, AV_LOG_DEBUG, " samples_n:%d consumed_samples_n:%f",
357  frame->nb_samples,
359  break;
360  }
361 
362  if (res == 0) {
363  select->select_out = -1; /* drop */
364  } else if (isnan(res) || res < 0) {
365  select->select_out = 0; /* first output */
366  } else {
367  select->select_out = FFMIN(ceilf(res)-1, select->nb_outputs-1); /* other outputs */
368  }
369 
370  av_log(inlink->dst, AV_LOG_DEBUG, " -> select:%f select_out:%d\n", res, select->select_out);
371 
372  if (res) {
373  select->var_values[VAR_PREV_SELECTED_N] = select->var_values[VAR_N];
375  select->var_values[VAR_PREV_SELECTED_T] = select->var_values[VAR_T];
376  select->var_values[VAR_SELECTED_N] += 1.0;
377  if (inlink->type == AVMEDIA_TYPE_AUDIO)
378  select->var_values[VAR_CONSUMED_SAMPLES_N] += frame->nb_samples;
379  }
380 
381  select->var_values[VAR_PREV_PTS] = select->var_values[VAR_PTS];
382  select->var_values[VAR_PREV_T] = select->var_values[VAR_T];
383 }
384 
386 {
387  AVFilterContext *ctx = inlink->dst;
388  SelectContext *select = ctx->priv;
389 
390  select_frame(ctx, frame);
391  if (select->select)
392  return ff_filter_frame(ctx->outputs[select->select_out], frame);
393 
394  av_frame_free(&frame);
395  return 0;
396 }
397 
398 static int request_frame(AVFilterLink *outlink)
399 {
400  AVFilterLink *inlink = outlink->src->inputs[0];
401  int ret = ff_request_frame(inlink);
402  return ret;
403 }
404 
406 {
407  SelectContext *select = ctx->priv;
408  int i;
409 
410  av_expr_free(select->expr);
411  select->expr = NULL;
412 
413  for (i = 0; i < ctx->nb_outputs; i++)
414  av_freep(&ctx->output_pads[i].name);
415 
416  if (select->do_scene_detect) {
417  av_frame_free(&select->prev_picref);
418  }
419 }
420 
421 #if CONFIG_ASELECT_FILTER
422 
424 AVFILTER_DEFINE_CLASS(aselect);
425 
426 static av_cold int aselect_init(AVFilterContext *ctx)
427 {
428  SelectContext *select = ctx->priv;
429  int ret;
430 
431  if ((ret = init(ctx)) < 0)
432  return ret;
433 
434  if (select->do_scene_detect) {
435  av_log(ctx, AV_LOG_ERROR, "Scene detection is ignored in aselect filter\n");
436  return AVERROR(EINVAL);
437  }
438 
439  return 0;
440 }
441 
442 static const AVFilterPad avfilter_af_aselect_inputs[] = {
443  {
444  .name = "default",
445  .type = AVMEDIA_TYPE_AUDIO,
446  .config_props = config_input,
447  .filter_frame = filter_frame,
448  },
449  { NULL }
450 };
451 
453  .name = "aselect",
454  .description = NULL_IF_CONFIG_SMALL("Select audio frames to pass in output."),
455  .init = aselect_init,
456  .uninit = uninit,
457  .priv_size = sizeof(SelectContext),
458  .inputs = avfilter_af_aselect_inputs,
459  .priv_class = &aselect_class,
461 };
462 #endif /* CONFIG_ASELECT_FILTER */
463 
464 #if CONFIG_SELECT_FILTER
465 
466 static int query_formats(AVFilterContext *ctx)
467 {
468  SelectContext *select = ctx->priv;
469 
470  if (!select->do_scene_detect) {
471  return ff_default_query_formats(ctx);
472  } else {
473  int ret;
474  static const enum AVPixelFormat pix_fmts[] = {
477  };
478  AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
479 
480  if (!fmts_list)
481  return AVERROR(ENOMEM);
482  ret = ff_set_common_formats(ctx, fmts_list);
483  if (ret < 0)
484  return ret;
485  }
486  return 0;
487 }
488 
491 
492 static av_cold int select_init(AVFilterContext *ctx)
493 {
494  int ret;
495 
496  if ((ret = init(ctx)) < 0)
497  return ret;
498 
499  return 0;
500 }
501 
502 static const AVFilterPad avfilter_vf_select_inputs[] = {
503  {
504  .name = "default",
505  .type = AVMEDIA_TYPE_VIDEO,
506  .config_props = config_input,
507  .filter_frame = filter_frame,
508  },
509  { NULL }
510 };
511 
513  .name = "select",
514  .description = NULL_IF_CONFIG_SMALL("Select video frames to pass in output."),
515  .init = select_init,
516  .uninit = uninit,
517  .query_formats = query_formats,
518  .priv_size = sizeof(SelectContext),
519  .priv_class = &select_class,
520  .inputs = avfilter_vf_select_inputs,
522 };
523 #endif /* CONFIG_SELECT_FILTER */
#define NULL
Definition: coverity.c:32
void(* ff_scene_sad_fn)(SCENE_SAD_PARAMS)
Definition: scene_sad.h:34
BI type.
Definition: avutil.h:280
This structure describes decoded (raw) audio or video data.
Definition: frame.h:268
int64_t pkt_pos
reordered pos from the last AVPacket that has been input into the decoder
Definition: frame.h:539
static av_cold void uninit(AVFilterContext *ctx)
Definition: f_select.c:405
Main libavfilter public API header.
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:68
ff_scene_sad_fn ff_scene_sad_get_fn(int depth)
Definition: scene_sad.c:59
#define AV_OPT_FLAG_AUDIO_PARAM
Definition: opt.h:278
#define INTERLACE_TYPE_P
Definition: f_select.c:198
enum AVMediaType type
AVFilterPad type.
Definition: internal.h:65
int av_expr_parse(AVExpr **expr, const char *s, const char *const *const_names, const char *const *func1_names, double(*const *funcs1)(void *, double), const char *const *func2_names, double(*const *funcs2)(void *, double, double), int log_offset, void *log_ctx)
Parse an expression.
Definition: eval.c:679
static double get_concatdec_select(AVFrame *frame, int64_t pts)
Definition: f_select.c:276
AVFilterFormats * ff_make_format_list(const int *fmts)
Create a list of supported formats.
Definition: formats.c:283
Switching Intra.
Definition: avutil.h:278
const char * name
Pad name.
Definition: internal.h:60
AVFilterLink ** inputs
array of pointers to input links
Definition: avfilter.h:346
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1080
AVFilterPad * output_pads
array of output pads
Definition: avfilter.h:349
static int64_t start_time
Definition: ffplay.c:331
#define av_cold
Definition: attributes.h:82
ff_scene_sad_fn sad
Sum of the absolute difference function (scene detect only)
Definition: f_select.c:148
AVOptions.
static void select_frame(AVFilterContext *ctx, AVFrame *frame)
Definition: f_select.c:301
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:361
Definition: eval.c:157
int nb_outputs
Definition: f_select.c:153
#define INTERLACE_TYPE_T
Definition: f_select.c:199
int64_t duration
Definition: movenc.c:63
char * expr_str
Definition: f_select.c:144
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:40
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:88
AVDictionary * metadata
metadata.
Definition: frame.h:554
int interlaced_frame
The content of the picture is interlaced.
Definition: frame.h:415
#define AVFILTER_FLAG_DYNAMIC_OUTPUTS
The number of the filter outputs is not determined just by AVFilter.outputs.
Definition: avfilter.h:111
#define av_log(a,...)
AVFilter ff_vf_select
A filter pad used for either input or output.
Definition: internal.h:54
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
Scene SAD functions.
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:260
int width
Definition: frame.h:326
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
int ff_set_common_formats(AVFilterContext *ctx, AVFilterFormats *formats)
A helper for query_formats() which sets all links to the same list of formats.
Definition: formats.c:568
#define AV_OPT_FLAG_FILTERING_PARAM
a generic parameter which can be set by the user for filtering
Definition: opt.h:291
double var_values[VAR_VARS_NB]
Definition: f_select.c:146
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:202
unsigned nb_outputs
number of output pads
Definition: avfilter.h:351
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:186
void * priv
private data for use by the filter
Definition: avfilter.h:353
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
int select_out
mark the selected output pad index
Definition: f_select.c:152
char * av_asprintf(const char *fmt,...)
Definition: avstring.c:113
var_name
Definition: aeval.c:46
static int config_input(AVFilterLink *inlink)
Definition: f_select.c:202
common internal API header
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:351
#define NAN
Definition: mathematics.h:64
#define FFMIN(a, b)
Definition: common.h:96
AVFormatContext * ctx
Definition: movenc.c:48
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:69
AVFrame * av_frame_clone(const AVFrame *src)
Create a new frame that references the same data as src.
Definition: frame.c:540
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
Definition: f_select.c:385
const AVFilterPad * inputs
List of inputs, terminated by a zeroed element.
Definition: avfilter.h:164
int ff_default_query_formats(AVFilterContext *ctx)
Definition: formats.c:597
void av_expr_free(AVExpr *e)
Free a parsed expression previously created with av_expr_parse().
Definition: eval.c:334
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:260
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:299
#define AV_OPT_FLAG_VIDEO_PARAM
Definition: opt.h:279
static double get_scene_score(AVFilterContext *ctx, AVFrame *frame)
Definition: f_select.c:252
a very simple circular buffer FIFO implementation
void * buf
Definition: avisynth_c.h:766
double prev_mafd
previous MAFD (scene detect only)
Definition: f_select.c:149
AVFilter ff_af_aselect
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:70
Definition: f_select.c:98
Switching Predicted.
Definition: avutil.h:279
Describe the class of an AVClass context structure.
Definition: log.h:67
Filter definition.
Definition: avfilter.h:144
#define isnan(x)
Definition: libm.h:340
#define TS2D(ts)
Definition: f_select.c:299
const char * name
Filter name.
Definition: avfilter.h:148
#define snprintf
Definition: snprintf.h:34
AVFilterLink ** outputs
array of pointers to output links
Definition: avfilter.h:350
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:275
static int64_t pts
static int request_frame(AVFilterLink *outlink)
Definition: f_select.c:398
#define flags(name, subs,...)
Definition: cbs_av1.c:561
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:282
int do_scene_detect
1 if the expression requires scene detection variables, 0 otherwise
Definition: f_select.c:147
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
AVFrame * prev_picref
previous frame (scene detect only)
Definition: f_select.c:150
#define DEFINE_OPTIONS(filt_name, FLAGS)
Definition: f_select.c:157
static int query_formats(AVFilterContext *ctx)
Definition: aeval.c:244
static av_cold int init(AVFilterContext *ctx)
Definition: f_select.c:168
Bi-dir predicted.
Definition: avutil.h:276
static av_always_inline int diff(const uint32_t a, const uint32_t b)
char * value
Definition: dict.h:87
int top_field_first
If the content is interlaced, is top field displayed first.
Definition: frame.h:420
double av_expr_eval(AVExpr *e, const double *const_values, void *opaque)
Evaluate a previously parsed expression.
Definition: eval.c:734
#define AVFILTER_DEFINE_CLASS(fname)
Definition: internal.h:334
AVExpr * expr
Definition: f_select.c:145
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:346
A list of supported formats for one end of a filter link.
Definition: formats.h:64
int(* request_frame)(AVFilterLink *link)
Frame request callback.
Definition: internal.h:113
An instance of a filter.
Definition: avfilter.h:338
static const char *const var_names[]
Definition: f_select.c:38
int height
Definition: frame.h:326
#define av_freep(p)
int ff_request_frame(AVFilterLink *link)
Request an input frame from the filter at the other end of the link.
Definition: avfilter.c:407
#define INTERLACE_TYPE_B
Definition: f_select.c:200
internal API functions
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Frame references ownership and permissions
static int ff_insert_outpad(AVFilterContext *f, unsigned index, AVFilterPad *p)
Insert a new output pad for the filter.
Definition: internal.h:285
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
double select
Definition: f_select.c:151
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:334
const AVFilter * filter
the AVFilter of which this is an instance
Definition: avfilter.h:341
Predicted.
Definition: avutil.h:275
simple arithmetic expression evaluator