FFmpeg
f_select.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2011 Stefano Sabatini
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * filter for selecting which frame passes in the filterchain
24  */
25 
26 #include "libavutil/avstring.h"
27 #include "libavutil/eval.h"
28 #include "libavutil/fifo.h"
29 #include "libavutil/imgutils.h"
30 #include "libavutil/internal.h"
31 #include "libavutil/opt.h"
32 #include "libavutil/pixdesc.h"
33 #include "avfilter.h"
34 #include "audio.h"
35 #include "formats.h"
36 #include "internal.h"
37 #include "video.h"
38 #include "scene_sad.h"
39 
static const char *const var_names[] = {
    "TB",                ///< timebase

    "pts",               ///< original pts in the file of the frame
    "start_pts",         ///< first PTS in the stream, expressed in TB units
    "prev_pts",          ///< previous frame PTS
    "prev_selected_pts", ///< previous selected frame PTS

    "t",                 ///< timestamp expressed in seconds
    "start_t",           ///< first PTS in the stream, expressed in seconds
    "prev_t",            ///< previous frame time
    "prev_selected_t",   ///< previously selected time

    "pict_type",         ///< the type of picture in the movie
    /* symbolic picture-type constants usable in the expression */
    "I",
    "P",
    "B",
    "S",
    "SI",
    "SP",
    "BI",
    "PICT_TYPE_I",
    "PICT_TYPE_P",
    "PICT_TYPE_B",
    "PICT_TYPE_S",
    "PICT_TYPE_SI",
    "PICT_TYPE_SP",
    "PICT_TYPE_BI",

    "interlace_type",    ///< the frame interlace type
    /* constants matching the INTERLACE_TYPE_P/T/B values below */
    "PROGRESSIVE",
    "TOPFIRST",
    "BOTTOMFIRST",

    "consumed_samples_n",///< number of samples consumed by the filter (only audio)
    "samples_n",         ///< number of samples in the current frame (only audio)
    "sample_rate",       ///< sample rate (only audio)

    "n",                 ///< frame number (starting from zero)
    "selected_n",        ///< selected frame number (starting from zero)
    "prev_selected_n",   ///< number of the last selected frame

    "key",               ///< tell if the frame is a key frame
    "pos",               ///< original position in the file of the frame

    "scene",             ///< scene change score computed by get_scene_score() (video only)

    "concatdec_select",  ///< frame is within the interval set by the concat demuxer

    NULL
};
91 
92 enum var_name {
94 
99 
104 
120 
125 
129 
133 
136 
138 
140 
142 };
143 
144 typedef struct SelectContext {
145  const AVClass *class;
146  char *expr_str;
149  int bitdepth;
151  ptrdiff_t width[4];
152  ptrdiff_t height[4];
153  int do_scene_detect; ///< 1 if the expression requires scene detection variables, 0 otherwise
154  ff_scene_sad_fn sad; ///< Sum of the absolute difference function (scene detect only)
155  double prev_mafd; ///< previous MAFD (scene detect only)
156  AVFrame *prev_picref; ///< previous frame (scene detect only)
157  double select;
158  int select_out; ///< mark the selected output pad index
160 } SelectContext;
161 
#define OFFSET(x) offsetof(SelectContext, x)

/* Generate the AVOption table for one of the two filter variants
 * (select/aselect); FLAGS carries the variant's video or audio
 * option flags. Both long and short option aliases are declared. */
#define DEFINE_OPTIONS(filt_name, FLAGS)                           \
static const AVOption filt_name##_options[] = {                    \
    { "expr", "set an expression to use for selecting frames", OFFSET(expr_str), AV_OPT_TYPE_STRING, { .str = "1" }, .flags=FLAGS }, \
    { "e",    "set an expression to use for selecting frames", OFFSET(expr_str), AV_OPT_TYPE_STRING, { .str = "1" }, .flags=FLAGS }, \
    { "outputs", "set the number of outputs", OFFSET(nb_outputs), AV_OPT_TYPE_INT, {.i64 = 1}, 1, INT_MAX, .flags=FLAGS }, \
    { "n",       "set the number of outputs", OFFSET(nb_outputs), AV_OPT_TYPE_INT, {.i64 = 1}, 1, INT_MAX, .flags=FLAGS }, \
    { NULL } \
}
171 
172 static int request_frame(AVFilterLink *outlink);
173 
175 {
176  SelectContext *select = ctx->priv;
177  int i, ret;
178 
179  if ((ret = av_expr_parse(&select->expr, select->expr_str,
180  var_names, NULL, NULL, NULL, NULL, 0, ctx)) < 0) {
181  av_log(ctx, AV_LOG_ERROR, "Error while parsing expression '%s'\n",
182  select->expr_str);
183  return ret;
184  }
185  select->do_scene_detect = !!strstr(select->expr_str, "scene");
186 
187  for (i = 0; i < select->nb_outputs; i++) {
188  AVFilterPad pad = { 0 };
189 
190  pad.name = av_asprintf("output%d", i);
191  if (!pad.name)
192  return AVERROR(ENOMEM);
193  pad.type = ctx->filter->inputs[0].type;
195  if ((ret = ff_insert_outpad(ctx, i, &pad)) < 0) {
196  av_freep(&pad.name);
197  return ret;
198  }
199  }
200 
201  return 0;
202 }
203 
204 #define INTERLACE_TYPE_P 0
205 #define INTERLACE_TYPE_T 1
206 #define INTERLACE_TYPE_B 2
207 
209 {
210  SelectContext *select = inlink->dst->priv;
212 
213  select->bitdepth = desc->comp[0].depth;
214  select->nb_planes = av_pix_fmt_count_planes(inlink->format);
215  for (int plane = 0; plane < select->nb_planes; plane++) {
216  ptrdiff_t line_size = av_image_get_linesize(inlink->format, inlink->w, plane);
217  int vsub = desc->log2_chroma_h;
218 
219  select->width[plane] = line_size >> (select->bitdepth > 8);
220  select->height[plane] = plane == 1 || plane == 2 ? AV_CEIL_RSHIFT(inlink->h, vsub) : inlink->h;
221  }
222 
223  select->var_values[VAR_N] = 0.0;
224  select->var_values[VAR_SELECTED_N] = 0.0;
225 
226  select->var_values[VAR_TB] = av_q2d(inlink->time_base);
227 
228  select->var_values[VAR_PREV_PTS] = NAN;
231  select->var_values[VAR_PREV_T] = NAN;
232  select->var_values[VAR_START_PTS] = NAN;
233  select->var_values[VAR_START_T] = NAN;
234 
247 
251 
252  select->var_values[VAR_PICT_TYPE] = NAN;
253  select->var_values[VAR_INTERLACE_TYPE] = NAN;
254  select->var_values[VAR_SCENE] = NAN;
256  select->var_values[VAR_SAMPLES_N] = NAN;
257 
258  select->var_values[VAR_SAMPLE_RATE] =
259  inlink->type == AVMEDIA_TYPE_AUDIO ? inlink->sample_rate : NAN;
260 
261  if (CONFIG_SELECT_FILTER && select->do_scene_detect) {
262  select->sad = ff_scene_sad_get_fn(select->bitdepth == 8 ? 8 : 16);
263  if (!select->sad)
264  return AVERROR(EINVAL);
265  }
266  return 0;
267 }
268 
270 {
271  double ret = 0;
272  SelectContext *select = ctx->priv;
273  AVFrame *prev_picref = select->prev_picref;
274 
275  if (prev_picref &&
276  frame->height == prev_picref->height &&
277  frame->width == prev_picref->width) {
278  uint64_t sad = 0;
279  double mafd, diff;
280  uint64_t count = 0;
281 
282  for (int plane = 0; plane < select->nb_planes; plane++) {
283  uint64_t plane_sad;
284  select->sad(prev_picref->data[plane], prev_picref->linesize[plane],
285  frame->data[plane], frame->linesize[plane],
286  select->width[plane], select->height[plane], &plane_sad);
287  sad += plane_sad;
288  count += select->width[plane] * select->height[plane];
289  }
290 
291  emms_c();
292  mafd = (double)sad / count / (1ULL << (select->bitdepth - 8));
293  diff = fabs(mafd - select->prev_mafd);
294  ret = av_clipf(FFMIN(mafd, diff) / 100., 0, 1);
295  select->prev_mafd = mafd;
296  av_frame_free(&prev_picref);
297  }
298  select->prev_picref = av_frame_clone(frame);
299  return ret;
300 }
301 
302 static double get_concatdec_select(AVFrame *frame, int64_t pts)
303 {
304  AVDictionary *metadata = frame->metadata;
305  AVDictionaryEntry *start_time_entry = av_dict_get(metadata, "lavf.concatdec.start_time", NULL, 0);
306  AVDictionaryEntry *duration_entry = av_dict_get(metadata, "lavf.concatdec.duration", NULL, 0);
307  if (start_time_entry) {
308  int64_t start_time = strtoll(start_time_entry->value, NULL, 10);
309  if (pts >= start_time) {
310  if (duration_entry) {
311  int64_t duration = strtoll(duration_entry->value, NULL, 10);
312  if (pts < start_time + duration)
313  return -1;
314  else
315  return 0;
316  }
317  return -1;
318  }
319  return 0;
320  }
321  return NAN;
322 }
323 
/* double -> timestamp: NaN maps to AV_NOPTS_VALUE. */
#define D2TS(d)  (isnan(d) ? AV_NOPTS_VALUE : (int64_t)(d))
/* timestamp -> double: AV_NOPTS_VALUE maps to NaN. */
#define TS2D(ts) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts))
326 
328 {
329  SelectContext *select = ctx->priv;
330  AVFilterLink *inlink = ctx->inputs[0];
331  double res;
332 
333  if (isnan(select->var_values[VAR_START_PTS]))
334  select->var_values[VAR_START_PTS] = TS2D(frame->pts);
335  if (isnan(select->var_values[VAR_START_T]))
336  select->var_values[VAR_START_T] = TS2D(frame->pts) * av_q2d(inlink->time_base);
337 
338  select->var_values[VAR_N ] = inlink->frame_count_out;
339  select->var_values[VAR_PTS] = TS2D(frame->pts);
340  select->var_values[VAR_T ] = TS2D(frame->pts) * av_q2d(inlink->time_base);
341  select->var_values[VAR_POS] = frame->pkt_pos == -1 ? NAN : frame->pkt_pos;
342  select->var_values[VAR_KEY] = frame->key_frame;
344 
345  switch (inlink->type) {
346  case AVMEDIA_TYPE_AUDIO:
347  select->var_values[VAR_SAMPLES_N] = frame->nb_samples;
348  break;
349 
350  case AVMEDIA_TYPE_VIDEO:
351  select->var_values[VAR_INTERLACE_TYPE] =
354  select->var_values[VAR_PICT_TYPE] = frame->pict_type;
355  if (select->do_scene_detect) {
356  char buf[32];
357  select->var_values[VAR_SCENE] = get_scene_score(ctx, frame);
358  // TODO: document metadata
359  snprintf(buf, sizeof(buf), "%f", select->var_values[VAR_SCENE]);
360  av_dict_set(&frame->metadata, "lavfi.scene_score", buf, 0);
361  }
362  break;
363  }
364 
365  select->select = res = av_expr_eval(select->expr, select->var_values, NULL);
366  av_log(inlink->dst, AV_LOG_DEBUG,
367  "n:%f pts:%f t:%f key:%d",
368  select->var_values[VAR_N],
369  select->var_values[VAR_PTS],
370  select->var_values[VAR_T],
371  frame->key_frame);
372 
373  switch (inlink->type) {
374  case AVMEDIA_TYPE_VIDEO:
375  av_log(inlink->dst, AV_LOG_DEBUG, " interlace_type:%c pict_type:%c scene:%f",
376  (!frame->interlaced_frame) ? 'P' :
377  frame->top_field_first ? 'T' : 'B',
379  select->var_values[VAR_SCENE]);
380  break;
381  case AVMEDIA_TYPE_AUDIO:
382  av_log(inlink->dst, AV_LOG_DEBUG, " samples_n:%d consumed_samples_n:%f",
383  frame->nb_samples,
385  break;
386  }
387 
388  if (res == 0) {
389  select->select_out = -1; /* drop */
390  } else if (isnan(res) || res < 0) {
391  select->select_out = 0; /* first output */
392  } else {
393  select->select_out = FFMIN(ceilf(res)-1, select->nb_outputs-1); /* other outputs */
394  }
395 
396  av_log(inlink->dst, AV_LOG_DEBUG, " -> select:%f select_out:%d\n", res, select->select_out);
397 
398  if (res) {
399  select->var_values[VAR_PREV_SELECTED_N] = select->var_values[VAR_N];
401  select->var_values[VAR_PREV_SELECTED_T] = select->var_values[VAR_T];
402  select->var_values[VAR_SELECTED_N] += 1.0;
403  if (inlink->type == AVMEDIA_TYPE_AUDIO)
404  select->var_values[VAR_CONSUMED_SAMPLES_N] += frame->nb_samples;
405  }
406 
407  select->var_values[VAR_PREV_PTS] = select->var_values[VAR_PTS];
408  select->var_values[VAR_PREV_T] = select->var_values[VAR_T];
409 }
410 
412 {
413  AVFilterContext *ctx = inlink->dst;
414  SelectContext *select = ctx->priv;
415 
416  select_frame(ctx, frame);
417  if (select->select)
418  return ff_filter_frame(ctx->outputs[select->select_out], frame);
419 
420  av_frame_free(&frame);
421  return 0;
422 }
423 
424 static int request_frame(AVFilterLink *outlink)
425 {
426  AVFilterLink *inlink = outlink->src->inputs[0];
427  int ret = ff_request_frame(inlink);
428  return ret;
429 }
430 
432 {
433  SelectContext *select = ctx->priv;
434  int i;
435 
436  av_expr_free(select->expr);
437  select->expr = NULL;
438 
439  for (i = 0; i < ctx->nb_outputs; i++)
440  av_freep(&ctx->output_pads[i].name);
441 
442  if (select->do_scene_detect) {
443  av_frame_free(&select->prev_picref);
444  }
445 }
446 
447 #if CONFIG_ASELECT_FILTER
448 
450 AVFILTER_DEFINE_CLASS(aselect);
451 
452 static av_cold int aselect_init(AVFilterContext *ctx)
453 {
454  SelectContext *select = ctx->priv;
455  int ret;
456 
457  if ((ret = init(ctx)) < 0)
458  return ret;
459 
460  if (select->do_scene_detect) {
461  av_log(ctx, AV_LOG_ERROR, "Scene detection is ignored in aselect filter\n");
462  return AVERROR(EINVAL);
463  }
464 
465  return 0;
466 }
467 
/* Single audio input pad: frames are evaluated in filter_frame() after
 * per-link setup in config_input(). */
static const AVFilterPad avfilter_af_aselect_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_input,
        .filter_frame = filter_frame,
    },
    { NULL }
};
477 
479  .name = "aselect",
480  .description = NULL_IF_CONFIG_SMALL("Select audio frames to pass in output."),
481  .init = aselect_init,
482  .uninit = uninit,
483  .priv_size = sizeof(SelectContext),
484  .inputs = avfilter_af_aselect_inputs,
485  .priv_class = &aselect_class,
487 };
488 #endif /* CONFIG_ASELECT_FILTER */
489 
490 #if CONFIG_SELECT_FILTER
491 
492 static int query_formats(AVFilterContext *ctx)
493 {
494  SelectContext *select = ctx->priv;
495 
496  if (!select->do_scene_detect) {
497  return ff_default_query_formats(ctx);
498  } else {
499  int ret;
500  static const enum AVPixelFormat pix_fmts[] = {
507  };
508  AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
509 
510  if (!fmts_list)
511  return AVERROR(ENOMEM);
512  ret = ff_set_common_formats(ctx, fmts_list);
513  if (ret < 0)
514  return ret;
515  }
516  return 0;
517 }
518 
521 
522 static av_cold int select_init(AVFilterContext *ctx)
523 {
524  int ret;
525 
526  if ((ret = init(ctx)) < 0)
527  return ret;
528 
529  return 0;
530 }
531 
/* Single video input pad: frames are evaluated in filter_frame() after
 * per-link setup in config_input(). */
static const AVFilterPad avfilter_vf_select_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input,
        .filter_frame = filter_frame,
    },
    { NULL }
};
541 
543  .name = "select",
544  .description = NULL_IF_CONFIG_SMALL("Select video frames to pass in output."),
545  .init = select_init,
546  .uninit = uninit,
547  .query_formats = query_formats,
548  .priv_size = sizeof(SelectContext),
549  .priv_class = &select_class,
550  .inputs = avfilter_vf_select_inputs,
552 };
553 #endif /* CONFIG_SELECT_FILTER */
int plane
Definition: avisynth_c.h:384
#define NULL
Definition: coverity.c:32
void(* ff_scene_sad_fn)(SCENE_SAD_PARAMS)
Definition: scene_sad.h:34
int av_image_get_linesize(enum AVPixelFormat pix_fmt, int width, int plane)
Compute the size of an image line with format pix_fmt and width width for the plane plane...
Definition: imgutils.c:76
BI type.
Definition: avutil.h:280
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2522
This structure describes decoded (raw) audio or video data.
Definition: frame.h:295
int64_t pkt_pos
reordered pos from the last AVPacket that has been input into the decoder
Definition: frame.h:566
static av_cold void uninit(AVFilterContext *ctx)
Definition: f_select.c:431
misc image utilities
int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2562
Main libavfilter public API header.
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:68
ff_scene_sad_fn ff_scene_sad_get_fn(int depth)
Definition: scene_sad.c:59
const char * desc
Definition: nvenc.c:68
#define AV_OPT_FLAG_AUDIO_PARAM
Definition: opt.h:278
#define INTERLACE_TYPE_P
Definition: f_select.c:204
enum AVMediaType type
AVFilterPad type.
Definition: internal.h:65
int av_expr_parse(AVExpr **expr, const char *s, const char *const *const_names, const char *const *func1_names, double(*const *funcs1)(void *, double), const char *const *func2_names, double(*const *funcs2)(void *, double, double), int log_offset, void *log_ctx)
Parse an expression.
Definition: eval.c:679
static double get_concatdec_select(AVFrame *frame, int64_t pts)
Definition: f_select.c:302
AVFilterFormats * ff_make_format_list(const int *fmts)
Create a list of supported formats.
Definition: formats.c:283
Switching Intra.
Definition: avutil.h:278
const char * name
Pad name.
Definition: internal.h:60
AVFilterLink ** inputs
array of pointers to input links
Definition: avfilter.h:346
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1080
AVFilterPad * output_pads
array of output pads
Definition: avfilter.h:349
static int64_t start_time
Definition: ffplay.c:331
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:117
#define av_cold
Definition: attributes.h:82
ff_scene_sad_fn sad
Sum of the absolute difference function (scene detect only)
Definition: f_select.c:154
AVOptions.
static void select_frame(AVFilterContext *ctx, AVFrame *frame)
Definition: f_select.c:327
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:388
Definition: eval.c:157
int nb_outputs
Definition: f_select.c:159
#define INTERLACE_TYPE_T
Definition: f_select.c:205
int64_t duration
Definition: movenc.c:63
packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
Definition: pixfmt.h:94
char * expr_str
Definition: f_select.c:146
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:40
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:88
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:79
AVDictionary * metadata
metadata.
Definition: frame.h:581
int interlaced_frame
The content of the picture is interlaced.
Definition: frame.h:442
#define AVFILTER_FLAG_DYNAMIC_OUTPUTS
The number of the filter outputs is not determined just by AVFilter.outputs.
Definition: avfilter.h:111
#define av_log(a,...)
AVFilter ff_vf_select
A filter pad used for either input or output.
Definition: internal.h:54
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
Scene SAD functions.
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:259
int width
Definition: frame.h:353
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
int ff_set_common_formats(AVFilterContext *ctx, AVFilterFormats *formats)
A helper for query_formats() which sets all links to the same list of formats.
Definition: formats.c:568
uint8_t log2_chroma_h
Amount to shift the luma height right to find the chroma height.
Definition: pixdesc.h:101
#define AV_OPT_FLAG_FILTERING_PARAM
a generic parameter which can be set by the user for filtering
Definition: opt.h:291
double var_values[VAR_VARS_NB]
Definition: f_select.c:148
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:202
unsigned nb_outputs
number of output pads
Definition: avfilter.h:351
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:186
packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
Definition: pixfmt.h:95
void * priv
private data for use by the filter
Definition: avfilter.h:353
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
GLsizei count
Definition: opengl_enc.c:108
packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
Definition: pixfmt.h:93
int select_out
mark the selected output pad index
Definition: f_select.c:158
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
char * av_asprintf(const char *fmt,...)
Definition: avstring.c:113
var_name
Definition: aeval.c:46
static int config_input(AVFilterLink *inlink)
Definition: f_select.c:208
common internal API header
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:378
#define NAN
Definition: mathematics.h:64
#define FFMIN(a, b)
Definition: common.h:96
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:78
AVFormatContext * ctx
Definition: movenc.c:48
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:69
ptrdiff_t width[4]
Definition: f_select.c:151
AVFrame * av_frame_clone(const AVFrame *src)
Create a new frame that references the same data as src.
Definition: frame.c:540
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
Definition: f_select.c:411
const AVFilterPad * inputs
List of inputs, terminated by a zeroed element.
Definition: avfilter.h:164
int ff_default_query_formats(AVFilterContext *ctx)
Definition: formats.c:597
void av_expr_free(AVExpr *e)
Free a parsed expression previously created with av_expr_parse().
Definition: eval.c:334
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:260
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:326
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
#define AV_OPT_FLAG_VIDEO_PARAM
Definition: opt.h:279
static double get_scene_score(AVFilterContext *ctx, AVFrame *frame)
Definition: f_select.c:269
a very simple circular buffer FIFO implementation
void * buf
Definition: avisynth_c.h:766
double prev_mafd
previous MAFD (scene detect only)
Definition: f_select.c:155
AVFilter ff_af_aselect
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:70
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:387
Switching Predicted.
Definition: avutil.h:279
Describe the class of an AVClass context structure.
Definition: log.h:67
Filter definition.
Definition: avfilter.h:144
#define isnan(x)
Definition: libm.h:340
#define TS2D(ts)
Definition: f_select.c:325
const char * name
Filter name.
Definition: avfilter.h:148
#define snprintf
Definition: snprintf.h:34
AVFilterLink ** outputs
array of pointers to output links
Definition: avfilter.h:350
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:275
static int64_t pts
static int request_frame(AVFilterLink *outlink)
Definition: f_select.c:424
#define flags(name, subs,...)
Definition: cbs_av1.c:561
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:309
int do_scene_detect
1 if the expression requires scene detection variables, 0 otherwise
Definition: f_select.c:153
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
AVFrame * prev_picref
previous frame (scene detect only)
Definition: f_select.c:156
#define DEFINE_OPTIONS(filt_name, FLAGS)
Definition: f_select.c:163
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
Y , 8bpp.
Definition: pixfmt.h:74
static int query_formats(AVFilterContext *ctx)
Definition: aeval.c:244
ptrdiff_t height[4]
Definition: f_select.c:152
static av_cold int init(AVFilterContext *ctx)
Definition: f_select.c:174
Bi-dir predicted.
Definition: avutil.h:276
static av_always_inline int diff(const uint32_t a, const uint32_t b)
char * value
Definition: dict.h:87
int top_field_first
If the content is interlaced, is top field displayed first.
Definition: frame.h:447
double av_expr_eval(AVExpr *e, const double *const_values, void *opaque)
Evaluate a previously parsed expression.
Definition: eval.c:734
#define AVFILTER_DEFINE_CLASS(fname)
Definition: internal.h:334
AVExpr * expr
Definition: f_select.c:147
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:373
A list of supported formats for one end of a filter link.
Definition: formats.h:64
int(* request_frame)(AVFilterLink *link)
Frame request callback.
Definition: internal.h:113
An instance of a filter.
Definition: avfilter.h:338
static const char *const var_names[]
Definition: f_select.c:40
int height
Definition: frame.h:353
#define av_freep(p)
int ff_request_frame(AVFilterLink *link)
Request an input frame from the filter at the other end of the link.
Definition: avfilter.c:407
#define INTERLACE_TYPE_B
Definition: f_select.c:206
internal API functions
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Frame references ownership and permissions
int depth
Number of bits in the component.
Definition: pixdesc.h:58
static int ff_insert_outpad(AVFilterContext *f, unsigned index, AVFilterPad *p)
Insert a new output pad for the filter.
Definition: internal.h:285
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
double select
Definition: f_select.c:157
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:361
const AVFilter * filter
the AVFilter of which this is an instance
Definition: avfilter.h:341
Predicted.
Definition: avutil.h:275
simple arithmetic expression evaluator
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:58