FFmpeg
ffmpeg_filter.c
Go to the documentation of this file.
1 /*
2  * ffmpeg filter configuration
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <stdint.h>
22 
23 #include "ffmpeg.h"
24 #include "graph/graphprint.h"
25 
26 #include "libavfilter/avfilter.h"
27 #include "libavfilter/buffersink.h"
28 #include "libavfilter/buffersrc.h"
29 
30 #include "libavutil/avassert.h"
31 #include "libavutil/avstring.h"
32 #include "libavutil/bprint.h"
34 #include "libavutil/downmix_info.h"
35 #include "libavutil/mem.h"
36 #include "libavutil/opt.h"
37 #include "libavutil/pixdesc.h"
38 #include "libavutil/pixfmt.h"
39 #include "libavutil/samplefmt.h"
40 #include "libavutil/time.h"
41 #include "libavutil/timestamp.h"
42 
43 typedef struct FilterGraphPriv {
45 
46  // name used for logging
47  char log_name[32];
48 
49  int is_simple;
50  // true when the filtergraph contains only meta filters
51  // that do not modify the frame data
52  int is_meta;
53  // source filters are present in the graph
56 
57  unsigned nb_outputs_done;
58 
60 
61  // frame for temporarily holding output from the filtergraph
63  // frame for sending output to the encoder
65 
67  unsigned sch_idx;
69 
71 {
72  return (FilterGraphPriv*)fg;
73 }
74 
75 static const FilterGraphPriv *cfgp_from_cfg(const FilterGraph *fg)
76 {
77  return (const FilterGraphPriv*)fg;
78 }
79 
80 // data that is local to the filter thread and not visible outside of it
81 typedef struct FilterGraphThread {
83 
85 
86  // Temporary buffer for output frames, since on filtergraph reset
87  // we cannot send them to encoders immediately.
88  // The output index is stored in frame opaque.
90 
91  // index of the next input to request from the scheduler
92  unsigned next_in;
93  // set to 1 after at least one frame passed through this output
94  int got_frame;
95 
96  // EOF status of each input/output, as received by the thread
97  uint8_t *eof_in;
98  uint8_t *eof_out;
100 
101 typedef struct InputFilterPriv {
103 
105 
106  // used to hold submitted input
108 
109  // For inputs bound to a filtergraph output
111 
112  // source data type: AVMEDIA_TYPE_SUBTITLE for sub2video,
113  // same as type otherwise
115 
116  int eof;
117  int bound;
119  uint64_t nb_dropped;
120 
121  // parameters configured for this input
122  int format;
123 
124  int width, height;
129 
132 
134 
137 
139 
141 
145 
148 
149  struct {
150  AVFrame *frame;
151 
154 
155  /// marks if sub2video_update should force an initialization
156  unsigned int initialize;
157  } sub2video;
159 
161 {
162  return (InputFilterPriv*)ifilter;
163 }
164 
165 typedef struct FPSConvContext {
167  /* number of frames emitted by the video-encoding sync code */
169  /* history of nb_frames_prev, i.e. the number of times the
170  * previous frame was duplicated by vsync code in recent
171  * do_video_out() calls */
173 
174  uint64_t dup_warning;
175 
178 
180 
186 
187 typedef struct OutputFilterPriv {
189 
190  void *log_parent;
191  char log_name[32];
192 
193  int needed;
194 
195  /* desired output stream properties */
196  int format;
197  int width, height;
203 
204  unsigned crop_top;
205  unsigned crop_bottom;
206  unsigned crop_left;
207  unsigned crop_right;
208 
211 
212  // time base in which the output is sent to our downstream
213  // does not need to match the filtersink's timebase
215  // at least one frame with the above timebase was sent
216  // to our downstream, so it cannot change anymore
218 
220 
223 
224  // those are only set if no format is specified and the encoder gives us multiple options
225  // They point directly to the relevant lists of the encoder.
226  const int *formats;
228  const int *sample_rates;
232 
234 
238  // offset for output timestamps, in AV_TIME_BASE_Q
242 
243  unsigned flags;
245 
247 {
248  return (OutputFilterPriv*)ofilter;
249 }
250 
251 typedef struct FilterCommand {
252  char *target;
253  char *command;
254  char *arg;
255 
256  double time;
258 } FilterCommand;
259 
260 static void filter_command_free(void *opaque, uint8_t *data)
261 {
263 
264  av_freep(&fc->target);
265  av_freep(&fc->command);
266  av_freep(&fc->arg);
267 
268  av_free(data);
269 }
270 
272 {
273  AVFrame *frame = ifp->sub2video.frame;
274  int ret;
275 
277 
278  frame->width = ifp->width;
279  frame->height = ifp->height;
280  frame->format = ifp->format;
281  frame->colorspace = ifp->color_space;
282  frame->color_range = ifp->color_range;
283  frame->alpha_mode = ifp->alpha_mode;
284 
286  if (ret < 0)
287  return ret;
288 
289  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
290 
291  return 0;
292 }
293 
294 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
295  AVSubtitleRect *r)
296 {
297  uint32_t *pal, *dst2;
298  uint8_t *src, *src2;
299  int x, y;
300 
301  if (r->type != SUBTITLE_BITMAP) {
302  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
303  return;
304  }
305  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
306  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
307  r->x, r->y, r->w, r->h, w, h
308  );
309  return;
310  }
311 
312  dst += r->y * dst_linesize + r->x * 4;
313  src = r->data[0];
314  pal = (uint32_t *)r->data[1];
315  for (y = 0; y < r->h; y++) {
316  dst2 = (uint32_t *)dst;
317  src2 = src;
318  for (x = 0; x < r->w; x++)
319  *(dst2++) = pal[*(src2++)];
320  dst += dst_linesize;
321  src += r->linesize[0];
322  }
323 }
324 
326 {
327  AVFrame *frame = ifp->sub2video.frame;
328  int ret;
329 
330  av_assert1(frame->data[0]);
331  ifp->sub2video.last_pts = frame->pts = pts;
335  if (ret != AVERROR_EOF && ret < 0)
337  "Error while add the frame to buffer source(%s).\n",
338  av_err2str(ret));
339 }
340 
341 static void sub2video_update(InputFilterPriv *ifp, int64_t heartbeat_pts,
342  const AVSubtitle *sub)
343 {
344  AVFrame *frame = ifp->sub2video.frame;
345  int8_t *dst;
346  int dst_linesize;
347  int num_rects;
348  int64_t pts, end_pts;
349 
350  if (sub) {
351  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
352  AV_TIME_BASE_Q, ifp->time_base);
353  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
354  AV_TIME_BASE_Q, ifp->time_base);
355  num_rects = sub->num_rects;
356  } else {
357  /* If we are initializing the system, utilize current heartbeat
358  PTS as the start time, and show until the following subpicture
359  is received. Otherwise, utilize the previous subpicture's end time
360  as the fall-back value. */
361  pts = ifp->sub2video.initialize ?
362  heartbeat_pts : ifp->sub2video.end_pts;
363  end_pts = INT64_MAX;
364  num_rects = 0;
365  }
366  if (sub2video_get_blank_frame(ifp) < 0) {
368  "Impossible to get a blank canvas.\n");
369  return;
370  }
371  dst = frame->data [0];
372  dst_linesize = frame->linesize[0];
373  for (int i = 0; i < num_rects; i++)
374  sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
375  sub2video_push_ref(ifp, pts);
376  ifp->sub2video.end_pts = end_pts;
377  ifp->sub2video.initialize = 0;
378 }
379 
/* Define a function for appending a list of allowed formats
 * to an AVBPrint. If nonempty, the list will have a header.
 *
 * The generated chooser emits "name=value:" when ofp->var holds an explicit
 * value (!= none), otherwise "name=v1|v2|...:" enumerating the none-terminated
 * ofp->supported_list. Nothing is written when neither is set. Each entry is
 * formatted with printf_format after being mapped through get_name(). */
#define DEF_CHOOSE_FORMAT(name, type, var, supported_list, none, printf_format, get_name) \
static void choose_ ## name (OutputFilterPriv *ofp, AVBPrint *bprint) \
{ \
    if (ofp->var == none && !ofp->supported_list) \
        return; \
    av_bprintf(bprint, #name "="); \
    if (ofp->var != none) { \
        av_bprintf(bprint, printf_format, get_name(ofp->var)); \
    } else { \
        const type *p; \
\
        for (p = ofp->supported_list; *p != none; p++) { \
            av_bprintf(bprint, printf_format "|", get_name(*p)); \
        } \
        if (bprint->len > 0) \
            bprint->str[--bprint->len] = '\0'; \
    } \
    av_bprint_chars(bprint, ':', 1); \
}
401 
404 
407 
409  "%d", )
410 
411 DEF_CHOOSE_FORMAT(color_spaces, enum AVColorSpace, color_space, color_spaces,
413 
414 DEF_CHOOSE_FORMAT(color_ranges, enum AVColorRange, color_range, color_ranges,
416 
417 DEF_CHOOSE_FORMAT(alpha_modes, enum AVAlphaMode, alpha_mode, alpha_modes,
419 
420 static void choose_channel_layouts(OutputFilterPriv *ofp, AVBPrint *bprint)
421 {
422  if (av_channel_layout_check(&ofp->ch_layout)) {
423  av_bprintf(bprint, "channel_layouts=");
424  av_channel_layout_describe_bprint(&ofp->ch_layout, bprint);
425  } else if (ofp->ch_layouts) {
426  const AVChannelLayout *p;
427 
428  av_bprintf(bprint, "channel_layouts=");
429  for (p = ofp->ch_layouts; p->nb_channels; p++) {
431  av_bprintf(bprint, "|");
432  }
433  if (bprint->len > 0)
434  bprint->str[--bprint->len] = '\0';
435  } else
436  return;
437  av_bprint_chars(bprint, ':', 1);
438 }
439 
440 static int read_binary(void *logctx, const char *path,
441  uint8_t **data, int *len)
442 {
443  AVIOContext *io = NULL;
444  int64_t fsize;
445  int ret;
446 
447  *data = NULL;
448  *len = 0;
449 
450  ret = avio_open2(&io, path, AVIO_FLAG_READ, &int_cb, NULL);
451  if (ret < 0) {
452  av_log(logctx, AV_LOG_ERROR, "Cannot open file '%s': %s\n",
453  path, av_err2str(ret));
454  return ret;
455  }
456 
457  fsize = avio_size(io);
458  if (fsize < 0 || fsize > INT_MAX) {
459  av_log(logctx, AV_LOG_ERROR, "Cannot obtain size of file %s\n", path);
460  ret = AVERROR(EIO);
461  goto fail;
462  }
463 
464  *data = av_malloc(fsize);
465  if (!*data) {
466  ret = AVERROR(ENOMEM);
467  goto fail;
468  }
469 
470  ret = avio_read(io, *data, fsize);
471  if (ret != fsize) {
472  av_log(logctx, AV_LOG_ERROR, "Error reading file %s\n", path);
473  ret = ret < 0 ? ret : AVERROR(EIO);
474  goto fail;
475  }
476 
477  *len = fsize;
478 
479  ret = 0;
480 fail:
481  avio_close(io);
482  if (ret < 0) {
483  av_freep(data);
484  *len = 0;
485  }
486  return ret;
487 }
488 
489 static int filter_opt_apply(void *logctx, AVFilterContext *f,
490  const char *key, const char *val)
491 {
492  const AVOption *o = NULL;
493  int ret;
494 
496  if (ret >= 0)
497  return 0;
498 
499  if (ret == AVERROR_OPTION_NOT_FOUND && key[0] == '/')
501  if (!o)
502  goto err_apply;
503 
504  // key is a valid option name prefixed with '/'
505  // interpret value as a path from which to load the actual option value
506  key++;
507 
508  if (o->type == AV_OPT_TYPE_BINARY) {
509  uint8_t *data;
510  int len;
511 
512  ret = read_binary(logctx, val, &data, &len);
513  if (ret < 0)
514  goto err_load;
515 
517  av_freep(&data);
518  } else {
519  char *data = read_file_to_string(val);
520  if (!data) {
521  ret = AVERROR(EIO);
522  goto err_load;
523  }
524 
526  av_freep(&data);
527  }
528  if (ret < 0)
529  goto err_apply;
530 
531  return 0;
532 
533 err_apply:
534  av_log(logctx, AV_LOG_ERROR,
535  "Error applying option '%s' to filter '%s': %s\n",
536  key, f->filter->name, av_err2str(ret));
537  return ret;
538 err_load:
539  av_log(logctx, AV_LOG_ERROR,
540  "Error loading value for option '%s' from file '%s'\n",
541  key, val);
542  return ret;
543 }
544 
545 static int graph_opts_apply(void *logctx, AVFilterGraphSegment *seg)
546 {
547  for (size_t i = 0; i < seg->nb_chains; i++) {
548  AVFilterChain *ch = seg->chains[i];
549 
550  for (size_t j = 0; j < ch->nb_filters; j++) {
551  AVFilterParams *p = ch->filters[j];
552  const AVDictionaryEntry *e = NULL;
553 
554  av_assert0(p->filter);
555 
556  while ((e = av_dict_iterate(p->opts, e))) {
557  int ret = filter_opt_apply(logctx, p->filter, e->key, e->value);
558  if (ret < 0)
559  return ret;
560  }
561 
562  av_dict_free(&p->opts);
563  }
564  }
565 
566  return 0;
567 }
568 
569 static int graph_parse(void *logctx,
570  AVFilterGraph *graph, const char *desc,
572  AVBufferRef *hw_device)
573 {
575  int ret;
576 
577  *inputs = NULL;
578  *outputs = NULL;
579 
580  ret = avfilter_graph_segment_parse(graph, desc, 0, &seg);
581  if (ret < 0)
582  return ret;
583 
585  if (ret < 0)
586  goto fail;
587 
588  if (hw_device) {
589  for (int i = 0; i < graph->nb_filters; i++) {
590  AVFilterContext *f = graph->filters[i];
591 
592  if (!(f->filter->flags & AVFILTER_FLAG_HWDEVICE))
593  continue;
594  f->hw_device_ctx = av_buffer_ref(hw_device);
595  if (!f->hw_device_ctx) {
596  ret = AVERROR(ENOMEM);
597  goto fail;
598  }
599  }
600  }
601 
602  ret = graph_opts_apply(logctx, seg);
603  if (ret < 0)
604  goto fail;
605 
607 
608 fail:
610  return ret;
611 }
612 
613 // Filters can be configured only if the formats of all inputs are known.
615 {
616  for (int i = 0; i < fg->nb_inputs; i++) {
618  if (ifp->format < 0)
619  return 0;
620  }
621  return 1;
622 }
623 
624 static int filter_thread(void *arg);
625 
626 static char *describe_filter_link(FilterGraph *fg, AVFilterInOut *inout, int in)
627 {
628  AVFilterContext *ctx = inout->filter_ctx;
629  AVFilterPad *pads = in ? ctx->input_pads : ctx->output_pads;
630  int nb_pads = in ? ctx->nb_inputs : ctx->nb_outputs;
631 
632  if (nb_pads > 1)
633  return av_strdup(ctx->filter->name);
634  return av_asprintf("%s:%s", ctx->filter->name,
635  avfilter_pad_get_name(pads, inout->pad_idx));
636 }
637 
638 static const char *ofilter_item_name(void *obj)
639 {
640  OutputFilterPriv *ofp = obj;
641  return ofp->log_name;
642 }
643 
// AVClass for OutputFilterPriv: log messages are labelled with log_name and
// chained to the context found at log_parent (set up by the allocator).
static const AVClass ofilter_class = {
    .class_name                = "OutputFilter",
    .version                   = LIBAVUTIL_VERSION_INT,
    .item_name                 = ofilter_item_name,
    .parent_log_context_offset = offsetof(OutputFilterPriv, log_parent),
    .category                  = AV_CLASS_CATEGORY_FILTER,
};
651 
653 {
654  OutputFilterPriv *ofp;
655  OutputFilter *ofilter;
656 
657  ofp = allocate_array_elem(&fg->outputs, sizeof(*ofp), &fg->nb_outputs);
658  if (!ofp)
659  return NULL;
660 
661  ofilter = &ofp->ofilter;
662  ofilter->class = &ofilter_class;
663  ofp->log_parent = fg;
664  ofilter->graph = fg;
665  ofilter->type = type;
666  ofp->format = -1;
670  ofilter->index = fg->nb_outputs - 1;
671 
672  snprintf(ofp->log_name, sizeof(ofp->log_name), "%co%d",
673  av_get_media_type_string(type)[0], ofilter->index);
674 
675  return ofilter;
676 }
677 
678 static int ifilter_bind_ist(InputFilter *ifilter, InputStream *ist,
679  const ViewSpecifier *vs)
680 {
681  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
682  FilterGraphPriv *fgp = fgp_from_fg(ifilter->graph);
684  int ret;
685 
686  av_assert0(!ifp->bound);
687  ifp->bound = 1;
688 
689  if (ifilter->type != ist->par->codec_type &&
690  !(ifilter->type == AVMEDIA_TYPE_VIDEO && ist->par->codec_type == AVMEDIA_TYPE_SUBTITLE)) {
691  av_log(fgp, AV_LOG_ERROR, "Tried to connect %s stream to %s filtergraph input\n",
693  return AVERROR(EINVAL);
694  }
695 
696  ifp->type_src = ist->st->codecpar->codec_type;
697 
698  ifp->opts.fallback = av_frame_alloc();
699  if (!ifp->opts.fallback)
700  return AVERROR(ENOMEM);
701 
702  ret = ist_filter_add(ist, ifilter, filtergraph_is_simple(ifilter->graph),
703  vs, &ifp->opts, &src);
704  if (ret < 0)
705  return ret;
706 
707  ifilter->input_name = av_strdup(ifp->opts.name);
708  if (!ifilter->input_name)
709  return AVERROR(EINVAL);
710 
711  ret = sch_connect(fgp->sch,
712  src, SCH_FILTER_IN(fgp->sch_idx, ifilter->index));
713  if (ret < 0)
714  return ret;
715 
716  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE) {
717  ifp->sub2video.frame = av_frame_alloc();
718  if (!ifp->sub2video.frame)
719  return AVERROR(ENOMEM);
720 
721  ifp->width = ifp->opts.sub2video_width;
722  ifp->height = ifp->opts.sub2video_height;
723 
724  /* rectangles are AV_PIX_FMT_PAL8, but we have no guarantee that the
725  palettes for all rectangles are identical or compatible */
726  ifp->format = AV_PIX_FMT_RGB32;
727 
728  ifp->time_base = AV_TIME_BASE_Q;
729 
730  av_log(fgp, AV_LOG_VERBOSE, "sub2video: using %dx%d canvas\n",
731  ifp->width, ifp->height);
732  }
733 
734  return 0;
735 }
736 
738  const ViewSpecifier *vs)
739 {
742  int ret;
743 
744  av_assert0(!ifp->bound);
745  ifp->bound = 1;
746 
747  if (ifp->ifilter.type != dec->type) {
748  av_log(fgp, AV_LOG_ERROR, "Tried to connect %s decoder to %s filtergraph input\n",
750  return AVERROR(EINVAL);
751  }
752 
753  ifp->type_src = ifp->ifilter.type;
754 
755  ret = dec_filter_add(dec, &ifp->ifilter, &ifp->opts, vs, &src);
756  if (ret < 0)
757  return ret;
758 
759  ifp->ifilter.input_name = av_strdup(ifp->opts.name);
760  if (!ifp->ifilter.input_name)
761  return AVERROR(EINVAL);
762 
763  ret = sch_connect(fgp->sch, src, SCH_FILTER_IN(fgp->sch_idx, ifp->ifilter.index));
764  if (ret < 0)
765  return ret;
766 
767  return 0;
768 }
769 
770 static int set_channel_layout(OutputFilterPriv *f, const AVChannelLayout *layouts_allowed,
771  const AVChannelLayout *layout_requested)
772 {
773  int i, err;
774 
775  if (layout_requested->order != AV_CHANNEL_ORDER_UNSPEC) {
776  /* Pass the layout through for all orders but UNSPEC */
777  err = av_channel_layout_copy(&f->ch_layout, layout_requested);
778  if (err < 0)
779  return err;
780  return 0;
781  }
782 
783  /* Requested layout is of order UNSPEC */
784  if (!layouts_allowed) {
785  /* Use the default native layout for the requested amount of channels when the
786  encoder doesn't have a list of supported layouts */
787  av_channel_layout_default(&f->ch_layout, layout_requested->nb_channels);
788  return 0;
789  }
790  /* Encoder has a list of supported layouts. Pick the first layout in it with the
791  same amount of channels as the requested layout */
792  for (i = 0; layouts_allowed[i].nb_channels; i++) {
793  if (layouts_allowed[i].nb_channels == layout_requested->nb_channels)
794  break;
795  }
796  if (layouts_allowed[i].nb_channels) {
797  /* Use it if one is found */
798  err = av_channel_layout_copy(&f->ch_layout, &layouts_allowed[i]);
799  if (err < 0)
800  return err;
801  return 0;
802  }
803  /* If no layout for the amount of channels requested was found, use the default
804  native layout for it. */
805  av_channel_layout_default(&f->ch_layout, layout_requested->nb_channels);
806 
807  return 0;
808 }
809 
810 int ofilter_bind_enc(OutputFilter *ofilter, unsigned sched_idx_enc,
811  const OutputFilterOptions *opts)
812 {
813  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
814  FilterGraph *fg = ofilter->graph;
815  FilterGraphPriv *fgp = fgp_from_fg(fg);
816  int ret;
817 
818  av_assert0(!ofilter->bound);
819  av_assert0(!opts->enc ||
820  ofilter->type == opts->enc->type);
821 
822  ofp->needed = ofilter->bound = 1;
823  av_freep(&ofilter->linklabel);
824 
825  ofp->flags |= opts->flags;
826  ofp->ts_offset = opts->ts_offset;
827  ofp->enc_timebase = opts->output_tb;
828 
829  ofp->trim_start_us = opts->trim_start_us;
830  ofp->trim_duration_us = opts->trim_duration_us;
831 
832  ofilter->output_name = av_strdup(opts->name);
833  if (!ofilter->output_name)
834  return AVERROR(EINVAL);
835 
836  ret = av_dict_copy(&ofp->sws_opts, opts->sws_opts, 0);
837  if (ret < 0)
838  return ret;
839 
840  ret = av_dict_copy(&ofp->swr_opts, opts->swr_opts, 0);
841  if (ret < 0)
842  return ret;
843 
844  if (opts->flags & OFILTER_FLAG_AUDIO_24BIT)
845  av_dict_set(&ofp->swr_opts, "output_sample_bits", "24", 0);
846 
847  if (fgp->is_simple) {
848  // for simple filtergraph there is just one output,
849  // so use only graph-level information for logging
850  ofp->log_parent = NULL;
851  av_strlcpy(ofp->log_name, fgp->log_name, sizeof(ofp->log_name));
852  } else
853  av_strlcatf(ofp->log_name, sizeof(ofp->log_name), "->%s", ofilter->output_name);
854 
855  switch (ofilter->type) {
856  case AVMEDIA_TYPE_VIDEO:
857  ofp->width = opts->width;
858  ofp->height = opts->height;
859  if (opts->format != AV_PIX_FMT_NONE) {
860  ofp->format = opts->format;
861  } else
862  ofp->formats = opts->formats;
863 
864  if (opts->color_space != AVCOL_SPC_UNSPECIFIED)
865  ofp->color_space = opts->color_space;
866  else
867  ofp->color_spaces = opts->color_spaces;
868 
869  if (opts->color_range != AVCOL_RANGE_UNSPECIFIED)
870  ofp->color_range = opts->color_range;
871  else
872  ofp->color_ranges = opts->color_ranges;
873 
874  if (opts->alpha_mode != AVALPHA_MODE_UNSPECIFIED)
875  ofp->alpha_mode = opts->alpha_mode;
876  else
877  ofp->alpha_modes = opts->alpha_modes;
878 
880 
881  ofp->fps.last_frame = av_frame_alloc();
882  if (!ofp->fps.last_frame)
883  return AVERROR(ENOMEM);
884 
885  ofp->fps.vsync_method = opts->vsync_method;
886  ofp->fps.framerate = opts->frame_rate;
887  ofp->fps.framerate_max = opts->max_frame_rate;
888  ofp->fps.framerate_supported = opts->frame_rates;
889 
890  // reduce frame rate for mpeg4 to be within the spec limits
891  if (opts->enc && opts->enc->id == AV_CODEC_ID_MPEG4)
892  ofp->fps.framerate_clip = 65535;
893 
894  ofp->fps.dup_warning = 1000;
895 
896  break;
897  case AVMEDIA_TYPE_AUDIO:
898  if (opts->format != AV_SAMPLE_FMT_NONE) {
899  ofp->format = opts->format;
900  } else {
901  ofp->formats = opts->formats;
902  }
903  if (opts->sample_rate) {
904  ofp->sample_rate = opts->sample_rate;
905  } else
906  ofp->sample_rates = opts->sample_rates;
907  if (opts->ch_layout.nb_channels) {
908  int ret = set_channel_layout(ofp, opts->ch_layouts, &opts->ch_layout);
909  if (ret < 0)
910  return ret;
911  } else {
912  ofp->ch_layouts = opts->ch_layouts;
913  }
914  break;
915  }
916 
917  ret = sch_connect(fgp->sch, SCH_FILTER_OUT(fgp->sch_idx, ofilter->index),
918  SCH_ENC(sched_idx_enc));
919  if (ret < 0)
920  return ret;
921 
922  return 0;
923 }
924 
926  const OutputFilterOptions *opts)
927 {
928  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
929 
930  av_assert0(!ofilter->bound);
931  av_assert0(ofilter->type == ifp->ifilter.type);
932 
933  ofp->needed = ofilter->bound = 1;
934  av_freep(&ofilter->linklabel);
935 
936  ofilter->output_name = av_strdup(opts->name);
937  if (!ofilter->output_name)
938  return AVERROR(EINVAL);
939 
940  ifp->ofilter_src = ofilter;
941 
942  av_strlcatf(ofp->log_name, sizeof(ofp->log_name), "->%s", ofilter->output_name);
943 
944  return 0;
945 }
946 
947 static int ifilter_bind_fg(InputFilterPriv *ifp, FilterGraph *fg_src, int out_idx)
948 {
950  OutputFilter *ofilter_src = fg_src->outputs[out_idx];
952  char name[32];
953  int ret;
954 
955  av_assert0(!ifp->bound);
956  ifp->bound = 1;
957 
958  if (ifp->ifilter.type != ofilter_src->type) {
959  av_log(fgp, AV_LOG_ERROR, "Tried to connect %s output to %s input\n",
960  av_get_media_type_string(ofilter_src->type),
962  return AVERROR(EINVAL);
963  }
964 
965  ifp->type_src = ifp->ifilter.type;
966 
967  memset(&opts, 0, sizeof(opts));
968 
969  snprintf(name, sizeof(name), "fg:%d:%d", fgp->fg.index, ifp->ifilter.index);
970  opts.name = name;
971 
972  ret = ofilter_bind_ifilter(ofilter_src, ifp, &opts);
973  if (ret < 0)
974  return ret;
975 
976  ret = sch_connect(fgp->sch, SCH_FILTER_OUT(fg_src->index, out_idx),
977  SCH_FILTER_IN(fgp->sch_idx, ifp->ifilter.index));
978  if (ret < 0)
979  return ret;
980 
981  return 0;
982 }
983 
985 {
986  InputFilterPriv *ifp;
987  InputFilter *ifilter;
988 
989  ifp = allocate_array_elem(&fg->inputs, sizeof(*ifp), &fg->nb_inputs);
990  if (!ifp)
991  return NULL;
992 
993  ifilter = &ifp->ifilter;
994  ifilter->graph = fg;
995 
996  ifp->frame = av_frame_alloc();
997  if (!ifp->frame)
998  return NULL;
999 
1000  ifilter->index = fg->nb_inputs - 1;
1001  ifp->format = -1;
1005 
1007  if (!ifp->frame_queue)
1008  return NULL;
1009 
1010  return ifilter;
1011 }
1012 
1014 {
1015  FilterGraph *fg = *pfg;
1016  FilterGraphPriv *fgp;
1017 
1018  if (!fg)
1019  return;
1020  fgp = fgp_from_fg(fg);
1021 
1022  for (int j = 0; j < fg->nb_inputs; j++) {
1023  InputFilter *ifilter = fg->inputs[j];
1024  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1025 
1026  if (ifp->frame_queue) {
1027  AVFrame *frame;
1028  while (av_fifo_read(ifp->frame_queue, &frame, 1) >= 0)
1029  av_frame_free(&frame);
1030  av_fifo_freep2(&ifp->frame_queue);
1031  }
1032  av_frame_free(&ifp->sub2video.frame);
1033 
1034  av_frame_free(&ifp->frame);
1035  av_frame_free(&ifp->opts.fallback);
1036 
1038  av_freep(&ifilter->linklabel);
1039  av_freep(&ifp->opts.name);
1041  av_freep(&ifilter->name);
1042  av_freep(&ifilter->input_name);
1043  av_freep(&fg->inputs[j]);
1044  }
1045  av_freep(&fg->inputs);
1046  for (int j = 0; j < fg->nb_outputs; j++) {
1047  OutputFilter *ofilter = fg->outputs[j];
1048  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
1049 
1050  av_frame_free(&ofp->fps.last_frame);
1051  av_dict_free(&ofp->sws_opts);
1052  av_dict_free(&ofp->swr_opts);
1053 
1054  av_freep(&ofilter->linklabel);
1055  av_freep(&ofilter->name);
1056  av_freep(&ofilter->output_name);
1057  av_freep(&ofilter->apad);
1060  av_freep(&fg->outputs[j]);
1061  }
1062  av_freep(&fg->outputs);
1063  av_freep(&fg->graph_desc);
1064 
1065  av_frame_free(&fgp->frame);
1066  av_frame_free(&fgp->frame_enc);
1067 
1068  av_freep(pfg);
1069 }
1070 
1071 static const char *fg_item_name(void *obj)
1072 {
1073  const FilterGraphPriv *fgp = obj;
1074 
1075  return fgp->log_name;
1076 }
1077 
// AVClass for FilterGraphPriv: log messages are labelled with log_name
// (e.g. "fc#0"), as produced by fg_item_name().
static const AVClass fg_class = {
    .class_name = "FilterGraph",
    .version    = LIBAVUTIL_VERSION_INT,
    .item_name  = fg_item_name,
    .category   = AV_CLASS_CATEGORY_FILTER,
};
1084 
1085 int fg_create(FilterGraph **pfg, char **graph_desc, Scheduler *sch,
1086  const OutputFilterOptions *opts)
1087 {
1088  FilterGraphPriv *fgp;
1089  FilterGraph *fg;
1090 
1092  AVFilterGraph *graph;
1093  int ret = 0;
1094 
1095  fgp = av_mallocz(sizeof(*fgp));
1096  if (!fgp) {
1097  av_freep(graph_desc);
1098  return AVERROR(ENOMEM);
1099  }
1100  fg = &fgp->fg;
1101 
1102  if (pfg) {
1103  *pfg = fg;
1104  fg->index = -1;
1105  } else {
1107  if (ret < 0) {
1108  av_freep(graph_desc);
1109  av_freep(&fgp);
1110  return ret;
1111  }
1112 
1113  fg->index = nb_filtergraphs - 1;
1114  }
1115 
1116  fg->class = &fg_class;
1117  fg->graph_desc = *graph_desc;
1119  fgp->nb_threads = -1;
1120  fgp->sch = sch;
1121 
1122  *graph_desc = NULL;
1123 
1124  snprintf(fgp->log_name, sizeof(fgp->log_name), "fc#%d", fg->index);
1125 
1126  fgp->frame = av_frame_alloc();
1127  fgp->frame_enc = av_frame_alloc();
1128  if (!fgp->frame || !fgp->frame_enc)
1129  return AVERROR(ENOMEM);
1130 
1131  /* this graph is only used for determining the kinds of inputs
1132  * and outputs we have, and is discarded on exit from this function */
1133  graph = avfilter_graph_alloc();
1134  if (!graph)
1135  return AVERROR(ENOMEM);;
1136  graph->nb_threads = 1;
1137 
1138  ret = graph_parse(fg, graph, fg->graph_desc, &inputs, &outputs,
1140  if (ret < 0)
1141  goto fail;
1142 
1143  for (unsigned i = 0; i < graph->nb_filters; i++) {
1144  const AVFilter *f = graph->filters[i]->filter;
1145  if ((!avfilter_filter_pad_count(f, 0) &&
1146  !(f->flags & AVFILTER_FLAG_DYNAMIC_INPUTS)) ||
1147  !strcmp(f->name, "apad")) {
1148  fgp->have_sources = 1;
1149  break;
1150  }
1151  }
1152 
1153  for (AVFilterInOut *cur = inputs; cur; cur = cur->next) {
1154  InputFilter *const ifilter = ifilter_alloc(fg);
1155 
1156  if (!ifilter) {
1157  ret = AVERROR(ENOMEM);
1158  goto fail;
1159  }
1160 
1161  ifilter->linklabel = cur->name;
1162  cur->name = NULL;
1163 
1164  ifilter->type = avfilter_pad_get_type(cur->filter_ctx->input_pads,
1165  cur->pad_idx);
1166 
1167  if (ifilter->type != AVMEDIA_TYPE_VIDEO && ifilter->type != AVMEDIA_TYPE_AUDIO) {
1168  av_log(fg, AV_LOG_FATAL, "Only video and audio filters supported "
1169  "currently.\n");
1170  ret = AVERROR(ENOSYS);
1171  goto fail;
1172  }
1173 
1174  ifilter->name = describe_filter_link(fg, cur, 1);
1175  if (!ifilter->name) {
1176  ret = AVERROR(ENOMEM);
1177  goto fail;
1178  }
1179  }
1180 
1181  for (AVFilterInOut *cur = outputs; cur; cur = cur->next) {
1182  const enum AVMediaType type = avfilter_pad_get_type(cur->filter_ctx->output_pads,
1183  cur->pad_idx);
1184  OutputFilter *const ofilter = ofilter_alloc(fg, type);
1185  OutputFilterPriv *ofp;
1186 
1187  if (!ofilter) {
1188  ret = AVERROR(ENOMEM);
1189  goto fail;
1190  }
1191  ofp = ofp_from_ofilter(ofilter);
1192 
1193  ofilter->linklabel = cur->name;
1194  cur->name = NULL;
1195 
1196  ofilter->name = describe_filter_link(fg, cur, 0);
1197  if (!ofilter->name) {
1198  ret = AVERROR(ENOMEM);
1199  goto fail;
1200  }
1201 
1202  // opts should only be needed in this function to fill fields from filtergraphs
1203  // whose output is meant to be treated as if it was stream, e.g. merged HEIF
1204  // tile groups.
1205  if (opts) {
1206  ofp->flags = opts->flags;
1207  ofp->side_data = opts->side_data;
1208  ofp->nb_side_data = opts->nb_side_data;
1209 
1210  ofp->crop_top = opts->crop_top;
1211  ofp->crop_bottom = opts->crop_bottom;
1212  ofp->crop_left = opts->crop_left;
1213  ofp->crop_right = opts->crop_right;
1214 
1217  if (sd)
1218  memcpy(ofp->displaymatrix, sd->data, sizeof(ofp->displaymatrix));
1219  }
1220  }
1221 
1222  if (!fg->nb_outputs) {
1223  av_log(fg, AV_LOG_FATAL, "A filtergraph has zero outputs, this is not supported\n");
1224  ret = AVERROR(ENOSYS);
1225  goto fail;
1226  }
1227 
1228  ret = sch_add_filtergraph(sch, fg->nb_inputs, fg->nb_outputs,
1229  filter_thread, fgp);
1230  if (ret < 0)
1231  goto fail;
1232  fgp->sch_idx = ret;
1233 
1234 fail:
1237  avfilter_graph_free(&graph);
1238 
1239  if (ret < 0)
1240  return ret;
1241 
1242  return 0;
1243 }
1244 
1246  InputStream *ist,
1247  char **graph_desc,
1248  Scheduler *sch, unsigned sched_idx_enc,
1249  const OutputFilterOptions *opts)
1250 {
1251  const enum AVMediaType type = ist->par->codec_type;
1252  FilterGraph *fg;
1253  FilterGraphPriv *fgp;
1254  int ret;
1255 
1256  ret = fg_create(pfg, graph_desc, sch, NULL);
1257  if (ret < 0)
1258  return ret;
1259  fg = *pfg;
1260  fgp = fgp_from_fg(fg);
1261 
1262  fgp->is_simple = 1;
1263 
1264  snprintf(fgp->log_name, sizeof(fgp->log_name), "%cf%s",
1265  av_get_media_type_string(type)[0], opts->name);
1266 
1267  if (fg->nb_inputs != 1 || fg->nb_outputs != 1) {
1268  av_log(fg, AV_LOG_ERROR, "Simple filtergraph '%s' was expected "
1269  "to have exactly 1 input and 1 output. "
1270  "However, it had %d input(s) and %d output(s). Please adjust, "
1271  "or use a complex filtergraph (-filter_complex) instead.\n",
1272  *graph_desc, fg->nb_inputs, fg->nb_outputs);
1273  return AVERROR(EINVAL);
1274  }
1275  if (fg->outputs[0]->type != type) {
1276  av_log(fg, AV_LOG_ERROR, "Filtergraph has a %s output, cannot connect "
1277  "it to %s output stream\n",
1280  return AVERROR(EINVAL);
1281  }
1282 
1283  ret = ifilter_bind_ist(fg->inputs[0], ist, opts->vs);
1284  if (ret < 0)
1285  return ret;
1286 
1287  ret = ofilter_bind_enc(fg->outputs[0], sched_idx_enc, opts);
1288  if (ret < 0)
1289  return ret;
1290 
1291  if (opts->nb_threads >= 0)
1292  fgp->nb_threads = opts->nb_threads;
1293 
1294  return 0;
1295 }
1296 
/* Bind one input of a complex filtergraph to its data source, which may be:
 *  - a standalone decoder (link label "dec:N"),
 *  - another filtergraph's output (matching link label, or any unbound output
 *    when the input is unlabeled),
 *  - a demuxer stream (numeric "file:stream" link label, or an unused stream
 *    of the right media type when unlabeled).
 * With commit == 0 this is a dry run: a matched filtergraph output is only
 * marked as needed, no actual binding happens. Returns 0 on success, <0 on error. */
1297 static int fg_complex_bind_input(FilterGraph *fg, InputFilter *ifilter, int commit)
1298 {
1299  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1300  InputStream *ist = NULL;
1301  enum AVMediaType type = ifilter->type;
1303  const char *spec;
1304  char *p;
1305  int i, ret;
1306 
1307  if (ifilter->linklabel && !strncmp(ifilter->linklabel, "dec:", 4)) {
1308  // bind to a standalone decoder
1309  int dec_idx;
1310 
1311  dec_idx = strtol(ifilter->linklabel + 4, &p, 0);
1312  if (dec_idx < 0 || dec_idx >= nb_decoders) {
1313  av_log(fg, AV_LOG_ERROR, "Invalid decoder index %d in filtergraph description %s\n",
1314  dec_idx, fg->graph_desc);
1315  return AVERROR(EINVAL);
1316  }
1317 
      /* For video, an optional view specifier may follow the decoder index. */
1318  if (type == AVMEDIA_TYPE_VIDEO) {
1319  spec = *p == ':' ? p + 1 : p;
1320  ret = view_specifier_parse(&spec, &vs);
1321  if (ret < 0)
1322  return ret;
1323  }
1324 
1325  ret = ifilter_bind_dec(ifp, decoders[dec_idx], &vs);
1326  if (ret < 0)
1327  av_log(fg, AV_LOG_ERROR, "Error binding a decoder to filtergraph input %s\n",
1328  ifilter->name);
1329  return ret;
1330  } else if (ifilter->linklabel) {
1332  AVFormatContext *s;
1333  AVStream *st = NULL;
1334  int file_idx;
1335 
1336  // try finding an unbound filtergraph output with this label
1337  for (int i = 0; i < nb_filtergraphs; i++) {
1338  FilterGraph *fg_src = filtergraphs[i];
1339 
1340  if (fg == fg_src)
1341  continue;
1342 
1343  for (int j = 0; j < fg_src->nb_outputs; j++) {
1344  OutputFilter *ofilter = fg_src->outputs[j];
1345 
1346  if (!ofilter->bound && ofilter->linklabel &&
1347  !strcmp(ofilter->linklabel, ifilter->linklabel)) {
1348  if (commit) {
1349  av_log(fg, AV_LOG_VERBOSE,
1350  "Binding input with label '%s' to filtergraph output %d:%d\n",
1351  ifilter->linklabel, i, j);
1352 
1353  ret = ifilter_bind_fg(ifp, fg_src, j);
1354  if (ret < 0) {
1355  av_log(fg, AV_LOG_ERROR, "Error binding filtergraph input %s\n",
1356  ifilter->linklabel);
1357  return ret;
1358  }
1359  } else
1360  ofp_from_ofilter(ofilter)->needed = 1;
1361  return 0;
1362  }
1363  }
1364  }
1365 
1366  // bind to an explicitly specified demuxer stream
1367  file_idx = strtol(ifilter->linklabel, &p, 0);
1368  if (file_idx < 0 || file_idx >= nb_input_files) {
1369  av_log(fg, AV_LOG_FATAL, "Invalid file index %d in filtergraph description %s.\n",
1370  file_idx, fg->graph_desc);
1371  return AVERROR(EINVAL);
1372  }
1373  s = input_files[file_idx]->ctx;
1374 
1375  ret = stream_specifier_parse(&ss, *p == ':' ? p + 1 : p, 1, fg);
1376  if (ret < 0) {
1377  av_log(fg, AV_LOG_ERROR, "Invalid stream specifier: %s\n", p);
1378  return ret;
1379  }
1380 
      /* For video, the tail of the stream specifier may be a view specifier. */
1381  if (type == AVMEDIA_TYPE_VIDEO) {
1382  spec = ss.remainder ? ss.remainder : "";
1383  ret = view_specifier_parse(&spec, &vs);
1384  if (ret < 0) {
1386  return ret;
1387  }
1388  }
1389 
      /* Pick the first stream of the requested type that matches the
       * specifier; subtitles are also accepted for video inputs (sub2video). */
1390  for (i = 0; i < s->nb_streams; i++) {
1391  enum AVMediaType stream_type = s->streams[i]->codecpar->codec_type;
1392  if (stream_type != type &&
1393  !(stream_type == AVMEDIA_TYPE_SUBTITLE &&
1394  type == AVMEDIA_TYPE_VIDEO /* sub2video hack */))
1395  continue;
1396  if (stream_specifier_match(&ss, s, s->streams[i], fg)) {
1397  st = s->streams[i];
1398  break;
1399  }
1400  }
1402  if (!st) {
1403  av_log(fg, AV_LOG_FATAL, "Stream specifier '%s' in filtergraph description %s "
1404  "matches no streams.\n", p, fg->graph_desc);
1405  return AVERROR(EINVAL);
1406  }
1407  ist = input_files[file_idx]->streams[st->index];
1408 
1409  if (commit)
1410  av_log(fg, AV_LOG_VERBOSE,
1411  "Binding input with label '%s' to input stream %d:%d\n",
1412  ifilter->linklabel, ist->file->index, ist->index);
1413  } else {
1414  // try finding an unbound filtergraph output
1415  for (int i = 0; i < nb_filtergraphs; i++) {
1416  FilterGraph *fg_src = filtergraphs[i];
1417 
1418  if (fg == fg_src)
1419  continue;
1420 
1421  for (int j = 0; j < fg_src->nb_outputs; j++) {
1422  OutputFilter *ofilter = fg_src->outputs[j];
1423 
1424  if (!ofilter->bound) {
1425  if (commit) {
1426  av_log(fg, AV_LOG_VERBOSE,
1427  "Binding unlabeled filtergraph input to filtergraph output %d:%d\n", i, j);
1428 
1429  ret = ifilter_bind_fg(ifp, fg_src, j);
1430  if (ret < 0) {
1431  av_log(fg, AV_LOG_ERROR, "Error binding filtergraph input %d:%d\n", i, j);
1432  return ret;
1433  }
1434  } else
1435  ofp_from_ofilter(ofilter)->needed = 1;
1436  return 0;
1437  }
1438  }
1439  }
1440 
       /* No filtergraph output available — fall back to an unused input stream. */
1441  ist = ist_find_unused(type);
1442  if (!ist) {
1443  av_log(fg, AV_LOG_FATAL,
1444  "Cannot find an unused %s input stream to feed the "
1445  "unlabeled input pad %s.\n",
1446  av_get_media_type_string(type), ifilter->name);
1447  return AVERROR(EINVAL);
1448  }
1449 
1450  if (commit)
1451  av_log(fg, AV_LOG_VERBOSE,
1452  "Binding unlabeled input %d to input stream %d:%d\n",
1453  ifilter->index, ist->file->index, ist->index);
1454  }
1455  av_assert0(ist);
1456 
     /* Perform the actual binding to the chosen input stream. */
1457  if (commit) {
1458  ret = ifilter_bind_ist(ifilter, ist, &vs);
1459  if (ret < 0) {
1460  av_log(fg, AV_LOG_ERROR,
1461  "Error binding an input stream to complex filtergraph input %s.\n",
1462  ifilter->name);
1463  return ret;
1464  }
1465  }
1466 
1467  return 0;
1468 }
1469 
/* Bind every not-yet-bound input of fg to its source (input stream, decoder,
 * or another filtergraph); commit == 0 is a dry run, see fg_complex_bind_input().
 * Returns 0 on success, the first failing bind's error code otherwise. */
1470 static int bind_inputs(FilterGraph *fg, int commit)
1471 {
1472  // bind filtergraph inputs to input streams or other filtergraphs
1473  for (int i = 0; i < fg->nb_inputs; i++) {
1475  int ret;
1476 
1477  if (ifp->bound)
1478  continue;
1479 
1480  ret = fg_complex_bind_input(fg, &ifp->ifilter, commit);
1481  if (ret < 0)
1482  return ret;
1483  }
1484 
1485  return 0;
1486 }
1487 
1489 {
1490  int ret;
1491 
     /* First pass: dry-run binding (commit == 0) to mark which filtergraph
      * outputs are actually needed by some input. */
1492  for (int i = 0; i < nb_filtergraphs; i++) {
1493  ret = bind_inputs(filtergraphs[i], 0);
1494  if (ret < 0)
1495  return ret;
1496  }
1497 
1498  // check that all outputs were bound
     /* Iterate backwards so removing a graph does not disturb indices still
      * to be visited. Non-internal graphs with an unconnected output are an
      * error; internal ones are silently dropped from the global list. */
1499  for (int i = nb_filtergraphs - 1; i >= 0; i--) {
1500  FilterGraph *fg = filtergraphs[i];
1502 
1503  for (int j = 0; j < fg->nb_outputs; j++) {
1504  OutputFilter *output = fg->outputs[j];
1505  if (!ofp_from_ofilter(output)->needed) {
1506  if (!fg->is_internal) {
1507  av_log(fg, AV_LOG_FATAL,
1508  "Filter '%s' has output %d (%s) unconnected\n",
1509  output->name, j,
1510  output->linklabel ? (const char *)output->linklabel : "unlabeled");
1511  return AVERROR(EINVAL);
1512  }
1513 
1514  av_log(fg, AV_LOG_DEBUG,
1515  "Internal filter '%s' has output %d (%s) unconnected. Removing graph\n",
1516  output->name, j,
1517  output->linklabel ? (const char *)output->linklabel : "unlabeled");
1518  sch_remove_filtergraph(fgp->sch, fgp->sch_idx);
1519  fg_free(&filtergraphs[i]);
1520  nb_filtergraphs--;
1521  if (nb_filtergraphs > 0)
1522  memmove(&filtergraphs[i],
1523  &filtergraphs[i + 1],
1524  (nb_filtergraphs - i) * sizeof(*filtergraphs));
1525  break;
1526  }
1527  }
1528  }
1529 
     /* Second pass: actually perform the bindings (commit == 1). */
1530  for (int i = 0; i < nb_filtergraphs; i++) {
1531  ret = bind_inputs(filtergraphs[i], 1);
1532  if (ret < 0)
1533  return ret;
1534  }
1535 
1536  return 0;
1537 }
1538 
/* Append a "trim" (video) or "atrim" (audio) filter after *last_filter to
 * enforce output start-time/duration limits; no-op when neither limit is set
 * (duration == INT64_MAX and start_time == AV_NOPTS_VALUE).
 * On success *last_filter / *pad_idx are advanced to the new chain end. */
1539 static int insert_trim(void *logctx, int64_t start_time, int64_t duration,
1540  AVFilterContext **last_filter, int *pad_idx,
1541  const char *filter_name)
1542 {
1543  AVFilterGraph *graph = (*last_filter)->graph;
1545  const AVFilter *trim;
     /* Pick trim vs atrim from the media type of the pad we are extending. */
1546  enum AVMediaType type = avfilter_pad_get_type((*last_filter)->output_pads, *pad_idx);
1547  const char *name = (type == AVMEDIA_TYPE_VIDEO) ? "trim" : "atrim";
1548  int ret = 0;
1549 
1550  if (duration == INT64_MAX && start_time == AV_NOPTS_VALUE)
1551  return 0;
1552 
1553  trim = avfilter_get_by_name(name);
1554  if (!trim) {
1555  av_log(logctx, AV_LOG_ERROR, "%s filter not present, cannot limit "
1556  "recording time.\n", name);
1557  return AVERROR_FILTER_NOT_FOUND;
1558  }
1559 
1560  ctx = avfilter_graph_alloc_filter(graph, trim, filter_name);
1561  if (!ctx)
1562  return AVERROR(ENOMEM);
1563 
     /* "durationi"/"starti" are the integer (AV_OPT) forms of the trim options. */
1564  if (duration != INT64_MAX) {
1565  ret = av_opt_set_int(ctx, "durationi", duration,
1567  }
1568  if (ret >= 0 && start_time != AV_NOPTS_VALUE) {
1569  ret = av_opt_set_int(ctx, "starti", start_time,
1571  }
1572  if (ret < 0) {
1573  av_log(ctx, AV_LOG_ERROR, "Error configuring the %s filter", name);
1574  return ret;
1575  }
1576 
1578  if (ret < 0)
1579  return ret;
1580 
1581  ret = avfilter_link(*last_filter, *pad_idx, ctx, 0);
1582  if (ret < 0)
1583  return ret;
1584 
1585  *last_filter = ctx;
1586  *pad_idx = 0;
1587  return 0;
1588 }
1589 
/* Instantiate filter_name with the given args in the same graph as
 * *last_filter, link it after *last_filter/*pad_idx, and advance both to the
 * new chain end. Returns AVERROR_BUG when the filter does not exist (callers
 * only pass built-in names). */
1590 static int insert_filter(AVFilterContext **last_filter, int *pad_idx,
1591  const char *filter_name, const char *args)
1592 {
1593  AVFilterGraph *graph = (*last_filter)->graph;
1594  const AVFilter *filter = avfilter_get_by_name(filter_name);
1596  int ret;
1597 
1598  if (!filter)
1599  return AVERROR_BUG;
1600 
1602  filter,
1603  filter_name, args, NULL, graph);
1604  if (ret < 0)
1605  return ret;
1606 
1607  ret = avfilter_link(*last_filter, *pad_idx, ctx, 0);
1608  if (ret < 0)
1609  return ret;
1610 
1611  *last_filter = ctx;
1612  *pad_idx = 0;
1613  return 0;
1614 }
1615 
1617  OutputFilter *ofilter, AVFilterInOut *out)
1618 {
     /* Terminate a video output of the graph: create the buffersink and splice
      * in optional crop, autorotation, autoscale, format-constraint and trim
      * filters between the graph's output pad and the sink. */
1619  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
1620  AVFilterContext *last_filter = out->filter_ctx;
1621  AVBPrint bprint;
1622  int pad_idx = out->pad_idx;
1623  int ret;
1624  char name[255];
1625 
1626  snprintf(name, sizeof(name), "out_%s", ofilter->output_name);
1628  avfilter_get_by_name("buffersink"),
1629  name, NULL, NULL, graph);
1630 
1631  if (ret < 0)
1632  return ret;
1633 
     /* Requested output cropping, expressed via a crop filter. */
1634  if (ofp->flags & OFILTER_FLAG_CROP) {
1635  char crop_buf[64];
1636  snprintf(crop_buf, sizeof(crop_buf), "w=iw-%u-%u:h=ih-%u-%u:x=%u:y=%u",
1637  ofp->crop_left, ofp->crop_right,
1638  ofp->crop_top, ofp->crop_bottom,
1639  ofp->crop_left, ofp->crop_top);
1640  ret = insert_filter(&last_filter, &pad_idx, "crop", crop_buf);
1641  if (ret < 0)
1642  return ret;
1643  }
1644 
     /* Apply the display matrix rotation with transpose/hflip/vflip/rotate,
      * depending on the detected angle. */
1645  if (ofp->flags & OFILTER_FLAG_AUTOROTATE) {
1646  int32_t *displaymatrix = ofp->displaymatrix;
1647  double theta;
1648 
1649  theta = get_rotation(displaymatrix);
1650 
1651  if (fabs(theta - 90) < 1.0) {
1652  ret = insert_filter(&last_filter, &pad_idx, "transpose",
1653  displaymatrix[3] > 0 ? "cclock_flip" : "clock");
1654  } else if (fabs(theta - 180) < 1.0) {
1655  if (displaymatrix[0] < 0) {
1656  ret = insert_filter(&last_filter, &pad_idx, "hflip", NULL);
1657  if (ret < 0)
1658  return ret;
1659  }
1660  if (displaymatrix[4] < 0) {
1661  ret = insert_filter(&last_filter, &pad_idx, "vflip", NULL);
1662  }
1663  } else if (fabs(theta - 270) < 1.0) {
1664  ret = insert_filter(&last_filter, &pad_idx, "transpose",
1665  displaymatrix[3] < 0 ? "clock_flip" : "cclock");
1666  } else if (fabs(theta) > 1.0) {
1667  char rotate_buf[64];
1668  snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
1669  ret = insert_filter(&last_filter, &pad_idx, "rotate", rotate_buf);
1670  } else if (fabs(theta) < 1.0) {
1671  if (displaymatrix && displaymatrix[4] < 0) {
1672  ret = insert_filter(&last_filter, &pad_idx, "vflip", NULL);
1673  }
1674  }
1675  if (ret < 0)
1676  return ret;
1677 
1679  }
1680 
     /* Scale to the requested output size (plus any -sws_flags style options). */
1681  if ((ofp->width || ofp->height) && (ofp->flags & OFILTER_FLAG_AUTOSCALE)) {
1682  char args[255];
1684  const AVDictionaryEntry *e = NULL;
1685 
1686  snprintf(args, sizeof(args), "%d:%d",
1687  ofp->width, ofp->height);
1688 
1689  while ((e = av_dict_iterate(ofp->sws_opts, e))) {
1690  av_strlcatf(args, sizeof(args), ":%s=%s", e->key, e->value);
1691  }
1692 
1693  snprintf(name, sizeof(name), "scaler_out_%s", ofilter->output_name);
1695  name, args, NULL, graph)) < 0)
1696  return ret;
1697  if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
1698  return ret;
1699 
1700  last_filter = filter;
1701  pad_idx = 0;
1702  }
1703 
     /* Constrain pixel format / colorspace / range / alpha via a format filter,
      * built from the encoder-supported lists. */
1705  ofp->format != AV_PIX_FMT_NONE || !ofp->formats);
1707  choose_pix_fmts(ofp, &bprint);
1708  choose_color_spaces(ofp, &bprint);
1709  choose_color_ranges(ofp, &bprint);
1710  choose_alpha_modes(ofp, &bprint);
1711  if (!av_bprint_is_complete(&bprint))
1712  return AVERROR(ENOMEM);
1713 
1714  if (bprint.len) {
1716 
1718  avfilter_get_by_name("format"),
1719  "format", bprint.str, NULL, graph);
1720  av_bprint_finalize(&bprint, NULL);
1721  if (ret < 0)
1722  return ret;
1723  if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
1724  return ret;
1725 
1726  last_filter = filter;
1727  pad_idx = 0;
1728  }
1729 
     /* Honour output -ss/-t via a trim filter, then attach the buffersink. */
1730  snprintf(name, sizeof(name), "trim_out_%s", ofilter->output_name);
1731  ret = insert_trim(fgp, ofp->trim_start_us, ofp->trim_duration_us,
1732  &last_filter, &pad_idx, name);
1733  if (ret < 0)
1734  return ret;
1735 
1736 
1737  if ((ret = avfilter_link(last_filter, pad_idx, ofilter->filter, 0)) < 0)
1738  return ret;
1739 
1740  return 0;
1741 }
1742 
1744  OutputFilter *ofilter, AVFilterInOut *out)
1745 {
     /* Terminate an audio output of the graph: create the abuffersink and
      * splice in optional aformat, apad and atrim filters before it. */
1746  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
1747  AVFilterContext *last_filter = out->filter_ctx;
1748  int pad_idx = out->pad_idx;
1749  AVBPrint args;
1750  char name[255];
1751  int ret;
1752 
1753  snprintf(name, sizeof(name), "out_%s", ofilter->output_name);
1755  avfilter_get_by_name("abuffersink"),
1756  name, NULL, NULL, graph);
1757  if (ret < 0)
1758  return ret;
1759 
/* Instantiate filter_name with the given arg and splice it after last_filter;
 * jumps to fail on error (uses/updates ret, last_filter, pad_idx). */
1760 #define AUTO_INSERT_FILTER(opt_name, filter_name, arg) do { \
1761  AVFilterContext *filt_ctx; \
1762  \
1763  av_log(ofilter, AV_LOG_INFO, opt_name " is forwarded to lavfi " \
1764  "similarly to -af " filter_name "=%s.\n", arg); \
1765  \
1766  ret = avfilter_graph_create_filter(&filt_ctx, \
1767  avfilter_get_by_name(filter_name), \
1768  filter_name, arg, NULL, graph); \
1769  if (ret < 0) \
1770  goto fail; \
1771  \
1772  ret = avfilter_link(last_filter, pad_idx, filt_ctx, 0); \
1773  if (ret < 0) \
1774  goto fail; \
1775  \
1776  last_filter = filt_ctx; \
1777  pad_idx = 0; \
1778 } while (0)
1780 
     /* Constrain sample format / rate / channel layout with an aformat filter. */
1781  choose_sample_fmts(ofp, &args);
1782  choose_sample_rates(ofp, &args);
1783  choose_channel_layouts(ofp, &args);
1784  if (!av_bprint_is_complete(&args)) {
1785  ret = AVERROR(ENOMEM);
1786  goto fail;
1787  }
1788  if (args.len) {
1790 
1791  snprintf(name, sizeof(name), "format_out_%s", ofilter->output_name);
1793  avfilter_get_by_name("aformat"),
1794  name, args.str, NULL, graph);
1795  if (ret < 0)
1796  goto fail;
1797 
1798  ret = avfilter_link(last_filter, pad_idx, format, 0);
1799  if (ret < 0)
1800  goto fail;
1801 
1802  last_filter = format;
1803  pad_idx = 0;
1804  }
1805 
     /* -apad: pad audio with silence; the graph then contains a data source. */
1806  if (ofilter->apad) {
1807  AUTO_INSERT_FILTER("-apad", "apad", ofilter->apad);
1808  fgp->have_sources = 1;
1809  }
1810 
1811  snprintf(name, sizeof(name), "trim for output %s", ofilter->output_name);
1812  ret = insert_trim(fgp, ofp->trim_start_us, ofp->trim_duration_us,
1813  &last_filter, &pad_idx, name);
1814  if (ret < 0)
1815  goto fail;
1816 
1817  if ((ret = avfilter_link(last_filter, pad_idx, ofilter->filter, 0)) < 0)
1818  goto fail;
     /* Success also falls through here with ret >= 0; only the bprint cleanup runs. */
1819 fail:
1820  av_bprint_finalize(&args, NULL);
1821 
1822  return ret;
1823 }
1824 
1826  OutputFilter *ofilter, AVFilterInOut *out)
1827 {
     /* Dispatch output configuration on the output's media type. */
1828  switch (ofilter->type) {
1829  case AVMEDIA_TYPE_VIDEO: return configure_output_video_filter(fgp, graph, ofilter, out);
1830  case AVMEDIA_TYPE_AUDIO: return configure_output_audio_filter(fgp, graph, ofilter, out);
1831  default: av_assert0(0); return 0;
1832  }
1833 }
1834 
1836 {
     /* Reset sub2video timing state; the next heartbeat re-initializes it. */
1837  ifp->sub2video.last_pts = INT64_MIN;
1838  ifp->sub2video.end_pts = INT64_MIN;
1839 
1840  /* sub2video structure has been (re-)initialized.
1841  Mark it as such so that the system will be
1842  initialized with the first received heartbeat. */
1843  ifp->sub2video.initialize = 1;
1844 }
1845 
1847  InputFilter *ifilter, AVFilterInOut *in)
1848 {
     /* Feed a video input of the graph: create a buffer source configured from
      * the stored stream parameters, then splice in optional crop, autorotation
      * and trim filters before linking into the parsed graph. */
1849  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1850 
1851  AVFilterContext *last_filter;
1852  const AVFilter *buffer_filt = avfilter_get_by_name("buffer");
1853  const AVPixFmtDescriptor *desc;
1854  char name[255];
1855  int ret, pad_idx = 0;
1857  if (!par)
1858  return AVERROR(ENOMEM);
1859 
     /* Subtitle-backed video inputs (sub2video) need their timing state reset. */
1860  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE)
1861  sub2video_prepare(ifp);
1862 
1863  snprintf(name, sizeof(name), "graph %d input from stream %s", fg->index,
1864  ifp->opts.name);
1865 
1866  ifilter->filter = avfilter_graph_alloc_filter(graph, buffer_filt, name);
1867  if (!ifilter->filter) {
1868  ret = AVERROR(ENOMEM);
1869  goto fail;
1870  }
1871 
     /* Describe the incoming stream to the buffer source. */
1872  par->format = ifp->format;
1873  par->time_base = ifp->time_base;
1874  par->frame_rate = ifp->opts.framerate;
1875  par->width = ifp->width;
1876  par->height = ifp->height;
1877  par->sample_aspect_ratio = ifp->sample_aspect_ratio.den > 0 ?
1878  ifp->sample_aspect_ratio : (AVRational){ 0, 1 };
1879  par->color_space = ifp->color_space;
1880  par->color_range = ifp->color_range;
1881  par->alpha_mode = ifp->alpha_mode;
1882  par->hw_frames_ctx = ifp->hw_frames_ctx;
1883  par->side_data = ifp->side_data;
1884  par->nb_side_data = ifp->nb_side_data;
1885 
1886  ret = av_buffersrc_parameters_set(ifilter->filter, par);
1887  if (ret < 0)
1888  goto fail;
1889  av_freep(&par);
1890 
1891  ret = avfilter_init_dict(ifilter->filter, NULL);
1892  if (ret < 0)
1893  goto fail;
1894 
1895  last_filter = ifilter->filter;
1896 
1898  av_assert0(desc);
1899 
     /* Input-side cropping requested on this stream. */
1900  if ((ifp->opts.flags & IFILTER_FLAG_CROP)) {
1901  char crop_buf[64];
1902  snprintf(crop_buf, sizeof(crop_buf), "w=iw-%u-%u:h=ih-%u-%u:x=%u:y=%u",
1903  ifp->opts.crop_left, ifp->opts.crop_right,
1904  ifp->opts.crop_top, ifp->opts.crop_bottom,
1905  ifp->opts.crop_left, ifp->opts.crop_top);
1906  ret = insert_filter(&last_filter, &pad_idx, "crop", crop_buf);
1907  if (ret < 0)
1908  return ret;
1909  }
1910 
1911  // TODO: insert hwaccel enabled filters like transpose_vaapi into the graph
     /* Apply the display-matrix rotation in software; skipped for hwaccel
      * pixel formats, and recorded in displaymatrix_applied when done. */
1912  ifp->displaymatrix_applied = 0;
1913  if ((ifp->opts.flags & IFILTER_FLAG_AUTOROTATE) &&
1914  !(desc->flags & AV_PIX_FMT_FLAG_HWACCEL)) {
1915  int32_t *displaymatrix = ifp->displaymatrix;
1916  double theta;
1917 
1918  theta = get_rotation(displaymatrix);
1919 
1920  if (fabs(theta - 90) < 1.0) {
1921  ret = insert_filter(&last_filter, &pad_idx, "transpose",
1922  displaymatrix[3] > 0 ? "cclock_flip" : "clock");
1923  } else if (fabs(theta - 180) < 1.0) {
1924  if (displaymatrix[0] < 0) {
1925  ret = insert_filter(&last_filter, &pad_idx, "hflip", NULL);
1926  if (ret < 0)
1927  return ret;
1928  }
1929  if (displaymatrix[4] < 0) {
1930  ret = insert_filter(&last_filter, &pad_idx, "vflip", NULL);
1931  }
1932  } else if (fabs(theta - 270) < 1.0) {
1933  ret = insert_filter(&last_filter, &pad_idx, "transpose",
1934  displaymatrix[3] < 0 ? "clock_flip" : "cclock");
1935  } else if (fabs(theta) > 1.0) {
1936  char rotate_buf[64];
1937  snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
1938  ret = insert_filter(&last_filter, &pad_idx, "rotate", rotate_buf);
1939  } else if (fabs(theta) < 1.0) {
1940  if (displaymatrix && displaymatrix[4] < 0) {
1941  ret = insert_filter(&last_filter, &pad_idx, "vflip", NULL);
1942  }
1943  }
1944  if (ret < 0)
1945  return ret;
1946 
1947  ifp->displaymatrix_applied = 1;
1948  }
1949 
     /* Honour input trimming, then link into the parsed graph's input pad. */
1950  snprintf(name, sizeof(name), "trim_in_%s", ifp->opts.name);
1951  ret = insert_trim(fg, ifp->opts.trim_start_us, ifp->opts.trim_end_us,
1952  &last_filter, &pad_idx, name);
1953  if (ret < 0)
1954  return ret;
1955 
1956  if ((ret = avfilter_link(last_filter, 0, in->filter_ctx, in->pad_idx)) < 0)
1957  return ret;
1958  return 0;
1959 fail:
1960  av_freep(&par);
1961 
1962  return ret;
1963 }
1964 
1966  InputFilter *ifilter, AVFilterInOut *in)
1967 {
     /* Feed an audio input of the graph: create an abuffer source described by
      * an args string, attach stored side data, then trim and link in. */
1968  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1969  AVFilterContext *last_filter;
1970  AVBufferSrcParameters *par;
1971  const AVFilter *abuffer_filt = avfilter_get_by_name("abuffer");
1972  AVBPrint args;
1973  char name[255];
1974  int ret, pad_idx = 0;
1975 
1977  av_bprintf(&args, "time_base=%d/%d:sample_rate=%d:sample_fmt=%s",
1978  ifp->time_base.num, ifp->time_base.den,
1979  ifp->sample_rate,
     /* Prefer a named channel layout; fall back to a bare channel count. */
1981  if (av_channel_layout_check(&ifp->ch_layout) &&
1983  av_bprintf(&args, ":channel_layout=");
1985  } else
1986  av_bprintf(&args, ":channels=%d", ifp->ch_layout.nb_channels);
1987  snprintf(name, sizeof(name), "graph_%d_in_%s", fg->index, ifp->opts.name);
1988 
1989  if ((ret = avfilter_graph_create_filter(&ifilter->filter, abuffer_filt,
1990  name, args.str, NULL,
1991  graph)) < 0)
1992  return ret;
     /* Side data cannot go into the args string; pass it via parameters. */
1994  if (!par)
1995  return AVERROR(ENOMEM);
1996  par->side_data = ifp->side_data;
1997  par->nb_side_data = ifp->nb_side_data;
1998  ret = av_buffersrc_parameters_set(ifilter->filter, par);
1999  av_free(par);
2000  if (ret < 0)
2001  return ret;
2002  last_filter = ifilter->filter;
2003 
2004  snprintf(name, sizeof(name), "trim for input stream %s", ifp->opts.name);
2005  ret = insert_trim(fg, ifp->opts.trim_start_us, ifp->opts.trim_end_us,
2006  &last_filter, &pad_idx, name);
2007  if (ret < 0)
2008  return ret;
2009 
2010  if ((ret = avfilter_link(last_filter, 0, in->filter_ctx, in->pad_idx)) < 0)
2011  return ret;
2012 
2013  return 0;
2014 }
2015 
2017  InputFilter *ifilter, AVFilterInOut *in)
2018 {
     /* Dispatch input configuration on the input's media type. */
2019  switch (ifilter->type) {
2020  case AVMEDIA_TYPE_VIDEO: return configure_input_video_filter(fg, graph, ifilter, in);
2021  case AVMEDIA_TYPE_AUDIO: return configure_input_audio_filter(fg, graph, ifilter, in);
2022  default: av_assert0(0); return 0;
2023  }
2024 }
2025 
2027 {
     /* Drop the per-pad filter pointers (they are owned by the graph) before
      * freeing the whole AVFilterGraph. */
2028  for (int i = 0; i < fg->nb_outputs; i++)
2029  fg->outputs[i]->filter = NULL;
2030  for (int i = 0; i < fg->nb_inputs; i++)
2031  fg->inputs[i]->filter = NULL;
2032  avfilter_graph_free(&fgt->graph);
2033 }
2034 
2036 {
     /* True for buffer/abuffer source filters, i.e. pads fed by us. */
2037  return f->nb_inputs == 0 &&
2038  (!strcmp(f->filter->name, "buffer") ||
2039  !strcmp(f->filter->name, "abuffer"));
2040 }
2041 
/* Return 1 if no filter in the graph can modify frame data: every filter is
 * either metadata-only, a sink (no outputs), or a buffer source. */
2042 static int graph_is_meta(AVFilterGraph *graph)
2043 {
2044  for (unsigned i = 0; i < graph->nb_filters; i++) {
2045  const AVFilterContext *f = graph->filters[i];
2046 
2047  /* in addition to filters flagged as meta, also
2048  * disregard sinks and buffersources (but not other sources,
2049  * since they introduce data we are not aware of)
2050  */
2051  if (!((f->filter->flags & AVFILTER_FLAG_METADATA_ONLY) ||
2052  f->nb_outputs == 0 ||
2054  return 0;
2055  }
2056  return 1;
2057 }
2058 
2059 static int sub2video_frame(InputFilter *ifilter, AVFrame *frame, int buffer);
2060 
2062 {
     /* (Re)build and configure the runtime AVFilterGraph for fg: parse the
      * description, attach all inputs/outputs, record negotiated output
      * parameters, then replay any buffered frames and pending EOFs. */
2063  FilterGraphPriv *fgp = fgp_from_fg(fg);
2064  AVBufferRef *hw_device;
2065  AVFilterInOut *inputs, *outputs, *cur;
2066  int ret = AVERROR_BUG, i, simple = filtergraph_is_simple(fg);
2067  int have_input_eof = 0;
2068  const char *graph_desc = fg->graph_desc;
2069 
2070  cleanup_filtergraph(fg, fgt);
2071  fgt->graph = avfilter_graph_alloc();
2072  if (!fgt->graph)
2073  return AVERROR(ENOMEM);
2074 
     /* Simple graphs take thread count and sws/swr options from the output. */
2075  if (simple) {
2076  OutputFilterPriv *ofp = ofp_from_ofilter(fg->outputs[0]);
2077 
2078  if (filter_nbthreads) {
2079  ret = av_opt_set(fgt->graph, "threads", filter_nbthreads, 0);
2080  if (ret < 0)
2081  goto fail;
2082  } else if (fgp->nb_threads >= 0) {
2083  ret = av_opt_set_int(fgt->graph, "threads", fgp->nb_threads, 0);
2084  if (ret < 0)
2085  return ret;
2086  }
2087 
2088  if (av_dict_count(ofp->sws_opts)) {
2090  &fgt->graph->scale_sws_opts,
2091  '=', ':');
2092  if (ret < 0)
2093  goto fail;
2094  }
2095 
2096  if (av_dict_count(ofp->swr_opts)) {
2097  char *args;
2098  ret = av_dict_get_string(ofp->swr_opts, &args, '=', ':');
2099  if (ret < 0)
2100  goto fail;
2101  av_opt_set(fgt->graph, "aresample_swr_opts", args, 0);
2102  av_free(args);
2103  }
2104  } else {
2106  }
2107 
2108  if (filter_buffered_frames) {
2109  ret = av_opt_set_int(fgt->graph, "max_buffered_frames", filter_buffered_frames, 0);
2110  if (ret < 0)
2111  return ret;
2112  }
2113 
2114  hw_device = hw_device_for_filter();
2115 
2116  ret = graph_parse(fg, fgt->graph, graph_desc, &inputs, &outputs, hw_device);
2117  if (ret < 0)
2118  goto fail;
2119 
     /* Attach each unlinked input/output pad of the parsed graph. */
2120  for (cur = inputs, i = 0; cur; cur = cur->next, i++)
2121  if ((ret = configure_input_filter(fg, fgt->graph, fg->inputs[i], cur)) < 0) {
2124  goto fail;
2125  }
2127 
2128  for (cur = outputs, i = 0; cur; cur = cur->next, i++) {
2129  ret = configure_output_filter(fgp, fgt->graph, fg->outputs[i], cur);
2130  if (ret < 0) {
2132  goto fail;
2133  }
2134  }
2136 
2137  if (fgp->disable_conversions)
2139  if ((ret = avfilter_graph_config(fgt->graph, NULL)) < 0)
2140  goto fail;
2141 
2142  fgp->is_meta = graph_is_meta(fgt->graph);
2143 
2144  /* limit the lists of allowed formats to the ones selected, to
2145  * make sure they stay the same if the filtergraph is reconfigured later */
2146  for (int i = 0; i < fg->nb_outputs; i++) {
2147  const AVFrameSideData *const *sd;
2148  int nb_sd;
2149  OutputFilter *ofilter = fg->outputs[i];
2150  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
2151  AVFilterContext *sink = ofilter->filter;
2152 
2153  ofp->format = av_buffersink_get_format(sink);
2154 
2155  ofp->width = av_buffersink_get_w(sink);
2156  ofp->height = av_buffersink_get_h(sink);
2160 
2161  // If the timing parameters are not locked yet, get the tentative values
2162  // here but don't lock them. They will only be used if no output frames
2163  // are ever produced.
2164  if (!ofp->tb_out_locked) {
2166  if (ofp->fps.framerate.num <= 0 && ofp->fps.framerate.den <= 0 &&
2167  fr.num > 0 && fr.den > 0)
2168  ofp->fps.framerate = fr;
2169  ofp->tb_out = av_buffersink_get_time_base(sink);
2170  }
2172 
2175  ret = av_buffersink_get_ch_layout(sink, &ofp->ch_layout);
2176  if (ret < 0)
2177  goto fail;
2178  sd = av_buffersink_get_side_data(sink, &nb_sd);
2179  if (nb_sd)
2180  for (int j = 0; j < nb_sd; j++) {
2183  if (ret < 0) {
2185  goto fail;
2186  }
2187  }
2188  }
2189 
     /* Replay frames that were queued while the graph was not configured. */
2190  for (int i = 0; i < fg->nb_inputs; i++) {
2191  InputFilter *ifilter = fg->inputs[i];
2193  AVFrame *tmp;
2194  while (av_fifo_read(ifp->frame_queue, &tmp, 1) >= 0) {
2195  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE) {
2196  sub2video_frame(&ifp->ifilter, tmp, !fgt->graph);
2197  } else {
2198  if (ifp->type_src == AVMEDIA_TYPE_VIDEO) {
2199  if (ifp->displaymatrix_applied)
2201  }
2202  ret = av_buffersrc_add_frame(ifilter->filter, tmp);
2203  }
2204  av_frame_free(&tmp);
2205  if (ret < 0)
2206  goto fail;
2207  }
2208  }
2209 
2210  /* send the EOFs for the finished inputs */
2211  for (int i = 0; i < fg->nb_inputs; i++) {
2212  InputFilter *ifilter = fg->inputs[i];
2213  if (fgt->eof_in[i]) {
2214  ret = av_buffersrc_add_frame(ifilter->filter, NULL);
2215  if (ret < 0)
2216  goto fail;
2217  have_input_eof = 1;
2218  }
2219  }
2220 
2221  if (have_input_eof) {
2222  // make sure the EOF propagates to the end of the graph
2224  if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
2225  goto fail;
2226  }
2227 
2228  return 0;
2229 fail:
2230  cleanup_filtergraph(fg, fgt);
2231  return ret;
2232 }
2233 
2235 {
     /* Capture from a frame the parameters the buffer source will need:
      * hw frames context, timebase, geometry/format, color properties, audio
      * layout, global side data (minus the display matrix, handled separately),
      * and downmix info. */
2236  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2237  AVFrameSideData *sd;
2238  int ret;
2239 
2240  ret = av_buffer_replace(&ifp->hw_frames_ctx, frame->hw_frames_ctx);
2241  if (ret < 0)
2242  return ret;
2243 
     /* Audio: 1/sample_rate; CFR video: inverse framerate; else frame timebase. */
2244  ifp->time_base = (ifilter->type == AVMEDIA_TYPE_AUDIO) ? (AVRational){ 1, frame->sample_rate } :
2245  (ifp->opts.flags & IFILTER_FLAG_CFR) ? av_inv_q(ifp->opts.framerate) :
2246  frame->time_base;
2247 
2248  ifp->format = frame->format;
2249 
2250  ifp->width = frame->width;
2251  ifp->height = frame->height;
2252  ifp->sample_aspect_ratio = frame->sample_aspect_ratio;
2253  ifp->color_space = frame->colorspace;
2254  ifp->color_range = frame->color_range;
2255  ifp->alpha_mode = frame->alpha_mode;
2256 
2257  ifp->sample_rate = frame->sample_rate;
2258  ret = av_channel_layout_copy(&ifp->ch_layout, &frame->ch_layout);
2259  if (ret < 0)
2260  return ret;
2261 
     /* Keep only global side data; the display matrix is stored separately below. */
2263  for (int i = 0; i < frame->nb_side_data; i++) {
2264  const AVSideDataDescriptor *desc = av_frame_side_data_desc(frame->side_data[i]->type);
2265 
2266  if (!(desc->props & AV_SIDE_DATA_PROP_GLOBAL) ||
2267  frame->side_data[i]->type == AV_FRAME_DATA_DISPLAYMATRIX)
2268  continue;
2269 
2271  &ifp->nb_side_data,
2272  frame->side_data[i], 0);
2273  if (ret < 0)
2274  return ret;
2275  }
2276 
2278  if (sd)
2279  memcpy(ifp->displaymatrix, sd->data, sizeof(ifp->displaymatrix));
2280  ifp->displaymatrix_present = !!sd;
2281 
2282  /* Copy downmix related side data to InputFilterPriv so it may be propagated
2283  * to the filter chain even though it's not "global", as filters like aresample
2284  * require this information during init and not when remixing a frame */
2286  if (sd) {
2288  &ifp->nb_side_data, sd, 0);
2289  if (ret < 0)
2290  return ret;
2291  memcpy(&ifp->downmixinfo, sd->data, sizeof(ifp->downmixinfo));
2292  }
2293  ifp->downmixinfo_present = !!sd;
2294 
2295  return 0;
2296 }
2297 
2299 {
     /* Propagate parameters from a bound upstream filtergraph output to this
      * input: inherit the framerate (marking the input CFR when it is valid)
      * and each of the output's side-data entries. */
2300  const OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
2301  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2302 
2303  if (!ifp->opts.framerate.num) {
2304  ifp->opts.framerate = ofp->fps.framerate;
2305  if (ifp->opts.framerate.num > 0 && ifp->opts.framerate.den > 0)
2306  ifp->opts.flags |= IFILTER_FLAG_CFR;
2307  }
2308 
2309  for (int i = 0; i < ofp->nb_side_data; i++) {
2312  if (ret < 0)
2313  return ret;
2314  }
2315 
2316  return 0;
2317 }
2318 
2320 {
     /* Report whether this graph was created as a simple (1-in/1-out) graph. */
2321  const FilterGraphPriv *fgp = cfgp_from_cfg(fg);
2322  return fgp->is_simple;
2323 }
2324 
2325 static void send_command(FilterGraph *fg, AVFilterGraph *graph,
2326  double time, const char *target,
2327  const char *command, const char *arg, int all_filters)
2328 {
2329  int ret;
2330 
2331  if (!graph)
2332  return;
2333 
2334  if (time < 0) {
2335  char response[4096];
2336  ret = avfilter_graph_send_command(graph, target, command, arg,
2337  response, sizeof(response),
2338  all_filters ? 0 : AVFILTER_CMD_FLAG_ONE);
2339  fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s",
2340  fg->index, ret, response);
2341  } else if (!all_filters) {
2342  fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
2343  } else {
2344  ret = avfilter_graph_queue_command(graph, target, command, arg, 0, time);
2345  if (ret < 0)
2346  fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
2347  }
2348 }
2349 
2350 static int choose_input(const FilterGraph *fg, const FilterGraphThread *fgt)
2351 {
2352  int nb_requests, nb_requests_max = -1;
2353  int best_input = -1;
2354 
2355  for (int i = 0; i < fg->nb_inputs; i++) {
2356  InputFilter *ifilter = fg->inputs[i];
2357 
2358  if (fgt->eof_in[i])
2359  continue;
2360 
2361  nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
2362  if (nb_requests > nb_requests_max) {
2363  nb_requests_max = nb_requests;
2364  best_input = i;
2365  }
2366  }
2367 
2368  av_assert0(best_input >= 0);
2369 
2370  return best_input;
2371 }
2372 
2374 {
     /* Decide and lock the output timebase (and video framerate) for this
      * output, based on -enc_time_base, the configured/sink framerate, any
      * framerate caps, and the frame's own timebase as a fallback. */
2375  OutputFilter *ofilter = &ofp->ofilter;
2376  FPSConvContext *fps = &ofp->fps;
2377  AVRational tb = (AVRational){ 0, 0 };
2378  AVRational fr;
2379  const FrameData *fd;
2380 
2381  fd = frame_data_c(frame);
2382 
2383  // apply -enc_time_base
2384  if (ofp->enc_timebase.num == ENC_TIME_BASE_DEMUX &&
2385  (fd->dec.tb.num <= 0 || fd->dec.tb.den <= 0)) {
2386  av_log(ofp, AV_LOG_ERROR,
2387  "Demuxing timebase not available - cannot use it for encoding\n");
2388  return AVERROR(EINVAL);
2389  }
2390 
2391  switch (ofp->enc_timebase.num) {
2392  case 0: break;
2393  case ENC_TIME_BASE_DEMUX: tb = fd->dec.tb; break;
2394  case ENC_TIME_BASE_FILTER: tb = frame->time_base; break;
2395  default: tb = ofp->enc_timebase; break;
2396  }
2397 
     /* Audio: default to 1/sample_rate, no framerate handling needed. */
2398  if (ofilter->type == AVMEDIA_TYPE_AUDIO) {
2399  tb = tb.num ? tb : (AVRational){ 1, frame->sample_rate };
2400  goto finish;
2401  }
2402 
2403  fr = fps->framerate;
2404  if (!fr.num) {
2405  AVRational fr_sink = av_buffersink_get_frame_rate(ofilter->filter);
2406  if (fr_sink.num > 0 && fr_sink.den > 0)
2407  fr = fr_sink;
2408  }
2409 
     /* CFR output needs some framerate; fall back to 25 fps when unknown. */
2410  if (fps->vsync_method == VSYNC_CFR || fps->vsync_method == VSYNC_VSCFR) {
2411  if (!fr.num && !fps->framerate_max.num) {
2412  fr = (AVRational){25, 1};
2413  av_log(ofp, AV_LOG_WARNING,
2414  "No information "
2415  "about the input framerate is available. Falling "
2416  "back to a default value of 25fps. Use the -r option "
2417  "if you want a different framerate.\n");
2418  }
2419 
2420  if (fps->framerate_max.num &&
2421  (av_q2d(fr) > av_q2d(fps->framerate_max) ||
2422  !fr.den))
2423  fr = fps->framerate_max;
2424  }
2425 
     /* Snap to the nearest supported framerate and/or clip its precision. */
2426  if (fr.num > 0) {
2427  if (fps->framerate_supported) {
2428  int idx = av_find_nearest_q_idx(fr, fps->framerate_supported);
2429  fr = fps->framerate_supported[idx];
2430  }
2431  if (fps->framerate_clip) {
2432  av_reduce(&fr.num, &fr.den,
2433  fr.num, fr.den, fps->framerate_clip);
2434  }
2435  }
2436 
     /* Timebase fallbacks: inverse framerate, then the frame's own timebase. */
2437  if (!(tb.num > 0 && tb.den > 0))
2438  tb = av_inv_q(fr);
2439  if (!(tb.num > 0 && tb.den > 0))
2440  tb = frame->time_base;
2441 
2442  fps->framerate = fr;
2443 finish:
2444  ofp->tb_out = tb;
2445  ofp->tb_out_locked = 1;
2446 
2447  return 0;
2448 }
2449 
/**
 * Rescale frame->pts from the filter timebase into tb_dst and return the pts
 * as a double with extra fractional precision (used by the fps conversion
 * code). Also rewrites frame->pts/time_base in place when pts is valid.
 *
 * NOTE(review): the subtrahend lines after the two '-' operators (original
 * lines 2464 and 2473) were lost from this extraction; presumably they
 * subtract start_time rescaled into the destination timebase -- TODO confirm
 * against upstream.
 */
2450 static double adjust_frame_pts_to_encoder_tb(void *logctx, AVFrame *frame,
2451  AVRational tb_dst, int64_t start_time)
2452 {
2453  double float_pts = AV_NOPTS_VALUE; // this is identical to frame.pts but with higher precision
2454 
2455  AVRational tb = tb_dst;
2456  AVRational filter_tb = frame->time_base;
  // scale the timebase denominator up so the rescale keeps sub-tick precision
2457  const int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
2458 
2459  if (frame->pts == AV_NOPTS_VALUE)
2460  goto early_exit;
2461 
2462  tb.den <<= extra_bits;
2463  float_pts = av_rescale_q(frame->pts, filter_tb, tb) -
2465  float_pts /= 1 << extra_bits;
2466  // when float_pts is not exactly an integer,
2467  // avoid exact midpoints to reduce the chance of rounding differences, this
2468  // can be removed in case the fps code is changed to work with integers
2469  if (float_pts != llrint(float_pts))
2470  float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
2471 
2472  frame->pts = av_rescale_q(frame->pts, filter_tb, tb_dst) -
2474  frame->time_base = tb_dst;
2475 
2476 early_exit:
2477 
2478  if (debug_ts) {
2479  av_log(logctx, AV_LOG_INFO,
2480  "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
2481  frame ? av_ts2str(frame->pts) : "NULL",
2482  av_ts2timestr(frame->pts, &tb_dst),
2483  float_pts, tb_dst.num, tb_dst.den);
2484  }
2485 
2486  return float_pts;
2487 }
2488 
/* NOTE(review): the signature line was lost from this extraction; the call
 * site below invokes this as median3(a, b, c) on int64_t history values, so
 * it is presumably
 *     static int64_t median3(int64_t a, int64_t b, int64_t c)
 * -- TODO confirm.
 *
 * Return the median of the three arguments, i.e. c clamped into
 * [min(a,b), max(a,b)].
 */
2490 {
2491  int64_t max2, min2, m;
2492 
2493  if (a >= b) {
2494  max2 = a;
2495  min2 = b;
2496  } else {
2497  max2 = b;
2498  min2 = a;
2499  }
  // m = min(c, max(a,b)); result = max(m, min(a,b))
2500  m = (c >= max2) ? max2 : c;
2501 
2502  return (m >= min2) ? m : min2;
2503 }
2504 
2505 
2506 /* Convert frame timestamps to the encoder timebase and decide how many times
2507  * should this (and possibly previous) frame be repeated in order to conform to
2508  * desired target framerate (if any).
2509  */
/* NOTE(review): the first signature line (original 2510) was lost from this
 * extraction; upstream this is presumably
 *     static void video_sync_process(OutputFilterPriv *ofp, AVFrame *frame,
 * -- TODO confirm. Outputs: *nb_frames = how many times to emit the current
 * frame, *nb_frames_prev = how many times to repeat the previous frame first.
 * A NULL frame means "flush". */
2511  int64_t *nb_frames, int64_t *nb_frames_prev)
2512 {
2513  OutputFilter *ofilter = &ofp->ofilter;
2514  FPSConvContext *fps = &ofp->fps;
2515  double delta0, delta, sync_ipts, duration;
2516 
  // flush: emit a number of frames based on the recent duplication history
2517  if (!frame) {
2518  *nb_frames_prev = *nb_frames = median3(fps->frames_prev_hist[0],
2519  fps->frames_prev_hist[1],
2520  fps->frames_prev_hist[2]);
2521 
2522  if (!*nb_frames && fps->last_dropped) {
2523  atomic_fetch_add(&ofilter->nb_frames_drop, 1);
2524  fps->last_dropped++;
2525  }
2526 
2527  goto finish;
2528  }
2529 
  // frame duration expressed in output timebase units
2530  duration = frame->duration * av_q2d(frame->time_base) / av_q2d(ofp->tb_out);
2531 
2532  sync_ipts = adjust_frame_pts_to_encoder_tb(ofilter->graph, frame,
2533  ofp->tb_out, ofp->ts_offset);
2534  /* delta0 is the "drift" between the input frame and
2535  * where it would fall in the output. */
2536  delta0 = sync_ipts - ofp->next_pts;
2537  delta = delta0 + duration;
2538 
2539  // tracks the number of times the PREVIOUS frame should be duplicated,
2540  // mostly for variable framerate (VFR)
2541  *nb_frames_prev = 0;
2542  /* by default, we output a single frame */
2543  *nb_frames = 1;
2544 
  /* NOTE(review): the condition lines between the next two lines (original
   * 2547/2548, including the opening #if matching the #endif below) were
   * lost from this extraction -- TODO restore from upstream. */
2545  if (delta0 < 0 &&
2546  delta > 0 &&
2549  && fps->vsync_method != VSYNC_DROP
2550 #endif
2551  ) {
  // frame starts before but ends after next_pts: clip it to start at
  // next_pts instead of duplicating/dropping
2552  if (delta0 < -0.6) {
2553  av_log(ofp, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
2554  } else
2555  av_log(ofp, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
2556  sync_ipts = ofp->next_pts;
2557  duration += delta0;
2558  delta0 = 0;
2559  }
2560 
2561  switch (fps->vsync_method) {
2562  case VSYNC_VSCFR:
  // VSCFR: do not duplicate to fill a leading gap on the very first frame
2563  if (fps->frame_number == 0 && delta0 >= 0.5) {
2564  av_log(ofp, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
2565  delta = duration;
2566  delta0 = 0;
2567  ofp->next_pts = llrint(sync_ipts);
2568  }
  /* fall through -- VSCFR behaves as CFR after the initial-gap handling */
2569  case VSYNC_CFR:
2570  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
2571  if (frame_drop_threshold && delta < frame_drop_threshold && fps->frame_number) {
2572  *nb_frames = 0;
2573  } else if (delta < -1.1)
2574  *nb_frames = 0;
2575  else if (delta > 1.1) {
2576  *nb_frames = llrintf(delta);
2577  if (delta0 > 1.1)
2578  *nb_frames_prev = llrintf(delta0 - 0.6);
2579  }
2580  frame->duration = 1;
2581  break;
2582  case VSYNC_VFR:
2583  if (delta <= -0.6)
2584  *nb_frames = 0;
2585  else if (delta > 0.6)
2586  ofp->next_pts = llrint(sync_ipts);
2587  frame->duration = llrint(duration);
2588  break;
2589 #if FFMPEG_OPT_VSYNC_DROP
2590  case VSYNC_DROP:
2591 #endif
2592  case VSYNC_PASSTHROUGH:
2593  ofp->next_pts = llrint(sync_ipts);
2594  frame->duration = llrint(duration);
2595  break;
2596  default:
2597  av_assert0(0);
2598  }
2599 
2600 finish:
  // shift the duplication history and record this frame's value
2601  memmove(fps->frames_prev_hist + 1,
2602  fps->frames_prev_hist,
2603  sizeof(fps->frames_prev_hist[0]) * (FF_ARRAY_ELEMS(fps->frames_prev_hist) - 1));
2604  fps->frames_prev_hist[0] = *nb_frames_prev;
2605 
2606  if (*nb_frames_prev == 0 && fps->last_dropped) {
2607  atomic_fetch_add(&ofilter->nb_frames_drop, 1);
2608  av_log(ofp, AV_LOG_VERBOSE,
2609  "*** dropping frame %"PRId64" at ts %"PRId64"\n",
2610  fps->frame_number, fps->last_frame->pts);
2611  }
  // account for duplicated frames and warn when duplication gets excessive
2612  if (*nb_frames > (*nb_frames_prev && fps->last_dropped) + (*nb_frames > *nb_frames_prev)) {
2613  uint64_t nb_frames_dup;
2614  if (*nb_frames > dts_error_threshold * 30) {
2615  av_log(ofp, AV_LOG_ERROR, "%"PRId64" frame duplication too large, skipping\n", *nb_frames - 1);
2616  atomic_fetch_add(&ofilter->nb_frames_drop, 1);
2617  *nb_frames = 0;
2618  return;
2619  }
2620  nb_frames_dup = atomic_fetch_add(&ofilter->nb_frames_dup,
2621  *nb_frames - (*nb_frames_prev && fps->last_dropped) - (*nb_frames > *nb_frames_prev));
2622  av_log(ofp, AV_LOG_VERBOSE, "*** %"PRId64" dup!\n", *nb_frames - 1);
2623  if (nb_frames_dup > fps->dup_warning) {
2624  av_log(ofp, AV_LOG_WARNING, "More than %"PRIu64" frames duplicated\n", fps->dup_warning);
2625  fps->dup_warning *= 10;
2626  }
2627  }
2628 
2629  fps->last_dropped = *nb_frames == *nb_frames_prev && frame;
2630  fps->dropped_keyframe |= fps->last_dropped && (frame->flags & AV_FRAME_FLAG_KEY);
2631 }
2632 
/* Mark an input filter as finished (idempotent).
 * NOTE(review): one line inside the if (original line 2638) was lost from
 * this extraction; presumably it notifies the scheduler that this input
 * reached EOF -- TODO confirm against upstream. */
2633 static void close_input(InputFilterPriv *ifp)
2634 {
2636 
2637  if (!ifp->eof) {
2639  ifp->eof = 1;
2640  }
2641 }
2642 
/* NOTE(review): several lines were lost from this extraction: the signature
 * (original 2643), a local declaration (2645, presumably FilterGraphPriv *fgp),
 * the start of the clone_side_data() call (2672/2673) and a cleanup line
 * (2688). Upstream this is presumably
 *     static int close_output(OutputFilterPriv *ofp, FilterGraphThread *fgt)
 * -- TODO confirm.
 *
 * Finalize an output: if no frame was ever produced, send a parameters-only
 * dummy frame so the encoder can still be initialized, then send EOF
 * downstream. */
2644 {
2646  int ret;
2647 
2648  // we are finished and no frames were ever seen at this output,
2649  // at least initialize the encoder with a dummy frame
2650  if (!fgt->got_frame) {
2651  AVFrame *frame = fgt->frame;
2652  FrameData *fd;
2653 
  // fill the negotiated stream parameters into the (data-less) frame
2654  frame->time_base = ofp->tb_out;
2655  frame->format = ofp->format;
2656 
2657  frame->width = ofp->width;
2658  frame->height = ofp->height;
2659  frame->sample_aspect_ratio = ofp->sample_aspect_ratio;
2660 
2661  frame->sample_rate = ofp->sample_rate;
2662  if (ofp->ch_layout.nb_channels) {
2663  ret = av_channel_layout_copy(&frame->ch_layout, &ofp->ch_layout);
2664  if (ret < 0)
2665  return ret;
2666  }
2667 
2668  fd = frame_data(frame);
2669  if (!fd)
2670  return AVERROR(ENOMEM);
2671 
  /* NOTE(review): the first line(s) of this call were lost (original
   * 2672/2673, presumably ret = clone_side_data(...)) -- TODO confirm. */
2674  ofp->side_data, ofp->nb_side_data, 0);
2675  if (ret < 0)
2676  return ret;
2677 
2678  fd->frame_rate_filter = ofp->fps.framerate;
2679 
  // must be a parameters-only frame, carrying no data
2680  av_assert0(!frame->buf[0]);
2681 
2682  av_log(ofp, AV_LOG_WARNING,
2683  "No filtered frames for output stream, trying to "
2684  "initialize anyway.\n");
2685 
2686  ret = sch_filter_send(fgp->sch, fgp->sch_idx, ofp->ofilter.index, frame);
2687  if (ret < 0) {
  /* NOTE(review): a cleanup line (original 2688) was lost here. */
2689  return ret;
2690  }
2691  }
2692 
2693  fgt->eof_out[ofp->ofilter.index] = 1;
2694 
  // EOF from the scheduler just means all consumers are done -- not an error
2695  ret = sch_filter_send(fgp->sch, fgp->sch_idx, ofp->ofilter.index, NULL);
2696  return (ret == AVERROR_EOF) ? 0 : ret;
2697 }
2698 
/* NOTE(review): the first signature line (original 2699) and a local
 * declaration (2702, presumably FilterGraphPriv *fgp) were lost from this
 * extraction; upstream this is presumably
 *     static int fg_output_frame(OutputFilterPriv *ofp, FilterGraphThread *fgt,
 * -- TODO confirm.
 *
 * Send one filtered frame (or EOF when frame is NULL) to this output's
 * consumers, applying video framerate conversion (frame dup/drop) first. */
2700  AVFrame *frame)
2701 {
2703  AVFrame *frame_prev = ofp->fps.last_frame;
2704  enum AVMediaType type = ofp->ofilter.type;
2705 
2706  int64_t nb_frames = !!frame, nb_frames_prev = 0;
2707 
2708  if (type == AVMEDIA_TYPE_VIDEO && (frame || fgt->got_frame))
2709  video_sync_process(ofp, frame, &nb_frames, &nb_frames_prev);
2710 
2711  for (int64_t i = 0; i < nb_frames; i++) {
2712  AVFrame *frame_out;
2713  int ret;
2714 
2715  if (type == AVMEDIA_TYPE_VIDEO) {
  // the first nb_frames_prev iterations repeat the previous frame
2716  AVFrame *frame_in = (i < nb_frames_prev && frame_prev->buf[0]) ?
2717  frame_prev : frame;
2718  if (!frame_in)
2719  break;
2720 
2721  frame_out = fgp->frame_enc;
2722  ret = av_frame_ref(frame_out, frame_in);
2723  if (ret < 0)
2724  return ret;
2725 
2726  frame_out->pts = ofp->next_pts;
2727 
  // if a keyframe was dropped, force the next output frame to be one
2728  if (ofp->fps.dropped_keyframe) {
2729  frame_out->flags |= AV_FRAME_FLAG_KEY;
2730  ofp->fps.dropped_keyframe = 0;
2731  }
2732  } else {
  /* NOTE(review): the subtrahend of this rescale (original line 2735)
   * was lost from this extraction -- TODO confirm against upstream. */
2733  frame->pts = (frame->pts == AV_NOPTS_VALUE) ? ofp->next_pts :
2734  av_rescale_q(frame->pts, frame->time_base, ofp->tb_out) -
2736 
2737  frame->time_base = ofp->tb_out;
  // audio duration derived from the sample count
2738  frame->duration = av_rescale_q(frame->nb_samples,
2739  (AVRational){ 1, frame->sample_rate },
2740  ofp->tb_out);
2741 
2742  ofp->next_pts = frame->pts + frame->duration;
2743 
2744  frame_out = frame;
2745  }
2746 
2747  // send the frame to consumers
2748  ret = sch_filter_send(fgp->sch, fgp->sch_idx, ofp->ofilter.index, frame_out);
2749  if (ret < 0) {
2750  av_frame_unref(frame_out);
2751 
  // all consumers are gone: mark this output as done
2752  if (!fgt->eof_out[ofp->ofilter.index]) {
2753  fgt->eof_out[ofp->ofilter.index] = 1;
2754  fgp->nb_outputs_done++;
2755  }
2756 
2757  return ret == AVERROR_EOF ? 0 : ret;
2758  }
2759 
2760  if (type == AVMEDIA_TYPE_VIDEO) {
2761  ofp->fps.frame_number++;
2762  ofp->next_pts++;
2763 
  // duplicated copies of the frame must not all be marked keyframes
2764  if (i == nb_frames_prev && frame)
2765  frame->flags &= ~AV_FRAME_FLAG_KEY;
2766  }
2767 
2768  fgt->got_frame = 1;
2769  }
2770 
  // remember this frame for possible duplication on the next call
2771  if (frame && frame_prev) {
2772  av_frame_unref(frame_prev);
2773  av_frame_move_ref(frame_prev, frame);
2774  }
2775 
2776  if (!frame)
2777  return close_output(ofp, fgt);
2778 
2779  return 0;
2780 }
2781 
/* NOTE(review): numerous lines were lost from this extraction: the first
 * signature line (original 2782), local declarations (2785/2786), the
 * buffersink read that produces 'ret' (2790/2791), and several cleanup /
 * declaration lines (2805, 2809, 2821, 2828, 2832, 2834, 2840, 2849, 2858).
 * Upstream this is presumably
 *     static int fg_output_step(OutputFilterPriv *ofp, FilterGraphThread *fgt,
 * -- TODO confirm.
 *
 * Pull one frame from this output's buffersink and forward it to consumers.
 * Returns 1 when no frame is available (EAGAIN/EOF), 0 on success, <0 on
 * error. */
2783  AVFrame *frame)
2784 {
2787  FrameData *fd;
2788  int ret;
2789 
2792  if (ret == AVERROR_EOF && !fgt->eof_out[ofp->ofilter.index]) {
  // the sink reached EOF: flush the fps converter, send EOF downstream
2793  ret = fg_output_frame(ofp, fgt, NULL);
2794  return (ret < 0) ? ret : 1;
2795  } else if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
2796  return 1;
2797  } else if (ret < 0) {
2798  av_log(ofp, AV_LOG_WARNING,
2799  "Error in retrieving a frame from the filtergraph: %s\n",
2800  av_err2str(ret));
2801  return ret;
2802  }
2803 
  // consumers are already gone: discard the frame
2804  if (fgt->eof_out[ofp->ofilter.index]) {
2806  return 0;
2807  }
2808 
2810 
2811  if (debug_ts)
2812  av_log(ofp, AV_LOG_INFO, "filter_raw -> pts:%s pts_time:%s time_base:%d/%d\n",
2813  av_ts2str(frame->pts), av_ts2timestr(frame->pts, &frame->time_base),
2814  frame->time_base.num, frame->time_base.den);
2815 
2816  // Choose the output timebase the first time we get a frame.
2817  if (!ofp->tb_out_locked) {
2818  ret = choose_out_timebase(ofp, frame);
2819  if (ret < 0) {
2820  av_log(ofp, AV_LOG_ERROR, "Could not choose an output time base\n");
2822  return ret;
2823  }
2824  }
2825 
2826  fd = frame_data(frame);
2827  if (!fd) {
2829  return AVERROR(ENOMEM);
2830  }
2831 
  // attach the output side data only to the first frame
2833  if (!fgt->got_frame) {
2835  ofp->side_data, ofp->nb_side_data, 0);
2836  if (ret < 0)
2837  return ret;
2838  }
2839 
2841 
2842  // only use bits_per_raw_sample passed through from the decoder
2843  // if the filtergraph did not touch the frame data
2844  if (!fgp->is_meta)
2845  fd->bits_per_raw_sample = 0;
2846 
2847  if (ofp->ofilter.type == AVMEDIA_TYPE_VIDEO) {
  // synthesize a duration from the framerate when the filter set none
2848  if (!frame->duration) {
2850  if (fr.num > 0 && fr.den > 0)
2851  frame->duration = av_rescale_q(1, av_inv_q(fr), frame->time_base);
2852  }
2853 
2854  fd->frame_rate_filter = ofp->fps.framerate;
2855  }
2856 
2857  ret = fg_output_frame(ofp, fgt, frame);
2859  if (ret < 0)
2860  return ret;
2861 
2862  return 0;
2863 }
2864 
2865 /* retrieve all frames available at filtergraph outputs
2866  * and send them to consumers */
/* NOTE(review): several lines were lost from this extraction: the first
 * signature line (original 2867), per-loop declarations (2876, 2895) and the
 * call whose result is tested below (2909, presumably requesting a frame from
 * the graph). Upstream this is presumably
 *     static int read_frames(FilterGraph *fg, FilterGraphThread *fgt,
 * -- TODO confirm. */
2868  AVFrame *frame)
2869 {
2870  FilterGraphPriv *fgp = fgp_from_fg(fg);
2871  int did_step = 0;
2872 
2873  // graph not configured, just select the input to request
2874  if (!fgt->graph) {
2875  for (int i = 0; i < fg->nb_inputs; i++) {
2877  if (ifp->format < 0 && !fgt->eof_in[i]) {
2878  fgt->next_in = i;
2879  return 0;
2880  }
2881  }
2882 
2883  // This state - graph is not configured, but all inputs are either
2884  // initialized or EOF - should be unreachable because sending EOF to a
2885  // filter without even a fallback format should fail
2886  av_assert0(0);
2887  return AVERROR_BUG;
2888  }
2889 
2890  while (fgp->nb_outputs_done < fg->nb_outputs) {
2891  int ret;
2892 
2893  /* Reap all buffers present in the buffer sinks */
2894  for (int i = 0; i < fg->nb_outputs; i++) {
2896 
2897  ret = 0;
  // fg_output_step() returns 1 once this sink is drained
2898  while (!ret) {
2899  ret = fg_output_step(ofp, fgt, frame);
2900  if (ret < 0)
2901  return ret;
2902  }
2903  }
2904 
2905  // return after one iteration, so that scheduler can rate-control us
2906  if (did_step && fgp->have_sources)
2907  return 0;
2908 
2910  if (ret == AVERROR(EAGAIN)) {
  // the graph wants more input; pick the best input to feed next
2911  fgt->next_in = choose_input(fg, fgt);
2912  return 0;
2913  } else if (ret < 0) {
2914  if (ret == AVERROR_EOF)
2915  av_log(fg, AV_LOG_VERBOSE, "Filtergraph returned EOF, finishing\n");
2916  else
2917  av_log(fg, AV_LOG_ERROR,
2918  "Error requesting a frame from the filtergraph: %s\n",
2919  av_err2str(ret));
2920  return ret;
2921  }
  // no specific input needed: signal "no request" to the scheduler
2922  fgt->next_in = fg->nb_inputs;
2923 
2924  did_step = 1;
2925  }
2926 
  // all outputs finished
2927  return AVERROR_EOF;
2928 }
2929 
/* NOTE(review): the signature line (original 2930) was lost from this
 * extraction; upstream this is presumably
 *     static void sub2video_heartbeat(InputFilter *ifilter, int64_t pts, AVRational tb)
 * -- TODO confirm.
 *
 * Keep the sub2video overlay advancing in the absence of new subtitle data:
 * either refresh the current subpicture or re-push the last frame. */
2931 {
2932  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2933  int64_t pts2;
2934 
2935  /* subtitles seem to be usually muxed ahead of other streams;
2936  if not, subtracting a larger time here is necessary */
2937  pts2 = av_rescale_q(pts, tb, ifp->time_base) - 1;
2938 
2939  /* do not send the heartbeat frame if the subtitle is already ahead */
2940  if (pts2 <= ifp->sub2video.last_pts)
2941  return;
2942 
2943  if (pts2 >= ifp->sub2video.end_pts || ifp->sub2video.initialize)
2944  /* if we have hit the end of the current displayed subpicture,
2945  or if we need to initialize the system, update the
2946  overlaid subpicture and its start/end times */
2947  sub2video_update(ifp, pts2 + 1, NULL);
2948  else
2949  sub2video_push_ref(ifp, pts2);
2950 }
2951 
/* Feed one subtitle-as-video frame (or a heartbeat, or EOF) into the
 * filtergraph's buffersrc; when 'buffer' is set (graph not yet configured),
 * queue the frame instead.
 * NOTE(review): one line (original 2967, presumably moving 'frame' into
 * 'tmp') was lost from this extraction -- TODO confirm against upstream. */
2952 static int sub2video_frame(InputFilter *ifilter, AVFrame *frame, int buffer)
2953 {
2954  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2955  int ret;
2956 
2957  if (buffer) {
2958  AVFrame *tmp;
2959 
2960  if (!frame)
2961  return 0;
2962 
2963  tmp = av_frame_alloc();
2964  if (!tmp)
2965  return AVERROR(ENOMEM);
2966 
2968 
2969  ret = av_fifo_write(ifp->frame_queue, &tmp, 1);
2970  if (ret < 0) {
2971  av_frame_free(&tmp);
2972  return ret;
2973  }
2974 
2975  return 0;
2976  }
2977 
2978  // heartbeat frame
  // (a frame without data buffers carries only a timestamp)
2979  if (frame && !frame->buf[0]) {
2980  sub2video_heartbeat(ifilter, frame->pts, frame->time_base);
2981  return 0;
2982  }
2983 
2984  if (!frame) {
  // EOF: end the current subpicture, then signal EOF to the buffersrc
2985  if (ifp->sub2video.end_pts < INT64_MAX)
2986  sub2video_update(ifp, INT64_MAX, NULL);
2987 
2988  return av_buffersrc_add_frame(ifilter->filter, NULL);
2989  }
2990 
2991  ifp->width = frame->width ? frame->width : ifp->width;
2992  ifp->height = frame->height ? frame->height : ifp->height;
2993 
  // the AVSubtitle is carried inside the frame's first data buffer
2994  sub2video_update(ifp, INT64_MIN, (const AVSubtitle*)frame->buf[0]->data);
2995 
2996  return 0;
2997 }
2998 
/* Send EOF on an input filter: either close its buffersrc (graph already
 * configured), or fill in the fallback parameters and configure the graph if
 * that is now possible.
 * NOTE(review): several lines were lost from this extraction (original 3012,
 * 3014, 3024, 3030, 3035), including the buffersrc close call that produces
 * 'ret' and the start of the channel-layout copy -- TODO confirm against
 * upstream. */
2999 static int send_eof(FilterGraphThread *fgt, InputFilter *ifilter,
3000  int64_t pts, AVRational tb)
3001 {
3002  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
3003  int ret;
3004 
  // EOF was already sent for this input
3005  if (fgt->eof_in[ifilter->index])
3006  return 0;
3007 
3008  fgt->eof_in[ifilter->index] = 1;
3009 
3010  if (ifilter->filter) {
3011  pts = av_rescale_q_rnd(pts, tb, ifp->time_base,
3013 
3015  if (ret < 0)
3016  return ret;
3017  } else {
3018  if (ifp->format < 0) {
3019  // the filtergraph was never configured, use the fallback parameters
3020  ifp->format = ifp->opts.fallback->format;
3021  ifp->sample_rate = ifp->opts.fallback->sample_rate;
3022  ifp->width = ifp->opts.fallback->width;
3023  ifp->height = ifp->opts.fallback->height;
3025  ifp->color_space = ifp->opts.fallback->colorspace;
3026  ifp->color_range = ifp->opts.fallback->color_range;
3027  ifp->alpha_mode = ifp->opts.fallback->alpha_mode;
3028  ifp->time_base = ifp->opts.fallback->time_base;
3029 
3031  &ifp->opts.fallback->ch_layout);
3032  if (ret < 0)
3033  return ret;
3034 
3036  ret = clone_side_data(&ifp->side_data, &ifp->nb_side_data,
3037  ifp->opts.fallback->side_data,
3038  ifp->opts.fallback->nb_side_data, 0);
3039  if (ret < 0)
3040  return ret;
3041 
  // with the fallback parameters set, the graph may now be configurable
3042  if (ifilter_has_all_input_formats(ifilter->graph)) {
3043  ret = configure_filtergraph(ifilter->graph, fgt);
3044  if (ret < 0) {
3045  av_log(ifilter->graph, AV_LOG_ERROR, "Error initializing filters!\n");
3046  return ret;
3047  }
3048  }
3049  }
3050 
3051  if (ifp->format < 0) {
3052  av_log(ifilter->graph, AV_LOG_ERROR,
3053  "Cannot determine format of input %s after EOF\n",
3054  ifp->opts.name);
3055  return AVERROR_INVALIDDATA;
3056  }
3057  }
3058 
3059  return 0;
3060 }
3061 
/* Bit flags describing which input-frame parameters changed relative to the
 * configured filtergraph (used by send_frame() to decide whether to
 * reconfigure the graph).
 * NOTE(review): the opening 'enum' line (original 3062) was lost from this
 * extraction -- TODO confirm against upstream. */
3063  VIDEO_CHANGED = (1 << 0),
3064  AUDIO_CHANGED = (1 << 1),
3065  MATRIX_CHANGED = (1 << 2),
3066  DOWNMIX_CHANGED = (1 << 3),
3067  HWACCEL_CHANGED = (1 << 4)
3068 };
3069 
/**
 * Map a NULL string pointer to the literal "unknown", so callers can safely
 * embed possibly-missing format/colorspace names in log messages.
 */
static const char *unknown_if_null(const char *str)
{
    if (str)
        return str;
    return "unknown";
}
3074 
/* Send one input frame into the filtergraph, reconfiguring the graph first if
 * the frame's parameters changed (and reinit is allowed), or buffering the
 * frame while the graph cannot be configured yet.
 * NOTE(review): multiple lines were lost from this extraction (original 3075,
 * 3103, 3110, 3120, 3132, 3139, 3153, 3169, 3214, 3219, 3221-3222, 3224),
 * including the first signature line, the side-data lookups that define 'sd',
 * the reinit calls producing 'ret', the AVBPrint init and the final buffersrc
 * submission -- TODO restore from upstream before relying on this listing. */
3076  InputFilter *ifilter, AVFrame *frame)
3077 {
3078  FilterGraphPriv *fgp = fgp_from_fg(fg);
3079  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
3080  FrameData *fd;
3081  AVFrameSideData *sd;
3082  int need_reinit = 0, ret;
3083 
3084  /* determine if the parameters for this input changed */
3085  switch (ifilter->type) {
3086  case AVMEDIA_TYPE_AUDIO:
3087  if (ifp->format != frame->format ||
3088  ifp->sample_rate != frame->sample_rate ||
3089  av_channel_layout_compare(&ifp->ch_layout, &frame->ch_layout))
3090  need_reinit |= AUDIO_CHANGED;
3091  break;
3092  case AVMEDIA_TYPE_VIDEO:
3093  if (ifp->format != frame->format ||
3094  ifp->width != frame->width ||
3095  ifp->height != frame->height ||
3096  ifp->color_space != frame->colorspace ||
3097  ifp->color_range != frame->color_range ||
3098  ifp->alpha_mode != frame->alpha_mode)
3099  need_reinit |= VIDEO_CHANGED;
3100  break;
3101  }
3102 
  // display matrix side data changed or (dis)appeared
3104  if (!ifp->displaymatrix_present ||
3105  memcmp(sd->data, ifp->displaymatrix, sizeof(ifp->displaymatrix)))
3106  need_reinit |= MATRIX_CHANGED;
3107  } else if (ifp->displaymatrix_present)
3108  need_reinit |= MATRIX_CHANGED;
3109 
  // downmix info side data changed or (dis)appeared
3111  if (!ifp->downmixinfo_present ||
3112  memcmp(sd->data, &ifp->downmixinfo, sizeof(ifp->downmixinfo)))
3113  need_reinit |= DOWNMIX_CHANGED;
3114  } else if (ifp->downmixinfo_present)
3115  need_reinit |= DOWNMIX_CHANGED;
3116 
  // dropchanged mode: drop the frame rather than reinitialize the graph
3117  if (need_reinit && fgt->graph && (ifp->opts.flags & IFILTER_FLAG_DROPCHANGED)) {
3118  ifp->nb_dropped++;
3119  av_log_once(fg, AV_LOG_WARNING, AV_LOG_DEBUG, &ifp->drop_warned, "Avoiding reinit; dropping frame pts: %s bound for %s\n", av_ts2str(frame->pts), ifilter->name);
3121  return 0;
3122  }
3123 
3124  if (!(ifp->opts.flags & IFILTER_FLAG_REINIT) && fgt->graph)
3125  need_reinit = 0;
3126 
3127  if (!!ifp->hw_frames_ctx != !!frame->hw_frames_ctx ||
3128  (ifp->hw_frames_ctx && ifp->hw_frames_ctx->data != frame->hw_frames_ctx->data))
3129  need_reinit |= HWACCEL_CHANGED;
3130 
3131  if (need_reinit) {
3133  if (ret < 0)
3134  return ret;
3135 
3136  /* Inputs bound to a filtergraph output will have some fields unset.
3137  * Handle them here */
3138  if (ifp->ofilter_src) {
3140  if (ret < 0)
3141  return ret;
3142  }
3143  }
3144 
3145  /* (re)init the graph if possible, otherwise buffer the frame and return */
3146  if (need_reinit || !fgt->graph) {
3147  AVFrame *tmp = av_frame_alloc();
3148 
3149  if (!tmp)
3150  return AVERROR(ENOMEM);
3151 
  // not all inputs have formats yet: queue the frame for later
3152  if (!ifilter_has_all_input_formats(fg)) {
3154 
3155  ret = av_fifo_write(ifp->frame_queue, &tmp, 1);
3156  if (ret < 0)
3157  av_frame_free(&tmp);
3158 
3159  return ret;
3160  }
3161 
  // drain the old graph before reconfiguring
3162  ret = fgt->graph ? read_frames(fg, fgt, tmp) : 0;
3163  av_frame_free(&tmp);
3164  if (ret < 0)
3165  return ret;
3166 
3167  if (fgt->graph) {
  // build a human-readable description of why we reconfigure
3168  AVBPrint reason;
3170  if (need_reinit & AUDIO_CHANGED) {
3171  const char *sample_format_name = av_get_sample_fmt_name(frame->format);
3172  av_bprintf(&reason, "audio parameters changed to %d Hz, ", frame->sample_rate);
3173  av_channel_layout_describe_bprint(&frame->ch_layout, &reason);
3174  av_bprintf(&reason, ", %s, ", unknown_if_null(sample_format_name));
3175  }
3176  if (need_reinit & VIDEO_CHANGED) {
3177  const char *pixel_format_name = av_get_pix_fmt_name(frame->format);
3178  const char *color_space_name = av_color_space_name(frame->colorspace);
3179  const char *color_range_name = av_color_range_name(frame->color_range);
3180  const char *alpha_mode = av_alpha_mode_name(frame->alpha_mode);
3181  av_bprintf(&reason, "video parameters changed to %s(%s, %s), %dx%d, %s alpha,",
3182  unknown_if_null(pixel_format_name), unknown_if_null(color_range_name),
3183  unknown_if_null(color_space_name), frame->width, frame->height,
3184  unknown_if_null(alpha_mode));
3185  }
3186  if (need_reinit & MATRIX_CHANGED)
3187  av_bprintf(&reason, "display matrix changed, ");
  // NOTE(review): "medatata" is a typo for "metadata" in the log string
  // below; left unchanged here as this edit touches comments only.
3188  if (need_reinit & DOWNMIX_CHANGED)
3189  av_bprintf(&reason, "downmix medatata changed, ");
3190  if (need_reinit & HWACCEL_CHANGED)
3191  av_bprintf(&reason, "hwaccel changed, ");
3192  if (reason.len > 1)
3193  reason.str[reason.len - 2] = '\0'; // remove last comma
3194  av_log(fg, AV_LOG_INFO, "Reconfiguring filter graph%s%s\n", reason.len ? " because " : "", reason.str);
3195  } else {
3196  /* Choke all input to avoid buffering excessive frames while the
3197  * initial filter graph is being configured, and before we have a
3198  * preferred input */
3199  sch_filter_choke_inputs(fgp->sch, fgp->sch_idx);
3200  }
3201 
3202  ret = configure_filtergraph(fg, fgt);
3203  if (ret < 0) {
3204  av_log(fg, AV_LOG_ERROR, "Error reinitializing filters!\n");
3205  return ret;
3206  }
3207  }
3208 
  // rescale timestamps into the buffersrc's timebase
3209  frame->pts = av_rescale_q(frame->pts, frame->time_base, ifp->time_base);
3210  frame->duration = av_rescale_q(frame->duration, frame->time_base, ifp->time_base);
3211  frame->time_base = ifp->time_base;
3212 
3213  if (ifp->displaymatrix_applied)
3215 
3216  fd = frame_data(frame);
3217  if (!fd)
3218  return AVERROR(ENOMEM);
3220 
3223  if (ret < 0) {
3225  if (ret != AVERROR_EOF)
3226  av_log(fg, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
3227  return ret;
3228  }
3229 
3230  return 0;
3231 }
3232 
/* Build the OS thread name for this filtergraph thread: simple graphs are
 * named after their output, complex graphs after the graph index.
 * NOTE(review): two lines were lost from this extraction (original 3239, the
 * snprintf argument supplying the '%c' media-type character, and 3245,
 * presumably the call that applies 'name' to the thread) -- TODO confirm. */
3233 static void fg_thread_set_name(const FilterGraph *fg)
3234 {
3235  char name[16];
3236  if (filtergraph_is_simple(fg)) {
3237  OutputFilterPriv *ofp = ofp_from_ofilter(fg->outputs[0]);
3238  snprintf(name, sizeof(name), "%cf%s",
3240  ofp->ofilter.output_name);
3241  } else {
3242  snprintf(name, sizeof(name), "fc%d", fg->index);
3243  }
3244 
3246 }
3247 
/* Free all per-thread filtering state and reset the struct to zero.
 * NOTE(review): the signature line (original 3248, presumably
 * static void fg_thread_uninit(FilterGraphThread *fgt)) and the queue free
 * call (3254) were lost from this extraction -- TODO confirm. */
3249 {
3250  if (fgt->frame_queue_out) {
  // drain and free any frames still queued for output
3251  AVFrame *frame;
3252  while (av_fifo_read(fgt->frame_queue_out, &frame, 1) >= 0)
3253  av_frame_free(&frame);
3255  }
3256 
3257  av_frame_free(&fgt->frame);
3258  av_freep(&fgt->eof_in);
3259  av_freep(&fgt->eof_out);
3260 
3261  avfilter_graph_free(&fgt->graph);
3262 
3263  memset(fgt, 0, sizeof(*fgt));
3264 }
3265 
/* Allocate the per-thread filtering state (scratch frame, per-input/output
 * EOF flags, output frame queue); on any failure everything allocated so far
 * is freed and AVERROR(ENOMEM) is returned.
 * NOTE(review): the allocation of fgt->frame_queue_out (original line 3282)
 * was lost from this extraction -- TODO confirm against upstream. */
3266 static int fg_thread_init(FilterGraphThread *fgt, const FilterGraph *fg)
3267 {
3268  memset(fgt, 0, sizeof(*fgt));
3269 
3270  fgt->frame = av_frame_alloc();
3271  if (!fgt->frame)
3272  goto fail;
3273 
3274  fgt->eof_in = av_calloc(fg->nb_inputs, sizeof(*fgt->eof_in));
3275  if (!fgt->eof_in)
3276  goto fail;
3277 
3278  fgt->eof_out = av_calloc(fg->nb_outputs, sizeof(*fgt->eof_out));
3279  if (!fgt->eof_out)
3280  goto fail;
3281 
3283  if (!fgt->frame_queue_out)
3284  goto fail;
3285 
3286  return 0;
3287 
3288 fail:
  // fg_thread_uninit() is safe on a partially-initialized struct
3289  fg_thread_uninit(fgt);
3290  return AVERROR(ENOMEM);
3291 }
3292 
/* Main loop of a filtergraph thread: receive frames/EOF/commands from the
 * scheduler, feed them into the graph, and forward filtered frames to
 * consumers until all inputs and outputs are finished.
 * NOTE(review): several lines were lost from this extraction (original 3308,
 * 3363, 3392-3393, 3399, 3411), including the condition guarding the initial
 * configure_filtergraph() call and per-iteration declarations near the end
 * -- TODO restore from upstream. */
3293 static int filter_thread(void *arg)
3294 {
3295  FilterGraphPriv *fgp = arg;
3296  FilterGraph *fg = &fgp->fg;
3297 
3298  FilterGraphThread fgt;
3299  int ret = 0, input_status = 0;
3300 
3301  ret = fg_thread_init(&fgt, fg);
3302  if (ret < 0)
3303  goto finish;
3304 
3305  fg_thread_set_name(fg);
3306 
3307  // if we have all input parameters the graph can now be configured
3309  ret = configure_filtergraph(fg, &fgt);
3310  if (ret < 0) {
3311  av_log(fg, AV_LOG_ERROR, "Error configuring filter graph: %s\n",
3312  av_err2str(ret));
3313  goto finish;
3314  }
3315  }
3316 
3317  while (1) {
3318  InputFilter *ifilter;
3319  InputFilterPriv *ifp = NULL;
3320  enum FrameOpaque o;
3321  unsigned input_idx = fgt.next_in;
3322 
3323  input_status = sch_filter_receive(fgp->sch, fgp->sch_idx,
3324  &input_idx, fgt.frame);
3325  if (input_status == AVERROR_EOF) {
3326  av_log(fg, AV_LOG_VERBOSE, "Filtering thread received EOF\n");
3327  break;
3328  } else if (input_status == AVERROR(EAGAIN)) {
3329  // should only happen when we didn't request any input
3330  av_assert0(input_idx == fg->nb_inputs);
3331  goto read_frames;
3332  }
3333  av_assert0(input_status >= 0);
3334 
  // NOTE(review): this assignment appears twice (original lines 3335 and
  // 3337); the second is redundant -- possibly an extraction artifact,
  // verify against upstream.
3335  o = (intptr_t)fgt.frame->opaque;
3336 
3337  o = (intptr_t)fgt.frame->opaque;
3338 
3339  // message on the control stream
3340  if (input_idx == fg->nb_inputs) {
3341  FilterCommand *fc;
3342 
3343  av_assert0(o == FRAME_OPAQUE_SEND_COMMAND && fgt.frame->buf[0]);
3344 
3345  fc = (FilterCommand*)fgt.frame->buf[0]->data;
3346  send_command(fg, fgt.graph, fc->time, fc->target, fc->command, fc->arg,
3347  fc->all_filters);
3348  av_frame_unref(fgt.frame);
3349  continue;
3350  }
3351 
3352  // we received an input frame or EOF
3353  ifilter = fg->inputs[input_idx];
3354  ifp = ifp_from_ifilter(ifilter);
3355 
3356  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE) {
3357  int hb_frame = input_status >= 0 && o == FRAME_OPAQUE_SUB_HEARTBEAT;
3358  ret = sub2video_frame(ifilter, (fgt.frame->buf[0] || hb_frame) ? fgt.frame : NULL,
3359  !fgt.graph);
3360  } else if (fgt.frame->buf[0]) {
3361  ret = send_frame(fg, &fgt, ifilter, fgt.frame);
3362  } else {
3364  ret = send_eof(&fgt, ifilter, fgt.frame->pts, fgt.frame->time_base);
3365  }
3366  av_frame_unref(fgt.frame);
3367  if (ret == AVERROR_EOF) {
  // this input will not accept any more frames
3368  av_log(fg, AV_LOG_VERBOSE, "Input %u no longer accepts new data\n",
3369  input_idx);
3370  close_input(ifp);
3371  continue;
3372  }
3373  if (ret < 0)
3374  goto finish;
3375 
3376 read_frames:
3377  // retrieve all newly available frames
3378  ret = read_frames(fg, &fgt, fgt.frame);
3379  if (ret == AVERROR_EOF) {
3380  av_log(fg, AV_LOG_VERBOSE, "All consumers returned EOF\n");
3381  if (ifp && ifp->opts.flags & IFILTER_FLAG_DROPCHANGED)
3382  av_log(fg, AV_LOG_INFO, "Total changed input frames dropped : %"PRId64"\n", ifp->nb_dropped);
3383  break;
3384  } else if (ret < 0) {
3385  av_log(fg, AV_LOG_ERROR, "Error sending frames to consumers: %s\n",
3386  av_err2str(ret));
3387  goto finish;
3388  }
3389 
3390  // ensure all inputs no longer accepting data are closed
3391  for (int i = 0; fgt.graph && i < fg->nb_inputs; i++) {
3394  close_input(ifp);
3395  }
3396  }
3397 
  // flush all outputs that have not seen EOF yet
3398  for (unsigned i = 0; i < fg->nb_outputs; i++) {
3400 
3401  if (fgt.eof_out[i] || !fgt.graph)
3402  continue;
3403 
3404  ret = fg_output_frame(ofp, &fgt, NULL);
3405  if (ret < 0)
3406  goto finish;
3407  }
3408 
3409 finish:
3410 
3412  print_filtergraph(fg, fgt.graph);
3413 
3414  // EOF is normal termination
3415  if (ret == AVERROR_EOF)
3416  ret = 0;
3417 
3418  fg_thread_uninit(&fgt);
3419 
3420  return ret;
3421 }
3422 
3423 void fg_send_command(FilterGraph *fg, double time, const char *target,
3424  const char *command, const char *arg, int all_filters)
3425 {
3426  FilterGraphPriv *fgp = fgp_from_fg(fg);
3427  AVBufferRef *buf;
3428  FilterCommand *fc;
3429 
3430  fc = av_mallocz(sizeof(*fc));
3431  if (!fc)
3432  return;
3433 
3434  buf = av_buffer_create((uint8_t*)fc, sizeof(*fc), filter_command_free, NULL, 0);
3435  if (!buf) {
3436  av_freep(&fc);
3437  return;
3438  }
3439 
3440  fc->target = av_strdup(target);
3441  fc->command = av_strdup(command);
3442  fc->arg = av_strdup(arg);
3443  if (!fc->target || !fc->command || !fc->arg) {
3444  av_buffer_unref(&buf);
3445  return;
3446  }
3447 
3448  fc->time = time;
3449  fc->all_filters = all_filters;
3450 
3451  fgp->frame->buf[0] = buf;
3452  fgp->frame->opaque = (void*)(intptr_t)FRAME_OPAQUE_SEND_COMMAND;
3453 
3454  sch_filter_command(fgp->sch, fgp->sch_idx, fgp->frame);
3455 }
AV_OPT_SEARCH_CHILDREN
#define AV_OPT_SEARCH_CHILDREN
Search in possible children of the given object first.
Definition: opt.h:605
InputFilterPriv::nb_dropped
uint64_t nb_dropped
Definition: ffmpeg_filter.c:119
SCH_FILTER_OUT
#define SCH_FILTER_OUT(filter, output)
Definition: ffmpeg_sched.h:129
AVSubtitle
Definition: avcodec.h:2090
formats
formats
Definition: signature.h:47
AVBufferSrcParameters::side_data
AVFrameSideData ** side_data
Definition: buffersrc.h:124
AVBufferSrcParameters::color_space
enum AVColorSpace color_space
Video only, the YUV colorspace and range.
Definition: buffersrc.h:121
configure_input_filter
static int configure_input_filter(FilterGraph *fg, AVFilterGraph *graph, InputFilter *ifilter, AVFilterInOut *in)
Definition: ffmpeg_filter.c:2016
FilterGraphThread::next_in
unsigned next_in
Definition: ffmpeg_filter.c:92
AVMEDIA_TYPE_SUBTITLE
@ AVMEDIA_TYPE_SUBTITLE
Definition: avutil.h:203
av_gettime_relative
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
Definition: time.c:56
AVFILTER_CMD_FLAG_ONE
#define AVFILTER_CMD_FLAG_ONE
Stop once a filter understood the command (for target=all for example), fast filters are favored auto...
Definition: avfilter.h:469
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:216
AVFrame::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: frame.h:678
AV_BPRINT_SIZE_UNLIMITED
#define AV_BPRINT_SIZE_UNLIMITED
av_buffersink_get_ch_layout
int av_buffersink_get_ch_layout(const AVFilterContext *ctx, AVChannelLayout *out)
Definition: buffersink.c:367
AVFILTER_AUTO_CONVERT_NONE
@ AVFILTER_AUTO_CONVERT_NONE
all automatic conversions disabled
Definition: avfilter.h:719
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
av_buffersink_get_sample_aspect_ratio
AVRational av_buffersink_get_sample_aspect_ratio(const AVFilterContext *ctx)
name
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option name
Definition: writing_filters.txt:88
avfilter_filter_pad_count
unsigned avfilter_filter_pad_count(const AVFilter *filter, int is_output)
Get the number of elements in an AVFilter's inputs or outputs array.
Definition: avfilter.c:631
extra_bits
#define extra_bits(eb)
Definition: intrax8.c:120
OutputFilter::graph
struct FilterGraph * graph
Definition: ffmpeg.h:386
av_clip
#define av_clip
Definition: common.h:100
sch_filter_send
int sch_filter_send(Scheduler *sch, unsigned fg_idx, unsigned out_idx, AVFrame *frame)
Called by filtergraph tasks to send a filtered frame or EOF to consumers.
Definition: ffmpeg_sched.c:2574
OutputFilter::class
const AVClass * class
Definition: ffmpeg.h:384
view_specifier_parse
int view_specifier_parse(const char **pspec, ViewSpecifier *vs)
Definition: ffmpeg_opt.c:309
VSYNC_VFR
@ VSYNC_VFR
Definition: ffmpeg.h:70
av_bprint_is_complete
static int av_bprint_is_complete(const AVBPrint *buf)
Test if the print buffer is complete (not truncated).
Definition: bprint.h:218
r
const char * r
Definition: vf_curves.c:127
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
nb_input_files
int nb_input_files
Definition: ffmpeg.c:109
AVSubtitle::rects
AVSubtitleRect ** rects
Definition: avcodec.h:2095
opt.h
choose_input
static int choose_input(const FilterGraph *fg, const FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:2350
get_rotation
double get_rotation(const int32_t *displaymatrix)
Definition: cmdutils.c:1553
FrameData::nb_side_data
int nb_side_data
Definition: ffmpeg.h:737
FilterGraphPriv::frame
AVFrame * frame
Definition: ffmpeg_filter.c:62
read_binary
static int read_binary(void *logctx, const char *path, uint8_t **data, int *len)
Definition: ffmpeg_filter.c:440
FilterGraphPriv::sch
Scheduler * sch
Definition: ffmpeg_filter.c:66
AVCodecParameters::codec_type
enum AVMediaType codec_type
General type of the encoded data.
Definition: codec_par.h:51
FilterGraphThread::got_frame
int got_frame
Definition: ffmpeg_filter.c:94
AVFilterGraph::nb_threads
int nb_threads
Maximum number of threads used by filters in this graph.
Definition: avfilter.h:615
InputFilterPriv::ch_layout
AVChannelLayout ch_layout
Definition: ffmpeg_filter.c:131
avfilter_pad_get_name
const char * avfilter_pad_get_name(const AVFilterPad *pads, int pad_idx)
Get the name of an AVFilterPad.
Definition: avfilter.c:987
FrameData
Definition: ffmpeg.h:715
send_command
static void send_command(FilterGraph *fg, AVFilterGraph *graph, double time, const char *target, const char *command, const char *arg, int all_filters)
Definition: ffmpeg_filter.c:2325
InputFilterPriv::last_pts
int64_t last_pts
Definition: ffmpeg_filter.c:152
avfilter_graph_segment_create_filters
int avfilter_graph_segment_create_filters(AVFilterGraphSegment *seg, int flags)
Create filters specified in a graph segment.
Definition: graphparser.c:516
InputFilterOptions::crop_right
unsigned crop_right
Definition: ffmpeg.h:287
OutputFilter::apad
char * apad
Definition: ffmpeg.h:399
out
FILE * out
Definition: movenc.c:55
av_frame_get_buffer
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:206
av_bprint_init
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:69
av_frame_get_side_data
AVFrameSideData * av_frame_get_side_data(const AVFrame *frame, enum AVFrameSideDataType type)
Definition: frame.c:659
clone_side_data
static int clone_side_data(AVFrameSideData ***dst, int *nb_dst, AVFrameSideData *const *src, int nb_src, unsigned int flags)
Wrapper calling av_frame_side_data_clone() in a loop for all source entries.
Definition: ffmpeg_utils.h:50
FilterGraph::graph_desc
const char * graph_desc
Definition: ffmpeg.h:421
atomic_fetch_add
#define atomic_fetch_add(object, operand)
Definition: stdatomic.h:137
sample_fmts
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:948
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:3456
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:90
FilterGraph::inputs
InputFilter ** inputs
Definition: ffmpeg.h:411
av_buffersink_get_frame_flags
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
Definition: buffersink.c:155
AVBufferSrcParameters::nb_side_data
int nb_side_data
Definition: buffersrc.h:125
InputFilterOptions::crop_bottom
unsigned crop_bottom
Definition: ffmpeg.h:285
av_dict_count
int av_dict_count(const AVDictionary *m)
Get number of entries in dictionary.
Definition: dict.c:37
AVFrame::nb_side_data
int nb_side_data
Definition: frame.h:625
ifilter_parameters_from_frame
static int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame)
Definition: ffmpeg_filter.c:2234
stream_specifier_parse
int stream_specifier_parse(StreamSpecifier *ss, const char *spec, int allow_remainder, void *logctx)
Parse a stream specifier string into a form suitable for matching.
Definition: cmdutils.c:1011
ofilter_class
static const AVClass ofilter_class
Definition: ffmpeg_filter.c:644
HWACCEL_CHANGED
@ HWACCEL_CHANGED
Definition: ffmpeg_filter.c:3067
frame_drop_threshold
float frame_drop_threshold
Definition: ffmpeg_opt.c:62
close_input
static void close_input(InputFilterPriv *ifp)
Definition: ffmpeg_filter.c:2633
AV_TIME_BASE_Q
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:263
ist_filter_add
int ist_filter_add(InputStream *ist, InputFilter *ifilter, int is_simple, const ViewSpecifier *vs, InputFilterOptions *opts, SchedulerNode *src)
Definition: ffmpeg_demux.c:1042
InputFilterPriv::time_base
AVRational time_base
Definition: ffmpeg_filter.c:133
int64_t
long long int64_t
Definition: coverity.c:34
output
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce output
Definition: filter_design.txt:226
configure_output_filter
static int configure_output_filter(FilterGraphPriv *fgp, AVFilterGraph *graph, OutputFilter *ofilter, AVFilterInOut *out)
Definition: ffmpeg_filter.c:1825
av_alpha_mode_name
const char * av_alpha_mode_name(enum AVAlphaMode mode)
Definition: pixdesc.c:3921
FilterCommand::arg
char * arg
Definition: ffmpeg_filter.c:254
AVSubtitleRect
Definition: avcodec.h:2063
av_asprintf
char * av_asprintf(const char *fmt,...)
Definition: avstring.c:115
AVSubtitle::num_rects
unsigned num_rects
Definition: avcodec.h:2094
dec_filter_add
int dec_filter_add(Decoder *dec, InputFilter *ifilter, InputFilterOptions *opts, const ViewSpecifier *vs, SchedulerNode *src)
Definition: ffmpeg_dec.c:1757
OutputFilterPriv::crop_left
unsigned crop_left
Definition: ffmpeg_filter.c:206
fg_free
void fg_free(FilterGraph **pfg)
Definition: ffmpeg_filter.c:1013
FPSConvContext::frames_prev_hist
int64_t frames_prev_hist[3]
Definition: ffmpeg_filter.c:172
AV_CODEC_ID_MPEG4
@ AV_CODEC_ID_MPEG4
Definition: codec_id.h:64
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:64
AVFrame::opaque
void * opaque
Frame owner's private data.
Definition: frame.h:565
AVFrame::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: frame.h:689
InputFile::index
int index
Definition: ffmpeg.h:525
sample_rates
static const int sample_rates[]
Definition: dcaenc.h:34
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:427
AVFilterInOut::next
struct AVFilterInOut * next
next input/input in the list, NULL if this is the last
Definition: avfilter.h:757
pixdesc.h
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:529
AVFrame::width
int width
Definition: frame.h:499
FilterGraphPriv::log_name
char log_name[32]
Definition: ffmpeg_filter.c:47
FilterGraphPriv::have_sources
int have_sources
Definition: ffmpeg_filter.c:54
StreamSpecifier
Definition: cmdutils.h:113
ofilter_bind_enc
int ofilter_bind_enc(OutputFilter *ofilter, unsigned sched_idx_enc, const OutputFilterOptions *opts)
Definition: ffmpeg_filter.c:810
AVOption
AVOption.
Definition: opt.h:429
InputFilterPriv::ofilter_src
OutputFilter * ofilter_src
Definition: ffmpeg_filter.c:110
fg_output_frame
static int fg_output_frame(OutputFilterPriv *ofp, FilterGraphThread *fgt, AVFrame *frame)
Definition: ffmpeg_filter.c:2699
b
#define b
Definition: input.c:42
av_buffersrc_add_frame
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
Definition: buffersrc.c:190
FilterGraph::index
int index
Definition: ffmpeg.h:409
OutputFilter::index
int index
Definition: ffmpeg.h:388
InputFilterPriv::sample_rate
int sample_rate
Definition: ffmpeg_filter.c:130
data
const char data[16]
Definition: mxf.c:149
InputFilter::index
int index
Definition: ffmpeg.h:369
FPSConvContext::last_dropped
int last_dropped
Definition: ffmpeg_filter.c:176
OutputFilterPriv::ts_offset
int64_t ts_offset
Definition: ffmpeg_filter.c:239
cleanup_filtergraph
static void cleanup_filtergraph(FilterGraph *fg, FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:2026
OutputFilterPriv::alpha_mode
enum AVAlphaMode alpha_mode
Definition: ffmpeg_filter.c:202
ffmpeg.h
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:226
filter
void(* filter)(uint8_t *src, int stride, int qscale)
Definition: h263dsp.c:29
FilterGraph::nb_inputs
int nb_inputs
Definition: ffmpeg.h:412
VIDEO_CHANGED
@ VIDEO_CHANGED
Definition: ffmpeg_filter.c:3063
AV_FRAME_DATA_DISPLAYMATRIX
@ AV_FRAME_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: frame.h:85
ViewSpecifier
Definition: ffmpeg.h:129
AVDictionary
Definition: dict.c:32
AVFrame::flags
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:671
ofp_from_ofilter
static OutputFilterPriv * ofp_from_ofilter(OutputFilter *ofilter)
Definition: ffmpeg_filter.c:246
AVChannelLayout::order
enum AVChannelOrder order
Channel order used in this layout.
Definition: channel_layout.h:324
av_buffer_ref
AVBufferRef * av_buffer_ref(const AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:103
av_frame_side_data_clone
int av_frame_side_data_clone(AVFrameSideData ***sd, int *nb_sd, const AVFrameSideData *src, unsigned int flags)
Add a new side data entry to an array based on existing side data, taking a reference towards the con...
Definition: side_data.c:248
IFILTER_FLAG_AUTOROTATE
@ IFILTER_FLAG_AUTOROTATE
Definition: ffmpeg.h:264
AVChannelLayout::nb_channels
int nb_channels
Number of channels in this layout.
Definition: channel_layout.h:329
configure_output_audio_filter
static int configure_output_audio_filter(FilterGraphPriv *fgp, AVFilterGraph *graph, OutputFilter *ofilter, AVFilterInOut *out)
Definition: ffmpeg_filter.c:1743
AVFrame::buf
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:604
AV_BUFFERSRC_FLAG_KEEP_REF
@ AV_BUFFERSRC_FLAG_KEEP_REF
Keep a reference to the frame.
Definition: buffersrc.h:53
AVBufferSrcParameters::height
int height
Definition: buffersrc.h:87
avio_size
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:326
av_strlcatf
size_t av_strlcatf(char *dst, size_t size, const char *fmt,...)
Definition: avstring.c:103
fg_output_step
static int fg_output_step(OutputFilterPriv *ofp, FilterGraphThread *fgt, AVFrame *frame)
Definition: ffmpeg_filter.c:2782
FilterGraphPriv
Definition: ffmpeg_filter.c:43
av_channel_layout_describe_bprint
int av_channel_layout_describe_bprint(const AVChannelLayout *channel_layout, AVBPrint *bp)
bprint variant of av_channel_layout_describe().
Definition: channel_layout.c:599
FilterGraphThread::eof_in
uint8_t * eof_in
Definition: ffmpeg_filter.c:97
avfilter_graph_free
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
Definition: avfiltergraph.c:119
configure_filtergraph
static int configure_filtergraph(FilterGraph *fg, FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:2061
OutputFilterPriv::log_name
char log_name[32]
Definition: ffmpeg_filter.c:191
AUTO_INSERT_FILTER
#define AUTO_INSERT_FILTER(opt_name, filter_name, arg)
stream_specifier_uninit
void stream_specifier_uninit(StreamSpecifier *ss)
Definition: cmdutils.c:1002
InputStream
Definition: ffmpeg.h:476
filter_nbthreads
char * filter_nbthreads
Definition: ffmpeg_opt.c:76
debug_ts
int debug_ts
Definition: ffmpeg_opt.c:70
OutputFilterOptions
Definition: ffmpeg.h:307
InputFilterOptions::trim_start_us
int64_t trim_start_us
Definition: ffmpeg.h:272
InputFilterOptions::flags
unsigned flags
Definition: ffmpeg.h:293
avfilter_graph_create_filter
int avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt, const char *name, const char *args, void *opaque, AVFilterGraph *graph_ctx)
A convenience wrapper that allocates and initializes a filter in a single step.
Definition: avfiltergraph.c:140
avfilter_graph_alloc_filter
AVFilterContext * avfilter_graph_alloc_filter(AVFilterGraph *graph, const AVFilter *filter, const char *name)
Create a new filter instance in a filter graph.
Definition: avfiltergraph.c:167
finish
static void finish(void)
Definition: movenc.c:374
AV_OPT_TYPE_BINARY
@ AV_OPT_TYPE_BINARY
Underlying C type is a uint8_t* that is either NULL or points to an array allocated with the av_mallo...
Definition: opt.h:286
av_color_space_name
const char * av_color_space_name(enum AVColorSpace space)
Definition: pixdesc.c:3856
FRAME_OPAQUE_SUB_HEARTBEAT
@ FRAME_OPAQUE_SUB_HEARTBEAT
Definition: ffmpeg.h:89
OutputFilterPriv
Definition: ffmpeg_filter.c:187
fg_thread_uninit
static void fg_thread_uninit(FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:3248
filter_opt_apply
static int filter_opt_apply(void *logctx, AVFilterContext *f, const char *key, const char *val)
Definition: ffmpeg_filter.c:489
fail
#define fail()
Definition: checkasm.h:214
InputFilter::type
enum AVMediaType type
Definition: ffmpeg.h:372
AVBufferSrcParameters::sample_aspect_ratio
AVRational sample_aspect_ratio
Video only, the sample (pixel) aspect ratio.
Definition: buffersrc.h:92
av_fifo_write
int av_fifo_write(AVFifo *f, const void *buf, size_t nb_elems)
Write data into a FIFO.
Definition: fifo.c:188
sub2video_push_ref
static void sub2video_push_ref(InputFilterPriv *ifp, int64_t pts)
Definition: ffmpeg_filter.c:325
avfilter_graph_alloc
AVFilterGraph * avfilter_graph_alloc(void)
Allocate a filter graph.
Definition: avfiltergraph.c:85
AV_PIX_FMT_FLAG_HWACCEL
#define AV_PIX_FMT_FLAG_HWACCEL
Pixel format is an HW accelerated format.
Definition: pixdesc.h:128
FFSIGN
#define FFSIGN(a)
Definition: common.h:75
print_filtergraph
int print_filtergraph(FilterGraph *fg, AVFilterGraph *graph)
Definition: graphprint.c:954
samplefmt.h
OutputFilterPriv::side_data
AVFrameSideData ** side_data
Definition: ffmpeg_filter.c:209
AVERROR_OPTION_NOT_FOUND
#define AVERROR_OPTION_NOT_FOUND
Option not found.
Definition: error.h:63
avfilter_graph_segment_free
void avfilter_graph_segment_free(AVFilterGraphSegment **seg)
Free the provided AVFilterGraphSegment and everything associated with it.
Definition: graphparser.c:276
sub2video_get_blank_frame
static int sub2video_get_blank_frame(InputFilterPriv *ifp)
Definition: ffmpeg_filter.c:271
AV_BPRINT_SIZE_AUTOMATIC
#define AV_BPRINT_SIZE_AUTOMATIC
ifilter_has_all_input_formats
static int ifilter_has_all_input_formats(FilterGraph *fg)
Definition: ffmpeg_filter.c:614
AVFrame::alpha_mode
enum AVAlphaMode alpha_mode
Indicates how the alpha channel of the video is to be handled.
Definition: frame.h:782
val
static double val(void *priv, double ch)
Definition: aeval.c:77
AVFrame::ch_layout
AVChannelLayout ch_layout
Channel layout of the audio data.
Definition: frame.h:770
SCH_ENC
#define SCH_ENC(encoder)
Definition: ffmpeg_sched.h:123
configure_input_video_filter
static int configure_input_video_filter(FilterGraph *fg, AVFilterGraph *graph, InputFilter *ifilter, AVFilterInOut *in)
Definition: ffmpeg_filter.c:1846
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
avfilter_graph_segment_parse
int avfilter_graph_segment_parse(AVFilterGraph *graph, const char *graph_str, int flags, AVFilterGraphSegment **seg)
Parse a textual filtergraph description into an intermediate form.
Definition: graphparser.c:460
AVDownmixInfo
This structure describes optional metadata relevant to a downmix procedure.
Definition: downmix_info.h:58
pts
static int64_t pts
Definition: transcode_aac.c:644
av_opt_set
int av_opt_set(void *obj, const char *name, const char *val, int search_flags)
Definition: opt.c:835
graph_is_meta
static int graph_is_meta(AVFilterGraph *graph)
Definition: ffmpeg_filter.c:2042
median3
static int64_t median3(int64_t a, int64_t b, int64_t c)
Definition: ffmpeg_filter.c:2489
FilterGraphThread::frame
AVFrame * frame
Definition: ffmpeg_filter.c:84
ss
#define ss(width, name, subs,...)
Definition: cbs_vp9.c:202
AVFILTER_FLAG_DYNAMIC_INPUTS
#define AVFILTER_FLAG_DYNAMIC_INPUTS
The number of the filter inputs is not determined just by AVFilter.inputs.
Definition: avfilter.h:156
FrameData::tb
AVRational tb
Definition: ffmpeg.h:725
OutputFilterPriv::sws_opts
AVDictionary * sws_opts
Definition: ffmpeg_filter.c:221
fgp_from_fg
static FilterGraphPriv * fgp_from_fg(FilterGraph *fg)
Definition: ffmpeg_filter.c:70
OutputFilterPriv::sample_rate
int sample_rate
Definition: ffmpeg_filter.c:198
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
InputFilterPriv::sub2video
struct InputFilterPriv::@8 sub2video
FPSConvContext::dropped_keyframe
int dropped_keyframe
Definition: ffmpeg_filter.c:177
AVRational::num
int num
Numerator.
Definition: rational.h:59
OutputFilter::bound
int bound
Definition: ffmpeg.h:396
LATENCY_PROBE_FILTER_PRE
@ LATENCY_PROBE_FILTER_PRE
Definition: ffmpeg.h:103
InputFilterOptions::trim_end_us
int64_t trim_end_us
Definition: ffmpeg.h:273
AVFilterPad
A filter pad used for either input or output.
Definition: filters.h:39
sch_add_filtergraph
int sch_add_filtergraph(Scheduler *sch, unsigned nb_inputs, unsigned nb_outputs, SchThreadFunc func, void *ctx)
Add a filtergraph to the scheduler.
Definition: ffmpeg_sched.c:839
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:52
sub2video_heartbeat
static void sub2video_heartbeat(InputFilter *ifilter, int64_t pts, AVRational tb)
Definition: ffmpeg_filter.c:2930
avfilter_inout_free
void avfilter_inout_free(AVFilterInOut **inout)
Free the supplied list of AVFilterInOut and set *inout to NULL.
Definition: graphparser.c:76
OutputFilterPriv::nb_side_data
int nb_side_data
Definition: ffmpeg_filter.c:210
avassert.h
OutputFilterPriv::trim_start_us
int64_t trim_start_us
Definition: ffmpeg_filter.c:236
FrameData::frame_rate_filter
AVRational frame_rate_filter
Definition: ffmpeg.h:728
InputFilterPriv::nb_side_data
int nb_side_data
Definition: ffmpeg_filter.c:136
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
send_eof
static int send_eof(FilterGraphThread *fgt, InputFilter *ifilter, int64_t pts, AVRational tb)
Definition: ffmpeg_filter.c:2999
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
InputFilterPriv
Definition: ffmpeg_filter.c:101
av_fifo_read
int av_fifo_read(AVFifo *f, void *buf, size_t nb_elems)
Read data from a FIFO.
Definition: fifo.c:240
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:642
duration
int64_t duration
Definition: movenc.c:65
av_buffersink_get_frame_rate
AVRational av_buffersink_get_frame_rate(const AVFilterContext *ctx)
Definition: buffersink.c:347
ifilter_alloc
static InputFilter * ifilter_alloc(FilterGraph *fg)
Definition: ffmpeg_filter.c:984
AVFilterChain::filters
AVFilterParams ** filters
Definition: avfilter.h:933
filter_command_free
static void filter_command_free(void *opaque, uint8_t *data)
Definition: ffmpeg_filter.c:260
VSYNC_VSCFR
@ VSYNC_VSCFR
Definition: ffmpeg.h:71
llrintf
#define llrintf(x)
Definition: libm.h:401
s
#define s(width, name)
Definition: cbs_vp9.c:198
ifilter_bind_ist
static int ifilter_bind_ist(InputFilter *ifilter, InputStream *ist, const ViewSpecifier *vs)
Definition: ffmpeg_filter.c:678
FilterGraphPriv::frame_enc
AVFrame * frame_enc
Definition: ffmpeg_filter.c:64
DOWNMIX_CHANGED
@ DOWNMIX_CHANGED
Definition: ffmpeg_filter.c:3066
InputFilterPriv::frame
AVFrame * frame
Definition: ffmpeg_filter.c:107
FilterGraph::outputs
OutputFilter ** outputs
Definition: ffmpeg.h:413
ofilter_item_name
static const char * ofilter_item_name(void *obj)
Definition: ffmpeg_filter.c:638
AVDictionaryEntry::key
char * key
Definition: dict.h:91
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:201
VIEW_SPECIFIER_TYPE_NONE
@ VIEW_SPECIFIER_TYPE_NONE
Definition: ffmpeg.h:118
AV_CHANNEL_ORDER_UNSPEC
@ AV_CHANNEL_ORDER_UNSPEC
Only the channel count is specified, without any further information about the channel order.
Definition: channel_layout.h:119
ifilter_bind_dec
static int ifilter_bind_dec(InputFilterPriv *ifp, Decoder *dec, const ViewSpecifier *vs)
Definition: ffmpeg_filter.c:737
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
OutputFilter::linklabel
uint8_t * linklabel
Definition: ffmpeg.h:397
InputFilter
Definition: ffmpeg.h:366
FilterGraphPriv::nb_outputs_done
unsigned nb_outputs_done
Definition: ffmpeg_filter.c:57
fc
#define fc(width, name, range_min, range_max)
Definition: cbs_av1.c:494
av_buffersink_get_format
int av_buffersink_get_format(const AVFilterContext *ctx)
av_buffersink_get_time_base
AVRational av_buffersink_get_time_base(const AVFilterContext *ctx)
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:41
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:296
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:231
FrameData::dec
struct FrameData::@4 dec
ctx
AVFormatContext * ctx
Definition: movenc.c:49
OFILTER_FLAG_AUTOSCALE
@ OFILTER_FLAG_AUTOSCALE
Definition: ffmpeg.h:302
print_graphs_file
char * print_graphs_file
Definition: ffmpeg_opt.c:81
InputFilter::linklabel
uint8_t * linklabel
Definition: ffmpeg.h:380
av_rescale_q
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
AVSubtitle::pts
int64_t pts
Same as packet pts, in AV_TIME_BASE.
Definition: avcodec.h:2096
fg_thread_init
static int fg_thread_init(FilterGraphThread *fgt, const FilterGraph *fg)
Definition: ffmpeg_filter.c:3266
InputFilterOptions::name
uint8_t * name
Definition: ffmpeg.h:275
InputFilterOptions::crop_top
unsigned crop_top
Definition: ffmpeg.h:284
InputFilter::graph
struct FilterGraph * graph
Definition: ffmpeg.h:367
AV_SIDE_DATA_PROP_GLOBAL
@ AV_SIDE_DATA_PROP_GLOBAL
The side data type can be used in stream-global structures.
Definition: frame.h:296
av_get_sample_fmt_name
const char * av_get_sample_fmt_name(enum AVSampleFormat sample_fmt)
Return the name of sample_fmt, or NULL if sample_fmt is not recognized.
Definition: samplefmt.c:51
key
const char * key
Definition: hwcontext_opencl.c:189
color_range
color_range
Definition: vf_selectivecolor.c:43
av_mallocz
#define av_mallocz(s)
Definition: tableprint_vlc.h:31
AV_ROUND_NEAR_INF
@ AV_ROUND_NEAR_INF
Round to nearest and halfway cases away from zero.
Definition: mathematics.h:135
fsize
static int64_t fsize(FILE *f)
Definition: audiomatch.c:29
OutputFilterPriv::fps
FPSConvContext fps
Definition: ffmpeg_filter.c:241
av_buffersink_get_alpha_mode
enum AVAlphaMode av_buffersink_get_alpha_mode(const AVFilterContext *ctx)
fg_item_name
static const char * fg_item_name(void *obj)
Definition: ffmpeg_filter.c:1071
AV_ROUND_PASS_MINMAX
@ AV_ROUND_PASS_MINMAX
Flag telling rescaling functions to pass INT64_MIN/MAX through unchanged, avoiding special cases for ...
Definition: mathematics.h:159
command
static int command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
Definition: vf_drawtext.c:1187
tmp
static uint8_t tmp[40]
Definition: aes_ctr.c:52
arg
const char * arg
Definition: jacosubdec.c:65
OutputFilterPriv::ch_layouts
const AVChannelLayout * ch_layouts
Definition: ffmpeg_filter.c:227
OutputFilterPriv::width
int width
Definition: ffmpeg_filter.c:197
InputFilterOptions::crop_left
unsigned crop_left
Definition: ffmpeg.h:286
av_color_range_name
const char * av_color_range_name(enum AVColorRange range)
Definition: pixdesc.c:3772
AVBufferSrcParameters::alpha_mode
enum AVAlphaMode alpha_mode
Video only, the alpha mode.
Definition: buffersrc.h:130
AVFormatContext
Format I/O context.
Definition: avformat.h:1264
avfilter_get_by_name
const AVFilter * avfilter_get_by_name(const char *name)
Get a filter definition matching the given name.
Definition: allfilters.c:649
opts
AVDictionary * opts
Definition: movenc.c:51
AVStream::codecpar
AVCodecParameters * codecpar
Codec parameters associated with this stream.
Definition: avformat.h:767
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:76
OutputFilter::name
uint8_t * name
Definition: ffmpeg.h:387
fabs
static __device__ float fabs(float a)
Definition: cuda_runtime.h:182
avfilter_graph_config
int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx)
Check validity and configure all the links and formats in the graph.
Definition: avfiltergraph.c:1434
OutputFilterPriv::crop_right
unsigned crop_right
Definition: ffmpeg_filter.c:207
OutputFilterPriv::enc_timebase
AVRational enc_timebase
Definition: ffmpeg_filter.c:235
avfilter_graph_segment_apply
int avfilter_graph_segment_apply(AVFilterGraphSegment *seg, int flags, AVFilterInOut **inputs, AVFilterInOut **outputs)
Apply all filter/link descriptions from a graph segment to the associated filtergraph.
Definition: graphparser.c:882
InputFilterPriv::color_space
enum AVColorSpace color_space
Definition: ffmpeg_filter.c:126
NULL
#define NULL
Definition: coverity.c:32
av_opt_set_bin
int av_opt_set_bin(void *obj, const char *name, const uint8_t *val, int len, int search_flags)
Definition: opt.c:895
set_channel_layout
static int set_channel_layout(OutputFilterPriv *f, const AVChannelLayout *layouts_allowed, const AVChannelLayout *layout_requested)
Definition: ffmpeg_filter.c:770
OutputFilterPriv::ch_layout
AVChannelLayout ch_layout
Definition: ffmpeg_filter.c:199
AVFilterParams
Parameters describing a filter to be created in a filtergraph.
Definition: avfilter.h:865
format
New swscale design to change SwsGraph is what coordinates multiple passes These can include cascaded scaling error diffusion and so on Or we could have separate passes for the vertical and horizontal scaling In between each SwsPass lies a fully allocated image buffer Graph passes may have different levels of e g we can have a single threaded error diffusion pass following a multi threaded scaling pass SwsGraph is internally recreated whenever the image format
Definition: swscale-v2.txt:14
FPSConvContext::dup_warning
uint64_t dup_warning
Definition: ffmpeg_filter.c:174
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:139
InputStream::st
AVStream * st
Definition: ffmpeg.h:484
avfilter_graph_set_auto_convert
void avfilter_graph_set_auto_convert(AVFilterGraph *graph, unsigned flags)
Enable or disable automatic format conversion inside the graph.
Definition: avfiltergraph.c:162
InputFilterPriv::displaymatrix_present
int displaymatrix_present
Definition: ffmpeg_filter.c:142
Decoder
Definition: ffmpeg.h:462
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
OFILTER_FLAG_AUDIO_24BIT
@ OFILTER_FLAG_AUDIO_24BIT
Definition: ffmpeg.h:301
AVFilterChain::nb_filters
size_t nb_filters
Definition: avfilter.h:934
av_frame_side_data_remove
void av_frame_side_data_remove(AVFrameSideData ***sd, int *nb_sd, enum AVFrameSideDataType type)
Remove and free all side data instances of the given type from an array.
Definition: side_data.c:102
AVFilterGraph::filters
AVFilterContext ** filters
Definition: avfilter.h:591
ofilter_bind_ifilter
static int ofilter_bind_ifilter(OutputFilter *ofilter, InputFilterPriv *ifp, const OutputFilterOptions *opts)
Definition: ffmpeg_filter.c:925
OutputFilterPriv::sample_aspect_ratio
AVRational sample_aspect_ratio
Definition: ffmpeg_filter.c:219
ofilter_alloc
static OutputFilter * ofilter_alloc(FilterGraph *fg, enum AVMediaType type)
Definition: ffmpeg_filter.c:652
close_output
static int close_output(OutputFilterPriv *ofp, FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:2643
FilterGraphThread::frame_queue_out
AVFifo * frame_queue_out
Definition: ffmpeg_filter.c:89
FilterGraphPriv::sch_idx
unsigned sch_idx
Definition: ffmpeg_filter.c:67
FrameData::wallclock
int64_t wallclock[LATENCY_PROBE_NB]
Definition: ffmpeg.h:732
avfilter_graph_request_oldest
int avfilter_graph_request_oldest(AVFilterGraph *graph)
Request a frame on the oldest sink link.
Definition: avfiltergraph.c:1567
time.h
AVFilterGraphSegment::chains
AVFilterChain ** chains
A list of filter chain contained in this segment.
Definition: avfilter.h:957
stream_specifier_match
unsigned stream_specifier_match(const StreamSpecifier *ss, const AVFormatContext *s, const AVStream *st, void *logctx)
Definition: cmdutils.c:1226
AVFilterGraph
Definition: avfilter.h:589
AV_FRAME_SIDE_DATA_FLAG_REPLACE
#define AV_FRAME_SIDE_DATA_FLAG_REPLACE
Don't add a new entry if another of the same type exists.
Definition: frame.h:1053
InputFilterPriv::downmixinfo_present
int downmixinfo_present
Definition: ffmpeg_filter.c:146
inputs
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
Definition: filter_design.txt:244
InputFilterOptions
Definition: ffmpeg.h:271
fg_create_simple
int fg_create_simple(FilterGraph **pfg, InputStream *ist, char **graph_desc, Scheduler *sch, unsigned sched_idx_enc, const OutputFilterOptions *opts)
Definition: ffmpeg_filter.c:1245
InputFilterPriv::sample_aspect_ratio
AVRational sample_aspect_ratio
Definition: ffmpeg_filter.c:125
AVCOL_RANGE_UNSPECIFIED
@ AVCOL_RANGE_UNSPECIFIED
Definition: pixfmt.h:743
FilterGraph::nb_outputs
int nb_outputs
Definition: ffmpeg.h:414
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
OutputFilterPriv::formats
const int * formats
Definition: ffmpeg_filter.c:226
av_opt_set_int
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
Definition: opt.c:880
InputStream::par
AVCodecParameters * par
Codec parameters - to be used by the decoding/streamcopy code.
Definition: ffmpeg.h:492
av_buffer_create
AVBufferRef * av_buffer_create(uint8_t *data, size_t size, void(*free)(void *opaque, uint8_t *data), void *opaque, int flags)
Create an AVBuffer from an existing array.
Definition: buffer.c:55
input_files
InputFile ** input_files
Definition: ffmpeg.c:108
AV_CLASS_CATEGORY_FILTER
@ AV_CLASS_CATEGORY_FILTER
Definition: log.h:36
Scheduler
Definition: ffmpeg_sched.c:273
FilterGraphPriv::fg
FilterGraph fg
Definition: ffmpeg_filter.c:44
FilterGraphPriv::nb_threads
int nb_threads
Definition: ffmpeg_filter.c:59
OutputFilterPriv::ofilter
OutputFilter ofilter
Definition: ffmpeg_filter.c:188
FilterGraph
Definition: ffmpeg.h:407
AVFilterGraphSegment
A parsed representation of a filtergraph segment.
Definition: avfilter.h:946
OutputFilterPriv::crop_bottom
unsigned crop_bottom
Definition: ffmpeg_filter.c:205
ENC_TIME_BASE_DEMUX
@ ENC_TIME_BASE_DEMUX
Definition: ffmpeg.h:78
InputFilterOptions::sub2video_width
int sub2video_width
Definition: ffmpeg.h:289
InputFilter::filter
AVFilterContext * filter
Definition: ffmpeg.h:374
AVBufferSrcParameters::frame_rate
AVRational frame_rate
Video only, the frame rate of the input video.
Definition: buffersrc.h:100
AV_BUFFERSRC_FLAG_PUSH
@ AV_BUFFERSRC_FLAG_PUSH
Immediately push the frame to the output.
Definition: buffersrc.h:46
AVFilterInOut::pad_idx
int pad_idx
index of the filt_ctx pad to use for linking
Definition: avfilter.h:754
AVAlphaMode
AVAlphaMode
Correlation between the alpha channel and color values.
Definition: pixfmt.h:810
av_buffersrc_close
int av_buffersrc_close(AVFilterContext *ctx, int64_t pts, unsigned flags)
Close the buffer source after EOF.
Definition: buffersrc.c:280
AVFilterGraph::scale_sws_opts
char * scale_sws_opts
sws options to use for the auto-inserted scale filters
Definition: avfilter.h:594
filtergraph_is_simple
int filtergraph_is_simple(const FilterGraph *fg)
Definition: ffmpeg_filter.c:2319
VideoSyncMethod
VideoSyncMethod
Definition: ffmpeg.h:66
av_opt_find
const AVOption * av_opt_find(void *obj, const char *name, const char *unit, int opt_flags, int search_flags)
Look for an option in an object.
Definition: opt.c:1991
FrameData::side_data
AVFrameSideData ** side_data
Definition: ffmpeg.h:736
IFILTER_FLAG_REINIT
@ IFILTER_FLAG_REINIT
Definition: ffmpeg.h:265
f
f
Definition: af_crystalizer.c:122
OutputFilter::output_name
char * output_name
Definition: ffmpeg.h:392
AVIOContext
Bytestream IO Context.
Definition: avio.h:160
av_ts2timestr
#define av_ts2timestr(ts, tb)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:83
filter_thread
static int filter_thread(void *arg)
Definition: ffmpeg_filter.c:3293
AVMediaType
AVMediaType
Definition: avutil.h:198
InputFilterPriv::hw_frames_ctx
AVBufferRef * hw_frames_ctx
Definition: ffmpeg_filter.c:140
AVFifo
Definition: fifo.c:35
FRAME_OPAQUE_SEND_COMMAND
@ FRAME_OPAQUE_SEND_COMMAND
Definition: ffmpeg.h:91
FilterGraphThread
Definition: ffmpeg_filter.c:81
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:278
av_bprint_finalize
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:235
InputFilterPriv::displaymatrix
int32_t displaymatrix[9]
Definition: ffmpeg_filter.c:144
AVChannelLayout
An AVChannelLayout holds information about the channel layout of audio data.
Definition: channel_layout.h:319
OutputFilterPriv::color_ranges
enum AVColorRange * color_ranges
Definition: ffmpeg_filter.c:230
FilterGraphThread::graph
AVFilterGraph * graph
Definition: ffmpeg_filter.c:82
av_buffersrc_parameters_alloc
AVBufferSrcParameters * av_buffersrc_parameters_alloc(void)
Allocate a new AVBufferSrcParameters instance.
Definition: buffersrc.c:107
AVFilterInOut::filter_ctx
AVFilterContext * filter_ctx
filter context associated to this input/output
Definition: avfilter.h:751
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:87
av_err2str
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:122
AVFrame::sample_rate
int sample_rate
Sample rate of the audio data.
Definition: frame.h:590
av_buffersrc_get_status
int av_buffersrc_get_status(AVFilterContext *ctx)
Returns 0 or a negative AVERROR code.
Definition: buffersrc.c:289
OutputFilterPriv::tb_out_locked
int tb_out_locked
Definition: ffmpeg_filter.c:217
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:424
avfilter_link
int avfilter_link(AVFilterContext *src, unsigned srcpad, AVFilterContext *dst, unsigned dstpad)
Link two filters together.
Definition: avfilter.c:149
sch_filter_choke_inputs
void sch_filter_choke_inputs(Scheduler *sch, unsigned fg_idx)
Called by filtergraph tasks to choke all filter inputs, preventing them from receiving more frames un...
Definition: ffmpeg_sched.c:2637
AVBufferSrcParameters::hw_frames_ctx
AVBufferRef * hw_frames_ctx
Video with a hwaccel pixel format only.
Definition: buffersrc.h:106
start_time
static int64_t start_time
Definition: ffplay.c:326
AVFILTER_FLAG_HWDEVICE
#define AVFILTER_FLAG_HWDEVICE
The filter can create hardware frames using AVFilterContext.hw_device_ctx.
Definition: avfilter.h:188
InputFilterPriv::color_range
enum AVColorRange color_range
Definition: ffmpeg_filter.c:127
OutputFilterPriv::displaymatrix
int32_t displaymatrix[9]
Definition: ffmpeg_filter.c:233
AV_SAMPLE_FMT_NONE
@ AV_SAMPLE_FMT_NONE
Definition: samplefmt.h:56
MATRIX_CHANGED
@ MATRIX_CHANGED
Definition: ffmpeg_filter.c:3065
FilterCommand::time
double time
Definition: ffmpeg_filter.c:256
InputFilterPriv::initialize
unsigned int initialize
marks if sub2video_update should force an initialization
Definition: ffmpeg_filter.c:156
InputFilterPriv::displaymatrix_applied
int displaymatrix_applied
Definition: ffmpeg_filter.c:143
avfilter_graph_queue_command
int avfilter_graph_queue_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, int flags, double ts)
Queue a command for one or more filter instances.
Definition: avfiltergraph.c:1484
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:247
AVFrame::time_base
AVRational time_base
Time base for the timestamps in this frame.
Definition: frame.h:544
AVFrameSideData::data
uint8_t * data
Definition: frame.h:284
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:514
FilterGraphPriv::disable_conversions
int disable_conversions
Definition: ffmpeg_filter.c:55
frame_data
FrameData * frame_data(AVFrame *frame)
Get our axiliary frame data attached to the frame, allocating it if needed.
Definition: ffmpeg.c:476
AVSubtitle::end_display_time
uint32_t end_display_time
Definition: avcodec.h:2093
FilterGraphThread::eof_out
uint8_t * eof_out
Definition: ffmpeg_filter.c:98
allocate_array_elem
void * allocate_array_elem(void *ptr, size_t elem_size, int *nb_elems)
Atomically add a new element to an array of pointers, i.e.
Definition: cmdutils.c:1540
FPSConvContext::vsync_method
enum VideoSyncMethod vsync_method
Definition: ffmpeg_filter.c:179
av_frame_remove_side_data
void av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type)
Remove and free all side data instances of the given type.
Definition: frame.c:725
OutputFilter::filter
AVFilterContext * filter
Definition: ffmpeg.h:390
InputFilterPriv::width
int width
Definition: ffmpeg_filter.c:124
AVBufferSrcParameters::time_base
AVRational time_base
The timebase to be used for the timestamps on the input frames.
Definition: buffersrc.h:82
AV_PIX_FMT_RGB32
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:511
filter_is_buffersrc
static int filter_is_buffersrc(const AVFilterContext *f)
Definition: ffmpeg_filter.c:2035
fg_finalise_bindings
int fg_finalise_bindings(void)
Definition: ffmpeg_filter.c:1488
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
AUDIO_CHANGED
@ AUDIO_CHANGED
Definition: ffmpeg_filter.c:3064
sch_filter_receive
int sch_filter_receive(Scheduler *sch, unsigned fg_idx, unsigned *in_idx, AVFrame *frame)
Called by filtergraph tasks to obtain frames for filtering.
Definition: ffmpeg_sched.c:2503
fg_complex_bind_input
static int fg_complex_bind_input(FilterGraph *fg, InputFilter *ifilter, int commit)
Definition: ffmpeg_filter.c:1297
av_dict_free
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
Definition: dict.c:233
unknown_if_null
static const char * unknown_if_null(const char *str)
Definition: ffmpeg_filter.c:3070
InputFilterOptions::sub2video_height
int sub2video_height
Definition: ffmpeg.h:290
decoders
Decoder ** decoders
Definition: ffmpeg.c:117
OutputFilterPriv::log_parent
void * log_parent
Definition: ffmpeg_filter.c:190
nb_decoders
int nb_decoders
Definition: ffmpeg.c:118
OutputFilter::type
enum AVMediaType type
Definition: ffmpeg.h:401
read_frames
static int read_frames(FilterGraph *fg, FilterGraphThread *fgt, AVFrame *frame)
Definition: ffmpeg_filter.c:2867
av_channel_layout_compare
int av_channel_layout_compare(const AVChannelLayout *chl, const AVChannelLayout *chl1)
Check whether two channel layouts are semantically the same, i.e.
Definition: channel_layout.c:809
SUBTITLE_BITMAP
@ SUBTITLE_BITMAP
A bitmap, pict will be set.
Definition: avcodec.h:2046
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:221
send_frame
static int send_frame(FilterGraph *fg, FilterGraphThread *fgt, InputFilter *ifilter, AVFrame *frame)
Definition: ffmpeg_filter.c:3075
avfilter_init_str
int avfilter_init_str(AVFilterContext *filter, const char *args)
Initialize a filter with the supplied parameters.
Definition: avfilter.c:959
buffersink.h
av_buffersink_get_side_data
const AVFrameSideData *const * av_buffersink_get_side_data(const AVFilterContext *ctx, int *nb_side_data)
Definition: buffersink.c:380
av_channel_layout_default
void av_channel_layout_default(AVChannelLayout *ch_layout, int nb_channels)
Get the default channel layout for a given number of channels.
Definition: channel_layout.c:839
av_find_nearest_q_idx
int av_find_nearest_q_idx(AVRational q, const AVRational *q_list)
Find the value in a list of rationals nearest a given reference rational.
Definition: rational.c:140
OutputFilterPriv::color_range
enum AVColorRange color_range
Definition: ffmpeg_filter.c:201
av_buffersink_get_w
int av_buffersink_get_w(const AVFilterContext *ctx)
FilterCommand::all_filters
int all_filters
Definition: ffmpeg_filter.c:257
FPSConvContext::framerate_clip
int framerate_clip
Definition: ffmpeg_filter.c:184
bprint.h
FPSConvContext::frame_number
int64_t frame_number
Definition: ffmpeg_filter.c:168
filter_buffered_frames
int filter_buffered_frames
Definition: ffmpeg_opt.c:78
av_buffersrc_parameters_set
int av_buffersrc_parameters_set(AVFilterContext *ctx, AVBufferSrcParameters *param)
Initialize the buffersrc or abuffersrc filter with the provided parameters.
Definition: buffersrc.c:121
graph_opts_apply
static int graph_opts_apply(void *logctx, AVFilterGraphSegment *seg)
Definition: ffmpeg_filter.c:545
FPSConvContext
Definition: ffmpeg_filter.c:165
lrintf
#define lrintf(x)
Definition: libm_mips.h:72
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
AVBufferSrcParameters::width
int width
Video only, the display dimensions of the input frames.
Definition: buffersrc.h:87
FrameData::bits_per_raw_sample
int bits_per_raw_sample
Definition: ffmpeg.h:730
av_frame_side_data_free
void av_frame_side_data_free(AVFrameSideData ***sd, int *nb_sd)
Free all side data entries and their contents, then zeroes out the values which the pointers are poin...
Definition: side_data.c:133
fg_send_command
void fg_send_command(FilterGraph *fg, double time, const char *target, const char *command, const char *arg, int all_filters)
Definition: ffmpeg_filter.c:3423
downmix_info.h
sch_remove_filtergraph
void sch_remove_filtergraph(Scheduler *sch, int idx)
Definition: ffmpeg_sched.c:460
FilterGraphPriv::is_simple
int is_simple
Definition: ffmpeg_filter.c:49
InputFilterOptions::fallback
AVFrame * fallback
Definition: ffmpeg.h:295
av_buffersrc_add_frame_flags
int attribute_align_arg av_buffersrc_add_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Add a frame to the buffer source.
Definition: buffersrc.c:209
VSYNC_CFR
@ VSYNC_CFR
Definition: ffmpeg.h:69
src2
const pixel * src2
Definition: h264pred_template.c:421
configure_input_audio_filter
static int configure_input_audio_filter(FilterGraph *fg, AVFilterGraph *graph, InputFilter *ifilter, AVFilterInOut *in)
Definition: ffmpeg_filter.c:1965
AVColorSpace
AVColorSpace
YUV colorspace type.
Definition: pixfmt.h:700
FPSConvContext::framerate_max
AVRational framerate_max
Definition: ffmpeg_filter.c:182
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:57
needed
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is needed
Definition: filter_design.txt:212
AVSampleFormat
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:55
OutputFilterPriv::crop_top
unsigned crop_top
Definition: ffmpeg_filter.c:204
delta
float delta
Definition: vorbis_enc_data.h:430
print_graphs
int print_graphs
Definition: ffmpeg_opt.c:80
FRAME_OPAQUE_EOF
@ FRAME_OPAQUE_EOF
Definition: ffmpeg.h:90
InputFile::ctx
AVFormatContext * ctx
Definition: ffmpeg.h:527
av_frame_move_ref
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:523
cfgp_from_cfg
static const FilterGraphPriv * cfgp_from_cfg(const FilterGraph *fg)
Definition: ffmpeg_filter.c:75
graph_parse
static int graph_parse(void *logctx, AVFilterGraph *graph, const char *desc, AVFilterInOut **inputs, AVFilterInOut **outputs, AVBufferRef *hw_device)
Definition: ffmpeg_filter.c:569
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:496
InputFilterPriv::eof
int eof
Definition: ffmpeg_filter.c:116
ifilter_parameters_from_ofilter
static int ifilter_parameters_from_ofilter(InputFilter *ifilter, OutputFilter *ofilter)
Definition: ffmpeg_filter.c:2298
av_inv_q
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
av_buffer_replace
int av_buffer_replace(AVBufferRef **pdst, const AVBufferRef *src)
Ensure dst refers to the same data as src.
Definition: buffer.c:233
IFILTER_FLAG_DROPCHANGED
@ IFILTER_FLAG_DROPCHANGED
Definition: ffmpeg.h:268
AVFrame::side_data
AVFrameSideData ** side_data
Definition: frame.h:624
len
int len
Definition: vorbis_enc_data.h:426
SchedulerNode
Definition: ffmpeg_sched.h:103
AVCOL_SPC_UNSPECIFIED
@ AVCOL_SPC_UNSPECIFIED
Definition: pixfmt.h:703
filtergraphs
FilterGraph ** filtergraphs
Definition: ffmpeg.c:114
int_cb
const AVIOInterruptCB int_cb
Definition: ffmpeg.c:311
OutputFilterPriv::color_space
enum AVColorSpace color_space
Definition: ffmpeg_filter.c:200
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
OFILTER_FLAG_CROP
@ OFILTER_FLAG_CROP
Definition: ffmpeg.h:304
outputs
static const AVFilterPad outputs[]
Definition: af_aap.c:310
sch_connect
int sch_connect(Scheduler *sch, SchedulerNode src, SchedulerNode dst)
Definition: ffmpeg_sched.c:937
FFMPEG_OPT_VSYNC_DROP
#define FFMPEG_OPT_VSYNC_DROP
Definition: ffmpeg.h:60
av_buffersink_get_h
int av_buffersink_get_h(const AVFilterContext *ctx)
OutputFilterPriv::needed
int needed
Definition: ffmpeg_filter.c:193
sch_filter_command
int sch_filter_command(Scheduler *sch, unsigned fg_idx, AVFrame *frame)
Definition: ffmpeg_sched.c:2627
AVFilter
Filter definition.
Definition: avfilter.h:216
video_sync_process
static void video_sync_process(OutputFilterPriv *ofp, AVFrame *frame, int64_t *nb_frames, int64_t *nb_frames_prev)
Definition: ffmpeg_filter.c:2510
ifp_from_ifilter
static InputFilterPriv * ifp_from_ifilter(InputFilter *ifilter)
Definition: ffmpeg_filter.c:160
OFILTER_FLAG_AUTOROTATE
@ OFILTER_FLAG_AUTOROTATE
Definition: ffmpeg.h:303
AV_BUFFERSINK_FLAG_NO_REQUEST
#define AV_BUFFERSINK_FLAG_NO_REQUEST
Tell av_buffersink_get_buffer_ref() not to request a frame from its input.
Definition: buffersink.h:92
ret
ret
Definition: filter_design.txt:187
AVStream
Stream structure.
Definition: avformat.h:744
AV_LOG_FATAL
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:204
pixfmt.h
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:81
AVALPHA_MODE_UNSPECIFIED
@ AVALPHA_MODE_UNSPECIFIED
Unknown alpha handling, or no alpha channel.
Definition: pixfmt.h:811
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:265
InputFilter::name
uint8_t * name
Definition: ffmpeg.h:368
VSYNC_DROP
@ VSYNC_DROP
Definition: ffmpeg.h:73
av_malloc
void * av_malloc(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:98
av_channel_layout_check
int av_channel_layout_check(const AVChannelLayout *channel_layout)
Check whether a channel layout is valid, i.e.
Definition: channel_layout.c:783
FPSConvContext::last_frame
AVFrame * last_frame
Definition: ffmpeg_filter.c:166
InputFile::streams
InputStream ** streams
Definition: ffmpeg.h:541
insert_filter
static int insert_filter(AVFilterContext **last_filter, int *pad_idx, const char *filter_name, const char *args)
Definition: ffmpeg_filter.c:1590
OutputFilterPriv::next_pts
int64_t next_pts
Definition: ffmpeg_filter.c:240
av_bprintf
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:122
ReinitReason
ReinitReason
Definition: ffmpeg_filter.c:3062
av_fifo_alloc2
AVFifo * av_fifo_alloc2(size_t nb_elems, size_t elem_size, unsigned int flags)
Allocate and initialize an AVFifo with a given element size.
Definition: fifo.c:47
AVOption::type
enum AVOptionType type
Definition: opt.h:445
AVFrame::sample_aspect_ratio
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:524
avfilter_pad_get_type
enum AVMediaType avfilter_pad_get_type(const AVFilterPad *pads, int pad_idx)
Get the type of an AVFilterPad.
Definition: avfilter.c:992
av_dynarray_add_nofree
int av_dynarray_add_nofree(void *tab_ptr, int *nb_ptr, void *elem)
Add an element to a dynamic array.
Definition: mem.c:315
AVBufferSrcParameters::color_range
enum AVColorRange color_range
Definition: buffersrc.h:122
FrameOpaque
FrameOpaque
Definition: ffmpeg.h:88
OutputFilterPriv::swr_opts
AVDictionary * swr_opts
Definition: ffmpeg_filter.c:222
av_get_media_type_string
const char * av_get_media_type_string(enum AVMediaType media_type)
Return a string describing the media_type enum, NULL if media_type is unknown.
Definition: utils.c:28
AVFrame::height
int height
Definition: frame.h:499
AVStream::index
int index
stream index in AVFormatContext
Definition: avformat.h:750
IFILTER_FLAG_CROP
@ IFILTER_FLAG_CROP
Definition: ffmpeg.h:267
DEF_CHOOSE_FORMAT
#define DEF_CHOOSE_FORMAT(name, type, var, supported_list, none, printf_format, get_name)
Definition: ffmpeg_filter.c:382
channel_layout.h
AVBufferSrcParameters
This structure contains the parameters describing the frames that will be passed to this filter.
Definition: buffersrc.h:73
av_buffersink_get_sample_rate
int av_buffersink_get_sample_rate(const AVFilterContext *ctx)
AVBufferSrcParameters::format
int format
video: the pixel format, value corresponds to enum AVPixelFormat audio: the sample format,...
Definition: buffersrc.h:78
describe_filter_link
static char * describe_filter_link(FilterGraph *fg, AVFilterInOut *inout, int in)
Definition: ffmpeg_filter.c:626
buffer
the frame and frame reference mechanism is intended to as much as expensive copies of that data while still allowing the filters to produce correct results The data is stored in buffers represented by AVFrame structures Several references can point to the same frame buffer
Definition: filter_design.txt:49
InputFilterPriv::bound
int bound
Definition: ffmpeg_filter.c:117
avfilter_init_dict
int avfilter_init_dict(AVFilterContext *ctx, AVDictionary **options)
Initialize a filter with the supplied dictionary of options.
Definition: avfilter.c:918
AVRational::den
int den
Denominator.
Definition: rational.h:60
InputStream::file
struct InputFile * file
Definition: ffmpeg.h:480
AVFilterChain
A filterchain is a list of filter specifications.
Definition: avfilter.h:932
InputFilterPriv::frame_queue
AVFifo * frame_queue
Definition: ffmpeg_filter.c:138
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
avfilter.h
InputFilterPriv::type_src
enum AVMediaType type_src
Definition: ffmpeg_filter.c:114
av_channel_layout_uninit
void av_channel_layout_uninit(AVChannelLayout *channel_layout)
Free any allocated data in the channel layout and reset the channel count to 0.
Definition: channel_layout.c:442
FilterGraphPriv::is_meta
int is_meta
Definition: ffmpeg_filter.c:52
insert_trim
static int insert_trim(void *logctx, int64_t start_time, int64_t duration, AVFilterContext **last_filter, int *pad_idx, const char *filter_name)
Definition: ffmpeg_filter.c:1539
IFILTER_FLAG_CFR
@ IFILTER_FLAG_CFR
Definition: ffmpeg.h:266
AVFILTER_FLAG_METADATA_ONLY
#define AVFILTER_FLAG_METADATA_ONLY
The filter is a "metadata" filter - it does not modify the frame data in any way.
Definition: avfilter.h:183
avio_read
int avio_read(AVIOContext *s, unsigned char *buf, int size)
Read size bytes from AVIOContext into buf.
Definition: aviobuf.c:615
ifilter_bind_fg
static int ifilter_bind_fg(InputFilterPriv *ifp, FilterGraph *fg_src, int out_idx)
Definition: ffmpeg_filter.c:947
choose_out_timebase
static int choose_out_timebase(OutputFilterPriv *ofp, AVFrame *frame)
Definition: ffmpeg_filter.c:2373
Windows::Graphics::DirectX::Direct3D11::p
IDirect3DDxgiInterfaceAccess _COM_Outptr_ void ** p
Definition: vsrc_gfxcapture_winrt.hpp:53
OutputFilterPriv::flags
unsigned flags
Definition: ffmpeg_filter.c:243
OutputFilterPriv::sample_rates
const int * sample_rates
Definition: ffmpeg_filter.c:228
AVSideDataDescriptor
This struct describes the properties of a side data type.
Definition: frame.h:330
AVERROR_FILTER_NOT_FOUND
#define AVERROR_FILTER_NOT_FOUND
Filter not found.
Definition: error.h:60
sub2video_copy_rect
static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h, AVSubtitleRect *r)
Definition: ffmpeg_filter.c:294
InputFilterPriv::side_data
AVFrameSideData ** side_data
Definition: ffmpeg_filter.c:135
AVFilterGraphSegment::nb_chains
size_t nb_chains
Definition: avfilter.h:958
OutputFilterPriv::alpha_modes
enum AVAlphaMode * alpha_modes
Definition: ffmpeg_filter.c:231
AVFilterContext
An instance of a filter.
Definition: avfilter.h:274
FilterGraph::class
const AVClass * class
Definition: ffmpeg.h:408
av_channel_layout_copy
int av_channel_layout_copy(AVChannelLayout *dst, const AVChannelLayout *src)
Make a copy of a channel layout.
Definition: channel_layout.c:449
OutputFilter
Definition: ffmpeg.h:383
InputFilterPriv::drop_warned
int drop_warned
Definition: ffmpeg_filter.c:118
av_log_once
void av_log_once(void *avcl, int initial_level, int subsequent_level, int *state, const char *fmt,...)
Definition: log.c:451
sub2video_frame
static int sub2video_frame(InputFilter *ifilter, AVFrame *frame, int buffer)
Definition: ffmpeg_filter.c:2952
InputFilterPriv::ifilter
InputFilter ifilter
Definition: ffmpeg_filter.c:102
AVIO_FLAG_READ
#define AVIO_FLAG_READ
read-only
Definition: avio.h:617
av_strdup
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:272
desc
const char * desc
Definition: libsvtav1.c:78
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:200
configure_output_video_filter
static int configure_output_video_filter(FilterGraphPriv *fgp, AVFilterGraph *graph, OutputFilter *ofilter, AVFilterInOut *out)
Definition: ffmpeg_filter.c:1616
ViewSpecifier::type
enum ViewSpecifierType type
Definition: ffmpeg.h:130
av_buffersrc_get_nb_failed_requests
unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src)
Get the number of failed requests.
Definition: buffersrc.c:333
OutputFilterPriv::color_spaces
enum AVColorSpace * color_spaces
Definition: ffmpeg_filter.c:229
mem.h
AVBufferRef
A reference to a data buffer.
Definition: buffer.h:82
avio_open2
int avio_open2(AVIOContext **s, const char *filename, int flags, const AVIOInterruptCB *int_cb, AVDictionary **options)
Create and initialize a AVIOContext for accessing the resource indicated by url.
Definition: avio.c:492
av_buffersink_get_colorspace
enum AVColorSpace av_buffersink_get_colorspace(const AVFilterContext *ctx)
adjust_frame_pts_to_encoder_tb
static double adjust_frame_pts_to_encoder_tb(void *logctx, AVFrame *frame, AVRational tb_dst, int64_t start_time)
Definition: ffmpeg_filter.c:2450
OutputFilter::nb_frames_drop
atomic_uint_least64_t nb_frames_drop
Definition: ffmpeg.h:404
auto_conversion_filters
int auto_conversion_filters
Definition: ffmpeg_opt.c:83
llrint
#define llrint(x)
Definition: libm.h:396
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:282
bind_inputs
static int bind_inputs(FilterGraph *fg, int commit)
Definition: ffmpeg_filter.c:1470
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
w
uint8_t w
Definition: llvidencdsp.c:39
InputStream::index
int index
Definition: ffmpeg.h:482
sch_filter_receive_finish
void sch_filter_receive_finish(Scheduler *sch, unsigned fg_idx, unsigned in_idx)
Called by filter tasks to signal that a filter input will no longer accept input.
Definition: ffmpeg_sched.c:2547
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
AVDictionaryEntry
Definition: dict.h:90
ENC_TIME_BASE_FILTER
@ ENC_TIME_BASE_FILTER
Definition: ffmpeg.h:79
FilterCommand::target
char * target
Definition: ffmpeg_filter.c:252
av_frame_side_data_desc
const AVSideDataDescriptor * av_frame_side_data_desc(enum AVFrameSideDataType type)
Definition: side_data.c:62
fg_class
static const AVClass fg_class
Definition: ffmpeg_filter.c:1078
fg_create
int fg_create(FilterGraph **pfg, char **graph_desc, Scheduler *sch, const OutputFilterOptions *opts)
Create a new filtergraph in the global filtergraph list.
Definition: ffmpeg_filter.c:1085
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
av_dict_set
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:86
av_dict_get_string
int av_dict_get_string(const AVDictionary *m, char **buffer, const char key_val_sep, const char pairs_sep)
Get dictionary entries as a string.
Definition: dict.c:260
OFILTER_FLAG_DISABLE_CONVERT
@ OFILTER_FLAG_DISABLE_CONVERT
Definition: ffmpeg.h:299
av_dict_copy
int av_dict_copy(AVDictionary **dst, const AVDictionary *src, int flags)
Copy entries from one AVDictionary struct into another.
Definition: dict.c:247
Decoder::type
enum AVMediaType type
Definition: ffmpeg.h:465
InputFilterPriv::format
int format
Definition: ffmpeg_filter.c:122
InputFilterPriv::end_pts
int64_t end_pts
Definition: ffmpeg_filter.c:153
nb_filtergraphs
int nb_filtergraphs
Definition: ffmpeg.c:115
av_frame_side_data_get
static const AVFrameSideData * av_frame_side_data_get(AVFrameSideData *const *sd, const int nb_sd, enum AVFrameSideDataType type)
Wrapper around av_frame_side_data_get_c() to workaround the limitation that for any type T the conver...
Definition: frame.h:1151
int32_t
int32_t
Definition: audioconvert.c:56
InputFilterPriv::alpha_mode
enum AVAlphaMode alpha_mode
Definition: ffmpeg_filter.c:128
sub2video_update
static void sub2video_update(InputFilterPriv *ifp, int64_t heartbeat_pts, const AVSubtitle *sub)
Definition: ffmpeg_filter.c:341
timestamp.h
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
avio_close
int avio_close(AVIOContext *s)
Close the resource accessed by the AVIOContext s and free it.
Definition: avio.c:617
OutputFilterPriv::format
int format
Definition: ffmpeg_filter.c:196
av_strlcpy
size_t av_strlcpy(char *dst, const char *src, size_t size)
Copy the string src to dst, but no more than size - 1 bytes, and null-terminate dst.
Definition: avstring.c:85
avfilter_graph_send_command
int avfilter_graph_send_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, char *res, int res_len, int flags)
Send a command to one or more filter instances.
Definition: avfiltergraph.c:1454
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
graphprint.h
InputFilterPriv::opts
InputFilterOptions opts
Definition: ffmpeg_filter.c:104
dts_error_threshold
float dts_error_threshold
Definition: ffmpeg_opt.c:57
OutputFilterPriv::trim_duration_us
int64_t trim_duration_us
Definition: ffmpeg_filter.c:237
read_file_to_string
char * read_file_to_string(const char *filename)
Definition: cmdutils.c:1571
av_fifo_freep2
void av_fifo_freep2(AVFifo **f)
Free an AVFifo and reset pointer to NULL.
Definition: fifo.c:286
InputFilterPriv::downmixinfo
AVDownmixInfo downmixinfo
Definition: ffmpeg_filter.c:147
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
av_ts2str
#define av_ts2str(ts)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:54
h
h
Definition: vp9dsp_template.c:2070
av_bprint_chars
void av_bprint_chars(AVBPrint *buf, char c, unsigned n)
Append char c n times to a print buffer.
Definition: bprint.c:130
hw_device_for_filter
AVBufferRef * hw_device_for_filter(void)
Get a hardware device to be used with this filtergraph.
Definition: ffmpeg_hw.c:298
AVDictionaryEntry::value
char * value
Definition: dict.h:92
AVFilterGraph::nb_filters
unsigned nb_filters
Definition: avfilter.h:592
avstring.h
AVFilterContext::filter
const AVFilter * filter
the AVFilter of which this is an instance
Definition: avfilter.h:277
AVColorRange
AVColorRange
Visual content value range.
Definition: pixfmt.h:742
frame_data_c
const FrameData * frame_data_c(AVFrame *frame)
Definition: ffmpeg.c:482
OutputFilterPriv::tb_out
AVRational tb_out
Definition: ffmpeg_filter.c:214
AVFilterInOut
A linked-list of the inputs/outputs of the filter chain.
Definition: avfilter.h:746
VSYNC_PASSTHROUGH
@ VSYNC_PASSTHROUGH
Definition: ffmpeg.h:68
OutputFilterPriv::height
int height
Definition: ffmpeg_filter.c:197
AV_FRAME_DATA_DOWNMIX_INFO
@ AV_FRAME_DATA_DOWNMIX_INFO
Metadata relevant to a downmix procedure.
Definition: frame.h:73
snprintf
#define snprintf
Definition: snprintf.h:34
SCH_FILTER_IN
#define SCH_FILTER_IN(filter, input)
Definition: ffmpeg_sched.h:126
FPSConvContext::framerate
AVRational framerate
Definition: ffmpeg_filter.c:181
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
buffersrc.h
fg_thread_set_name
static void fg_thread_set_name(const FilterGraph *fg)
Definition: ffmpeg_filter.c:3233
ist_find_unused
InputStream * ist_find_unused(enum AVMediaType type)
Find an unused input stream of given type.
Definition: ffmpeg_demux.c:173
sub2video_prepare
static void sub2video_prepare(InputFilterPriv *ifp)
Definition: ffmpeg_filter.c:1835
FilterGraph::is_internal
int is_internal
Definition: ffmpeg.h:419
av_rescale_q_rnd
int64_t av_rescale_q_rnd(int64_t a, AVRational bq, AVRational cq, enum AVRounding rnd)
Rescale a 64-bit integer by 2 rational numbers with specified rounding.
Definition: mathematics.c:134
av_dict_iterate
const AVDictionaryEntry * av_dict_iterate(const AVDictionary *m, const AVDictionaryEntry *prev)
Iterate over a dictionary.
Definition: dict.c:42
AVSubtitle::start_display_time
uint32_t start_display_time
Definition: avcodec.h:2092
FilterCommand::command
char * command
Definition: ffmpeg_filter.c:253
src
#define src
Definition: vp8dsp.c:248
FilterCommand
Definition: ffmpeg_filter.c:251
AV_FIFO_FLAG_AUTO_GROW
#define AV_FIFO_FLAG_AUTO_GROW
Automatically resize the FIFO on writes, so that the data fits.
Definition: fifo.h:63
InputFilterPriv::height
int height
Definition: ffmpeg_filter.c:124
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:3376
OutputFilter::nb_frames_dup
atomic_uint_least64_t nb_frames_dup
Definition: ffmpeg.h:403
filter_complex_nbthreads
int filter_complex_nbthreads
Definition: ffmpeg_opt.c:77
InputFilterOptions::framerate
AVRational framerate
Definition: ffmpeg.h:282
av_buffersink_get_color_range
enum AVColorRange av_buffersink_get_color_range(const AVFilterContext *ctx)
ff_thread_setname
static int ff_thread_setname(const char *name)
Definition: thread.h:216
InputFilter::input_name
char * input_name
Definition: ffmpeg.h:376
LATENCY_PROBE_FILTER_POST
@ LATENCY_PROBE_FILTER_POST
Definition: ffmpeg.h:104
FPSConvContext::framerate_supported
const AVRational * framerate_supported
Definition: ffmpeg_filter.c:183