/*
 * FFmpeg — ffmpeg_filter.c (extracted documentation-page header converted
 * to a comment; the original page text was not valid C)
 */
1 /*
2  * ffmpeg filter configuration
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <stdint.h>
22 
23 #include "ffmpeg.h"
24 
25 #include "libavfilter/avfilter.h"
26 #include "libavfilter/buffersink.h"
27 #include "libavfilter/buffersrc.h"
28 
29 #include "libavutil/avassert.h"
30 #include "libavutil/avstring.h"
31 #include "libavutil/bprint.h"
33 #include "libavutil/mem.h"
34 #include "libavutil/opt.h"
35 #include "libavutil/pixdesc.h"
36 #include "libavutil/pixfmt.h"
37 #include "libavutil/samplefmt.h"
38 #include "libavutil/time.h"
39 #include "libavutil/timestamp.h"
40 
41 // FIXME private header, used for mid_pred()
42 #include "libavcodec/mathops.h"
43 
44 typedef struct FilterGraphPriv {
46 
47  // name used for logging
48  char log_name[32];
49 
50  int is_simple;
51  // true when the filtergraph contains only meta filters
52  // that do not modify the frame data
53  int is_meta;
54  // source filters are present in the graph
57 
58  unsigned nb_outputs_done;
59 
60  const char *graph_desc;
61 
62  char *nb_threads;
63 
64  // frame for temporarily holding output from the filtergraph
66  // frame for sending output to the encoder
68 
70  unsigned sch_idx;
72 
74 {
75  return (FilterGraphPriv*)fg;
76 }
77 
// Const-correct accessor for the private data of a FilterGraph.
// NOTE(review): the cast assumes FilterGraph is the first member of
// FilterGraphPriv (the non-const fgp_from_fg() above does the same) —
// confirm against the FilterGraphPriv definition.
static const FilterGraphPriv *cfgp_from_cfg(const FilterGraph *fg)
{
    return (const FilterGraphPriv*)fg;
}
82 
83 // data that is local to the filter thread and not visible outside of it
84 typedef struct FilterGraphThread {
86 
88 
89  // Temporary buffer for output frames, since on filtergraph reset
90  // we cannot send them to encoders immediately.
91  // The output index is stored in frame opaque.
93 
94  // index of the next input to request from the scheduler
95  unsigned next_in;
96  // set to 1 after at least one frame passed through this output
97  int got_frame;
98 
99  // EOF status of each input/output, as received by the thread
100  uint8_t *eof_in;
101  uint8_t *eof_out;
103 
104 typedef struct InputFilterPriv {
106 
108 
109  int index;
110 
112 
113  // used to hold submitted input
115 
116  /* for filters that are not yet bound to an input stream,
117  * this stores the input linklabel, if any */
118  uint8_t *linklabel;
119 
120  // filter data type
122  // source data type: AVMEDIA_TYPE_SUBTITLE for sub2video,
123  // same as type otherwise
125 
126  int eof;
127  int bound;
128 
129  // parameters configured for this input
130  int format;
131 
132  int width, height;
136 
139 
141 
143 
145 
149 
150  struct {
151  AVFrame *frame;
152 
155 
156  ///< marks if sub2video_update should force an initialization
157  unsigned int initialize;
158  } sub2video;
160 
162 {
163  return (InputFilterPriv*)ifilter;
164 }
165 
166 typedef struct FPSConvContext {
168  /* number of frames emitted by the video-encoding sync code */
170  /* history of nb_frames_prev, i.e. the number of times the
171  * previous frame was duplicated by vsync code in recent
172  * do_video_out() calls */
174 
175  uint64_t dup_warning;
176 
179 
181 
187 
188 typedef struct OutputFilterPriv {
190 
191  int index;
192 
193  void *log_parent;
194  char log_name[32];
195 
196  char *name;
197 
199 
200  /* desired output stream properties */
201  int format;
202  int width, height;
207 
208  // time base in which the output is sent to our downstream
209  // does not need to match the filtersink's timebase
211  // at least one frame with the above timebase was sent
212  // to our downstream, so it cannot change anymore
214 
216 
219 
220  // those are only set if no format is specified and the encoder gives us multiple options
221  // They point directly to the relevant lists of the encoder.
222  const int *formats;
224  const int *sample_rates;
227 
231  // offset for output timestamps, in AV_TIME_BASE_Q
235 
236  unsigned flags;
238 
240 {
241  return (OutputFilterPriv*)ofilter;
242 }
243 
// A single command to be delivered to a filter inside the graph,
// freed by filter_command_free() below.
typedef struct FilterCommand {
    char *target;   // presumably the name of the filter (instance) addressed
    char *command;  // command name
    char *arg;      // command argument string, may be NULL

    double time;    // presumably the time to apply the command, in seconds
    // NOTE(review): one member between 'time' and the closing brace is
    // elided in this view of the file.
} FilterCommand;
252 
// AVBuffer free callback for a buffer holding a FilterCommand:
// releases the strings owned by the command, then the struct itself.
static void filter_command_free(void *opaque, uint8_t *data)
{
    // NOTE(review): the declaration of 'fc' (presumably a cast of 'data'
    // to FilterCommand *) is elided in this view of the file.

    av_freep(&fc->target);
    av_freep(&fc->command);
    av_freep(&fc->arg);

    av_free(data);
}
263 
265 {
266  AVFrame *frame = ifp->sub2video.frame;
267  int ret;
268 
270 
271  frame->width = ifp->width;
272  frame->height = ifp->height;
273  frame->format = ifp->format;
274  frame->colorspace = ifp->color_space;
275  frame->color_range = ifp->color_range;
276 
278  if (ret < 0)
279  return ret;
280 
281  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
282 
283  return 0;
284 }
285 
286 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
287  AVSubtitleRect *r)
288 {
289  uint32_t *pal, *dst2;
290  uint8_t *src, *src2;
291  int x, y;
292 
293  if (r->type != SUBTITLE_BITMAP) {
294  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
295  return;
296  }
297  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
298  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
299  r->x, r->y, r->w, r->h, w, h
300  );
301  return;
302  }
303 
304  dst += r->y * dst_linesize + r->x * 4;
305  src = r->data[0];
306  pal = (uint32_t *)r->data[1];
307  for (y = 0; y < r->h; y++) {
308  dst2 = (uint32_t *)dst;
309  src2 = src;
310  for (x = 0; x < r->w; x++)
311  *(dst2++) = pal[*(src2++)];
312  dst += dst_linesize;
313  src += r->linesize[0];
314  }
315 }
316 
318 {
319  AVFrame *frame = ifp->sub2video.frame;
320  int ret;
321 
322  av_assert1(frame->data[0]);
323  ifp->sub2video.last_pts = frame->pts = pts;
327  if (ret != AVERROR_EOF && ret < 0)
328  av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n",
329  av_err2str(ret));
330 }
331 
/* Render a subtitle onto the sub2video canvas frame and push the result
 * into the filtergraph.
 *
 * With a non-NULL 'sub', its display window is rescaled from
 * AV_TIME_BASE_Q into the input's time base and all rectangles are
 * blitted onto a freshly blanked canvas.  With sub == NULL a blank
 * frame is emitted, starting at heartbeat_pts when (re)initializing or
 * at the previous subtitle's end time otherwise. */
static void sub2video_update(InputFilterPriv *ifp, int64_t heartbeat_pts,
                             const AVSubtitle *sub)
{
    AVFrame *frame = ifp->sub2video.frame;
    int8_t *dst;  // NOTE(review): presumably should be uint8_t * — it is
                  // assigned frame->data[0] and passed to
                  // sub2video_copy_rect(), both uint8_t *; confirm upstream.
    int dst_linesize;
    int num_rects;
    int64_t pts, end_pts;

    if (sub) {
        // convert the subtitle's display window into the input time base
        pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
                           AV_TIME_BASE_Q, ifp->time_base);
        end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
                               AV_TIME_BASE_Q, ifp->time_base);
        num_rects = sub->num_rects;
    } else {
        /* If we are initializing the system, utilize current heartbeat
           PTS as the start time, and show until the following subpicture
           is received. Otherwise, utilize the previous subpicture's end time
           as the fall-back value. */
        pts = ifp->sub2video.initialize ?
              heartbeat_pts : ifp->sub2video.end_pts;
        end_pts = INT64_MAX;
        num_rects = 0;
    }
    if (sub2video_get_blank_frame(ifp) < 0) {
        // NOTE(review): the first line of this av_log() call (context and
        // level arguments) is elided in this view of the file.
               "Impossible to get a blank canvas.\n");
        return;
    }
    dst          = frame->data    [0];
    dst_linesize = frame->linesize[0];
    // blit every rectangle of the subtitle onto the blank canvas
    for (int i = 0; i < num_rects; i++)
        sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
    sub2video_push_ref(ifp, pts);
    ifp->sub2video.end_pts    = end_pts;
    ifp->sub2video.initialize = 0;
}
370 
/* Define a function for appending a list of allowed formats
 * to an AVBPrint. If nonempty, the list will have a header.
 *
 * The generated choose_<name>() appends "name=value:" when ofp->var holds
 * an explicit value (!= none), or "name=v1|v2|...:" when only a list of
 * supported values is known (ofp->supported_list, terminated by 'none').
 * Nothing is appended when neither is set.  printf_format and get_name
 * control how each value is rendered.
 * (Comments cannot be placed on the continuation lines below: a //
 * comment would swallow the trailing backslash.) */
#define DEF_CHOOSE_FORMAT(name, type, var, supported_list, none, printf_format, get_name) \
static void choose_ ## name (OutputFilterPriv *ofp, AVBPrint *bprint) \
{ \
    if (ofp->var == none && !ofp->supported_list) \
        return; \
    av_bprintf(bprint, #name "="); \
    if (ofp->var != none) { \
        av_bprintf(bprint, printf_format, get_name(ofp->var)); \
    } else { \
        const type *p; \
        \
        for (p = ofp->supported_list; *p != none; p++) { \
            av_bprintf(bprint, printf_format "|", get_name(*p)); \
        } \
        if (bprint->len > 0) \
            bprint->str[--bprint->len] = '\0'; \
    } \
    av_bprint_chars(bprint, ':', 1); \
}
392 
395 
398 
400  "%d", )
401 
402 DEF_CHOOSE_FORMAT(color_spaces, enum AVColorSpace, color_space, color_spaces,
404 
405 DEF_CHOOSE_FORMAT(color_ranges, enum AVColorRange, color_range, color_ranges,
407 
/* Append "channel_layouts=..." to bprint: either the single configured
 * layout (ofp->ch_layout) or a '|'-separated list of the encoder-supported
 * layouts (ofp->ch_layouts).  Appends nothing when neither is set. */
static void choose_channel_layouts(OutputFilterPriv *ofp, AVBPrint *bprint)
{
    if (av_channel_layout_check(&ofp->ch_layout)) {
        // a specific valid layout is set - print just that one
        av_bprintf(bprint, "channel_layouts=");
        av_channel_layout_describe_bprint(&ofp->ch_layout, bprint);
    } else if (ofp->ch_layouts) {
        const AVChannelLayout *p;

        av_bprintf(bprint, "channel_layouts=");
        for (p = ofp->ch_layouts; p->nb_channels; p++) {
            // NOTE(review): the call describing *p into bprint is elided
            // in this view of the file.
            av_bprintf(bprint, "|");
        }
        if (bprint->len > 0)
            bprint->str[--bprint->len] = '\0';  // drop the trailing '|'
    } else
        return;
    av_bprint_chars(bprint, ':', 1);  // option separator
}
427 
428 static int read_binary(const char *path, uint8_t **data, int *len)
429 {
430  AVIOContext *io = NULL;
431  int64_t fsize;
432  int ret;
433 
434  *data = NULL;
435  *len = 0;
436 
437  ret = avio_open2(&io, path, AVIO_FLAG_READ, &int_cb, NULL);
438  if (ret < 0) {
439  av_log(NULL, AV_LOG_ERROR, "Cannot open file '%s': %s\n",
440  path, av_err2str(ret));
441  return ret;
442  }
443 
444  fsize = avio_size(io);
445  if (fsize < 0 || fsize > INT_MAX) {
446  av_log(NULL, AV_LOG_ERROR, "Cannot obtain size of file %s\n", path);
447  ret = AVERROR(EIO);
448  goto fail;
449  }
450 
451  *data = av_malloc(fsize);
452  if (!*data) {
453  ret = AVERROR(ENOMEM);
454  goto fail;
455  }
456 
457  ret = avio_read(io, *data, fsize);
458  if (ret != fsize) {
459  av_log(NULL, AV_LOG_ERROR, "Error reading file %s\n", path);
460  ret = ret < 0 ? ret : AVERROR(EIO);
461  goto fail;
462  }
463 
464  *len = fsize;
465 
466  ret = 0;
467 fail:
468  avio_close(io);
469  if (ret < 0) {
470  av_freep(data);
471  *len = 0;
472  }
473  return ret;
474 }
475 
/* Apply a single option key=val to filter context f.
 *
 * A key of the form "/name" denotes an option whose value is to be loaded
 * from the file named by 'val': raw bytes for AV_OPT_TYPE_BINARY options,
 * text contents otherwise. */
static int filter_opt_apply(AVFilterContext *f, const char *key, const char *val)
{
    const AVOption *o = NULL;
    int ret;

    // NOTE(review): the initial attempt to set the option directly
    // (assigning 'ret') is elided in this view of the file.
    if (ret >= 0)
        return 0;

    // option not found as-is: a leading '/' may mean "load value from file"
    if (ret == AVERROR_OPTION_NOT_FOUND && key[0] == '/')
        // NOTE(review): the option lookup assigning 'o' is elided here.
    if (!o)
        goto err_apply;

    // key is a valid option name prefixed with '/'
    // interpret value as a path from which to load the actual option value
    key++;

    if (o->type == AV_OPT_TYPE_BINARY) {
        uint8_t *data;
        int len;

        ret = read_binary(val, &data, &len);
        if (ret < 0)
            goto err_load;

        // NOTE(review): the call applying the binary blob to the option
        // (assigning 'ret') is elided here.
        av_freep(&data);
    } else {
        char *data = file_read(val);
        if (!data) {
            ret = AVERROR(EIO);
            goto err_load;
        }

        // NOTE(review): the call applying the text value to the option
        // (assigning 'ret') is elided here.
        av_freep(&data);
    }
    if (ret < 0)
        goto err_apply;

    return 0;

err_apply:
    // NOTE(review): the first line of this av_log() call is elided here.
           "Error applying option '%s' to filter '%s': %s\n",
           key, f->filter->name, av_err2str(ret));
    return ret;
err_load:
    // NOTE(review): the first line of this av_log() call is elided here.
           "Error loading value for option '%s' from file '%s'\n",
           key, val);
    return ret;
}
530 
532 {
533  for (size_t i = 0; i < seg->nb_chains; i++) {
534  AVFilterChain *ch = seg->chains[i];
535 
536  for (size_t j = 0; j < ch->nb_filters; j++) {
537  AVFilterParams *p = ch->filters[j];
538  const AVDictionaryEntry *e = NULL;
539 
540  av_assert0(p->filter);
541 
542  while ((e = av_dict_iterate(p->opts, e))) {
543  int ret = filter_opt_apply(p->filter, e->key, e->value);
544  if (ret < 0)
545  return ret;
546  }
547 
548  av_dict_free(&p->opts);
549  }
550  }
551 
552  return 0;
553 }
554 
/* Parse the textual filtergraph description 'desc' into 'graph'.
 *
 * The graph's unconnected inputs/outputs are returned through pointer
 * parameters (reset to NULL first).  When hw_device is supplied, a
 * reference to it is attached to every created filter that declares
 * AVFILTER_FLAG_HWDEVICE. */
static int graph_parse(AVFilterGraph *graph, const char *desc,
                       // NOTE(review): a parameter line (presumably the
                       // AVFilterInOut **inputs/**outputs pair used below)
                       // is elided in this view of the file.
                       AVBufferRef *hw_device)
{
    // NOTE(review): the declaration of 'seg' (the parsed graph segment)
    // is elided here.
    int ret;

    *inputs  = NULL;
    *outputs = NULL;

    ret = avfilter_graph_segment_parse(graph, desc, 0, &seg);
    if (ret < 0)
        return ret;

    // NOTE(review): the call creating the segment's filters (assigning
    // 'ret') is elided here.
    if (ret < 0)
        goto fail;

    if (hw_device) {
        // hand the HW device to every filter that can make use of one
        for (int i = 0; i < graph->nb_filters; i++) {
            AVFilterContext *f = graph->filters[i];

            if (!(f->filter->flags & AVFILTER_FLAG_HWDEVICE))
                continue;
            f->hw_device_ctx = av_buffer_ref(hw_device);
            if (!f->hw_device_ctx) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }
        }
    }

    ret = graph_opts_apply(seg);
    if (ret < 0)
        goto fail;

    // NOTE(review): the final segment-apply call that links the filters
    // and fills in the unconnected inputs/outputs is elided here.

fail:
    // NOTE(review): the segment cleanup call is elided here.
    return ret;
}
597 
598 // Filters can be configured only if the formats of all inputs are known.
600 {
601  for (int i = 0; i < fg->nb_inputs; i++) {
603  if (ifp->format < 0)
604  return 0;
605  }
606  return 1;
607 }
608 
609 static int filter_thread(void *arg);
610 
611 static char *describe_filter_link(FilterGraph *fg, AVFilterInOut *inout, int in)
612 {
613  AVFilterContext *ctx = inout->filter_ctx;
614  AVFilterPad *pads = in ? ctx->input_pads : ctx->output_pads;
615  int nb_pads = in ? ctx->nb_inputs : ctx->nb_outputs;
616 
617  if (nb_pads > 1)
618  return av_strdup(ctx->filter->name);
619  return av_asprintf("%s:%s", ctx->filter->name,
620  avfilter_pad_get_name(pads, inout->pad_idx));
621 }
622 
623 static const char *ofilter_item_name(void *obj)
624 {
625  OutputFilterPriv *ofp = obj;
626  return ofp->log_name;
627 }
628 
// AVClass used for logging from output filters: messages are tagged with
// ofp->log_name (via ofilter_item_name) and chained to the parent logging
// context stored at OutputFilterPriv.log_parent.
static const AVClass ofilter_class = {
    .class_name                = "OutputFilter",
    .version                   = LIBAVUTIL_VERSION_INT,
    .item_name                 = ofilter_item_name,
    .parent_log_context_offset = offsetof(OutputFilterPriv, log_parent),
    .category                  = AV_CLASS_CATEGORY_FILTER,
};
636 
638 {
639  OutputFilterPriv *ofp;
640  OutputFilter *ofilter;
641 
642  ofp = allocate_array_elem(&fg->outputs, sizeof(*ofp), &fg->nb_outputs);
643  if (!ofp)
644  return NULL;
645 
646  ofilter = &ofp->ofilter;
647  ofilter->class = &ofilter_class;
648  ofp->log_parent = fg;
649  ofilter->graph = fg;
650  ofilter->type = type;
651  ofp->format = -1;
654  ofp->index = fg->nb_outputs - 1;
655 
656  snprintf(ofp->log_name, sizeof(ofp->log_name), "%co%d",
658 
659  return ofilter;
660 }
661 
/* Bind a filtergraph input to demuxer input stream 'ist'.
 *
 * Registers the filter with the stream, connects the corresponding
 * scheduler nodes, and sets up the sub2video canvas when a subtitle
 * stream feeds a video input.  Returns 0 or a negative AVERROR code. */
static int ifilter_bind_ist(InputFilter *ifilter, InputStream *ist,
                            const ViewSpecifier *vs)
{
    InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
    FilterGraphPriv *fgp = fgp_from_fg(ifilter->graph);
    // NOTE(review): the declaration of 'src' (the scheduler node filled
    // by ist_filter_add() below) is elided in this view of the file.
    int ret;

    // an input may only be bound once
    av_assert0(!ifp->bound);
    ifp->bound = 1;

    if (ifp->type != ist->par->codec_type &&
        // NOTE(review): the rest of this condition (presumably the
        // sub2video exception) and part of the av_log() argument list
        // are elided in this view of the file.
        av_log(fgp, AV_LOG_ERROR, "Tried to connect %s stream to %s filtergraph input\n",
        return AVERROR(EINVAL);
    }

    ifp->type_src = ist->st->codecpar->codec_type;

    ifp->opts.fallback = av_frame_alloc();
    if (!ifp->opts.fallback)
        return AVERROR(ENOMEM);

    ret = ist_filter_add(ist, ifilter, filtergraph_is_simple(ifilter->graph),
                         vs, &ifp->opts, &src);
    if (ret < 0)
        return ret;

    // connect the demuxer/decoder node to this filtergraph input
    ret = sch_connect(fgp->sch,
                      src, SCH_FILTER_IN(fgp->sch_idx, ifp->index));
    if (ret < 0)
        return ret;

    if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE) {
        // subtitle feeding a video input: set up the sub2video canvas
        ifp->sub2video.frame = av_frame_alloc();
        if (!ifp->sub2video.frame)
            return AVERROR(ENOMEM);

        ifp->width  = ifp->opts.sub2video_width;
        ifp->height = ifp->opts.sub2video_height;

        /* rectangles are AV_PIX_FMT_PAL8, but we have no guarantee that the
           palettes for all rectangles are identical or compatible */
        ifp->format = AV_PIX_FMT_RGB32;

        ifp->time_base = AV_TIME_BASE_Q;

        av_log(fgp, AV_LOG_VERBOSE, "sub2video: using %dx%d canvas\n",
               ifp->width, ifp->height);
    }

    return 0;
}
716 
718  const ViewSpecifier *vs)
719 {
722  int ret;
723 
724  av_assert0(!ifp->bound);
725  ifp->bound = 1;
726 
727  if (ifp->type != dec->type) {
728  av_log(fgp, AV_LOG_ERROR, "Tried to connect %s decoder to %s filtergraph input\n",
730  return AVERROR(EINVAL);
731  }
732 
733  ifp->type_src = ifp->type;
734 
735  ret = dec_filter_add(dec, &ifp->ifilter, &ifp->opts, vs, &src);
736  if (ret < 0)
737  return ret;
738 
739  ret = sch_connect(fgp->sch, src, SCH_FILTER_IN(fgp->sch_idx, ifp->index));
740  if (ret < 0)
741  return ret;
742 
743  return 0;
744 }
745 
746 static int set_channel_layout(OutputFilterPriv *f, const AVChannelLayout *layouts_allowed,
747  const AVChannelLayout *layout_requested)
748 {
749  int i, err;
750 
751  if (layout_requested->order != AV_CHANNEL_ORDER_UNSPEC) {
752  /* Pass the layout through for all orders but UNSPEC */
753  err = av_channel_layout_copy(&f->ch_layout, layout_requested);
754  if (err < 0)
755  return err;
756  return 0;
757  }
758 
759  /* Requested layout is of order UNSPEC */
760  if (!layouts_allowed) {
761  /* Use the default native layout for the requested amount of channels when the
762  encoder doesn't have a list of supported layouts */
763  av_channel_layout_default(&f->ch_layout, layout_requested->nb_channels);
764  return 0;
765  }
766  /* Encoder has a list of supported layouts. Pick the first layout in it with the
767  same amount of channels as the requested layout */
768  for (i = 0; layouts_allowed[i].nb_channels; i++) {
769  if (layouts_allowed[i].nb_channels == layout_requested->nb_channels)
770  break;
771  }
772  if (layouts_allowed[i].nb_channels) {
773  /* Use it if one is found */
774  err = av_channel_layout_copy(&f->ch_layout, &layouts_allowed[i]);
775  if (err < 0)
776  return err;
777  return 0;
778  }
779  /* If no layout for the amount of channels requested was found, use the default
780  native layout for it. */
781  av_channel_layout_default(&f->ch_layout, layout_requested->nb_channels);
782 
783  return 0;
784 }
785 
/* Bind a filtergraph output to an encoding output stream and connect it
 * to the scheduler.  Copies all stream-level constraints (format, size,
 * rates, layouts, color properties, fps/vsync settings) into the private
 * output-filter state.
 * NOTE(review): the first line of the signature (return type, name, and
 * the OutputFilter/OutputStream parameters used below) is elided in this
 * view of the file. */
                             unsigned sched_idx_enc,
                             const OutputFilterOptions *opts)
{
    OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
    FilterGraph  *fg = ofilter->graph;
    FilterGraphPriv *fgp = fgp_from_fg(fg);
    int ret;

    // an output may only be bound once, and only to a matching stream type
    av_assert0(!ofilter->bound);
    av_assert0(ofilter->type == ost->type);

    ofilter->bound = 1;
    av_freep(&ofilter->linklabel);

    ofp->flags        = opts->flags;
    ofp->ts_offset    = opts->ts_offset;
    ofp->enc_timebase = opts->output_tb;

    ofp->trim_start_us    = opts->trim_start_us;
    ofp->trim_duration_us = opts->trim_duration_us;

    ofp->name = av_strdup(opts->name);
    if (!ofp->name)
        // NOTE(review): EINVAL for an allocation failure looks wrong;
        // AVERROR(ENOMEM) would be expected — confirm against upstream.
        return AVERROR(EINVAL);

    ret = av_dict_copy(&ofp->sws_opts, opts->sws_opts, 0);
    if (ret < 0)
        return ret;

    ret = av_dict_copy(&ofp->swr_opts, opts->swr_opts, 0);
    if (ret < 0)
        return ret;

    if (opts->flags & OFILTER_FLAG_AUDIO_24BIT)
        av_dict_set(&ofp->swr_opts, "output_sample_bits", "24", 0);

    if (fgp->is_simple) {
        // for simple filtergraph there is just one output,
        // so use only graph-level information for logging
        ofp->log_parent = NULL;
        av_strlcpy(ofp->log_name, fgp->log_name, sizeof(ofp->log_name));
    } else
        av_strlcatf(ofp->log_name, sizeof(ofp->log_name), "->%s", ofp->name);

    switch (ofilter->type) {
    case AVMEDIA_TYPE_VIDEO:
        ofp->width  = opts->width;
        ofp->height = opts->height;
        // a fixed value takes precedence over the encoder's list of choices
        if (opts->format != AV_PIX_FMT_NONE) {
            ofp->format = opts->format;
        } else
            ofp->formats = opts->formats;

        if (opts->color_space != AVCOL_SPC_UNSPECIFIED)
            ofp->color_space = opts->color_space;
        else
            ofp->color_spaces = opts->color_spaces;

        if (opts->color_range != AVCOL_RANGE_UNSPECIFIED)
            ofp->color_range = opts->color_range;
        else
            ofp->color_ranges = opts->color_ranges;

        // NOTE(review): one or more lines are elided here in this view
        // of the file.

        ofp->fps.last_frame = av_frame_alloc();
        if (!ofp->fps.last_frame)
            return AVERROR(ENOMEM);

        ofp->fps.vsync_method        = opts->vsync_method;
        ofp->fps.framerate           = ost->frame_rate;
        ofp->fps.framerate_max       = ost->max_frame_rate;
        ofp->fps.framerate_supported = ost->force_fps || !opts->enc ?
                                       NULL : opts->frame_rates;

        // reduce frame rate for mpeg4 to be within the spec limits
        if (opts->enc && opts->enc->id == AV_CODEC_ID_MPEG4)
            ofp->fps.framerate_clip = 65535;

        ofp->fps.dup_warning = 1000;

        break;
    case AVMEDIA_TYPE_AUDIO:
        if (opts->format != AV_SAMPLE_FMT_NONE) {
            ofp->format = opts->format;
        } else {
            ofp->formats = opts->formats;
        }
        if (opts->sample_rate) {
            ofp->sample_rate = opts->sample_rate;
        } else
            ofp->sample_rates = opts->sample_rates;
        if (opts->ch_layout.nb_channels) {
            int ret = set_channel_layout(ofp, opts->ch_layouts, &opts->ch_layout);
            if (ret < 0)
                return ret;
        } else {
            ofp->ch_layouts = opts->ch_layouts;
        }
        break;
    }

    // connect this filtergraph output to the encoder in the scheduler
    ret = sch_connect(fgp->sch, SCH_FILTER_OUT(fgp->sch_idx, ofp->index),
                      SCH_ENC(sched_idx_enc));
    if (ret < 0)
        return ret;

    return 0;
}
896 
/* Bind a filtergraph output to the input of another filtergraph
 * (used when one graph's labeled output feeds another graph).
 * NOTE(review): the first line of the signature (return type, name, and
 * the OutputFilter/InputFilterPriv parameters used below) is elided in
 * this view of the file. */
                                const OutputFilterOptions *opts)
{
    OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);

    // an output may only be bound once, to a matching media type
    av_assert0(!ofilter->bound);
    av_assert0(ofilter->type == ifp->type);

    ofilter->bound = 1;
    av_freep(&ofilter->linklabel);

    ofp->name = av_strdup(opts->name);
    if (!ofp->name)
        // NOTE(review): EINVAL for an allocation failure looks wrong;
        // AVERROR(ENOMEM) would be expected — confirm against upstream.
        return AVERROR(EINVAL);

    av_strlcatf(ofp->log_name, sizeof(ofp->log_name), "->%s", ofp->name);

    return 0;
}
916 
/* Bind this filtergraph input to output out_idx of another filtergraph
 * fg_src, and connect the two graphs in the scheduler. */
static int ifilter_bind_fg(InputFilterPriv *ifp, FilterGraph *fg_src, int out_idx)
{
    // NOTE(review): the declarations of 'fgp' (this graph's private data)
    // and 'opts' (an OutputFilterOptions, zeroed below) are elided in
    // this view of the file.
    OutputFilter *ofilter_src = fg_src->outputs[out_idx];
    char name[32];
    int ret;

    // an input may only be bound once
    av_assert0(!ifp->bound);
    ifp->bound = 1;

    if (ifp->type != ofilter_src->type) {
        av_log(fgp, AV_LOG_ERROR, "Tried to connect %s output to %s input\n",
               av_get_media_type_string(ofilter_src->type),
               // NOTE(review): the second argument line of this av_log()
               // call is elided in this view of the file.
        return AVERROR(EINVAL);
    }

    ifp->type_src = ifp->type;

    memset(&opts, 0, sizeof(opts));

    snprintf(name, sizeof(name), "fg:%d:%d", fgp->fg.index, ifp->index);
    opts.name = name;

    ret = ofilter_bind_ifilter(ofilter_src, ifp, &opts);
    if (ret < 0)
        return ret;

    // connect source graph's output to this graph's input in the scheduler
    ret = sch_connect(fgp->sch, SCH_FILTER_OUT(fg_src->index, out_idx),
                      SCH_FILTER_IN(fgp->sch_idx, ifp->index));
    if (ret < 0)
        return ret;

    return 0;
}
953 
955 {
956  InputFilterPriv *ifp;
957  InputFilter *ifilter;
958 
959  ifp = allocate_array_elem(&fg->inputs, sizeof(*ifp), &fg->nb_inputs);
960  if (!ifp)
961  return NULL;
962 
963  ifilter = &ifp->ifilter;
964  ifilter->graph = fg;
965 
966  ifp->frame = av_frame_alloc();
967  if (!ifp->frame)
968  return NULL;
969 
970  ifp->index = fg->nb_inputs - 1;
971  ifp->format = -1;
974 
976  if (!ifp->frame_queue)
977  return NULL;
978 
979  return ifilter;
980 }
981 
/* Free a FilterGraph and everything it owns: all input and output
 * filters (including queued frames and sub2video state), the graph
 * description and the private frames.  *pfg is set to NULL. */
void fg_free(FilterGraph **pfg)
{
    FilterGraph *fg = *pfg;
    FilterGraphPriv *fgp;

    if (!fg)
        return;
    fgp = fgp_from_fg(fg);

    for (int j = 0; j < fg->nb_inputs; j++) {
        InputFilter *ifilter = fg->inputs[j];
        InputFilterPriv *ifp = ifp_from_ifilter(ifilter);

        if (ifp->frame_queue) {
            AVFrame *frame;
            // drain and free all queued frames
            while (av_fifo_read(ifp->frame_queue, &frame, 1) >= 0)
            // NOTE(review): the loop body freeing 'frame' and the fifo
            // cleanup call are elided in this view of the file.
        }
        av_frame_free(&ifp->sub2video.frame);

        av_frame_free(&ifp->frame);
        av_frame_free(&ifp->opts.fallback);

        // NOTE(review): one cleanup line is elided here.
        av_freep(&ifp->linklabel);
        av_freep(&ifp->opts.name);
        av_freep(&ifilter->name);
        av_freep(&fg->inputs[j]);
    }
    av_freep(&fg->inputs);
    for (int j = 0; j < fg->nb_outputs; j++) {
        OutputFilter *ofilter = fg->outputs[j];
        OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);

        av_frame_free(&ofp->fps.last_frame);
        av_dict_free(&ofp->sws_opts);
        av_dict_free(&ofp->swr_opts);

        av_freep(&ofilter->linklabel);
        av_freep(&ofilter->name);
        av_freep(&ofilter->apad);
        av_freep(&ofp->name);
        // NOTE(review): one cleanup line is elided here.
        av_freep(&fg->outputs[j]);
    }
    av_freep(&fg->outputs);
    av_freep(&fgp->graph_desc);
    av_freep(&fgp->nb_threads);

    av_frame_free(&fgp->frame);
    av_frame_free(&fgp->frame_enc);

    av_freep(pfg);
}
1037 
1038 static const char *fg_item_name(void *obj)
1039 {
1040  const FilterGraphPriv *fgp = obj;
1041 
1042  return fgp->log_name;
1043 }
1044 
// AVClass used for logging from filtergraphs; messages are tagged with
// fgp->log_name via fg_item_name().
static const AVClass fg_class = {
    .class_name = "FilterGraph",
    .version    = LIBAVUTIL_VERSION_INT,
    .item_name  = fg_item_name,
    .category   = AV_CLASS_CATEGORY_FILTER,
};
1051 
/* Create a FilterGraph from a textual description.
 *
 * Ownership of graph_desc passes to the new graph.  The description is
 * parsed once with a throwaway AVFilterGraph only to enumerate the
 * unconnected inputs/outputs (allocating an InputFilter/OutputFilter for
 * each) and to detect source filters; the real graph is configured later.
 * When pfg is non-NULL the graph is returned there with index -1,
 * otherwise it is appended to the global filtergraphs array.
 * Returns 0 or a negative AVERROR code. */
int fg_create(FilterGraph **pfg, char *graph_desc, Scheduler *sch)
{
    FilterGraphPriv *fgp;
    FilterGraph      *fg;

    // NOTE(review): the declarations of 'inputs' and 'outputs'
    // (AVFilterInOut lists filled by graph_parse() below) are elided in
    // this view of the file.
    AVFilterGraph *graph;
    int ret = 0;

    fgp = av_mallocz(sizeof(*fgp));
    if (!fgp)
        return AVERROR(ENOMEM);
    fg = &fgp->fg;

    if (pfg) {
        // caller keeps ownership; not registered globally
        *pfg = fg;
        fg->index = -1;
    } else {
        // NOTE(review): the call appending 'fg' to the global
        // filtergraphs array (assigning 'ret') is elided here.
        if (ret < 0) {
            av_freep(&fgp);
            return ret;
        }

        fg->index = nb_filtergraphs - 1;
    }

    fg->class       = &fg_class;
    fgp->graph_desc = graph_desc;
    // NOTE(review): one assignment line is elided here.
    fgp->sch        = sch;

    snprintf(fgp->log_name, sizeof(fgp->log_name), "fc#%d", fg->index);

    fgp->frame     = av_frame_alloc();
    fgp->frame_enc = av_frame_alloc();
    if (!fgp->frame || !fgp->frame_enc)
        return AVERROR(ENOMEM);

    /* this graph is only used for determining the kinds of inputs
     * and outputs we have, and is discarded on exit from this function */
    graph = avfilter_graph_alloc();
    if (!graph)
        return AVERROR(ENOMEM);;  // NOTE(review): stray second semicolon
                                  // (harmless); likely a typo upstream.
    graph->nb_threads = 1;

    ret = graph_parse(graph, fgp->graph_desc, &inputs, &outputs, NULL);
    if (ret < 0)
        goto fail;

    // detect filters that generate data on their own (no inputs), which
    // makes the graph a source even without bound input streams
    for (unsigned i = 0; i < graph->nb_filters; i++) {
        const AVFilter *f = graph->filters[i]->filter;
        if ((!avfilter_filter_pad_count(f, 0) &&
             !(f->flags & AVFILTER_FLAG_DYNAMIC_INPUTS)) ||
            !strcmp(f->name, "apad")) {
            fgp->have_sources = 1;
            break;
        }
    }

    // allocate an InputFilter for every unconnected input pad
    for (AVFilterInOut *cur = inputs; cur; cur = cur->next) {
        InputFilter *const ifilter = ifilter_alloc(fg);
        InputFilterPriv *ifp;

        if (!ifilter) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        ifp = ifp_from_ifilter(ifilter);
        // take over the pad's link label, if any
        ifp->linklabel = cur->name;
        cur->name      = NULL;

        ifp->type = avfilter_pad_get_type(cur->filter_ctx->input_pads,
                                          cur->pad_idx);

        if (ifp->type != AVMEDIA_TYPE_VIDEO && ifp->type != AVMEDIA_TYPE_AUDIO) {
            av_log(fg, AV_LOG_FATAL, "Only video and audio filters supported "
                   "currently.\n");
            ret = AVERROR(ENOSYS);
            goto fail;
        }

        ifilter->name = describe_filter_link(fg, cur, 1);
        if (!ifilter->name) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    // allocate an OutputFilter for every unconnected output pad
    for (AVFilterInOut *cur = outputs; cur; cur = cur->next) {
        const enum AVMediaType type = avfilter_pad_get_type(cur->filter_ctx->output_pads,
                                                            cur->pad_idx);
        OutputFilter *const ofilter = ofilter_alloc(fg, type);

        if (!ofilter) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        ofilter->linklabel = cur->name;
        cur->name          = NULL;

        ofilter->name = describe_filter_link(fg, cur, 0);
        if (!ofilter->name) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    if (!fg->nb_outputs) {
        av_log(fg, AV_LOG_FATAL, "A filtergraph has zero outputs, this is not supported\n");
        ret = AVERROR(ENOSYS);
        goto fail;
    }

    // register the graph with the scheduler; its thread runs filter_thread()
    ret = sch_add_filtergraph(sch, fg->nb_inputs, fg->nb_outputs,
                              filter_thread, fgp);
    if (ret < 0)
        goto fail;
    fgp->sch_idx = ret;

fail:
    // NOTE(review): the calls freeing the inputs/outputs lists are elided
    // in this view of the file.
    avfilter_graph_free(&graph);

    if (ret < 0)
        return ret;

    return 0;
}
1184 
/* Create and fully bind a simple (1-input/1-output) filtergraph between
 * one input stream and one output stream.
 * NOTE(review): the first line of the signature (return type, name, and
 * the OutputStream 'ost' / InputStream 'ist' parameters used below) is
 * elided in this view of the file. */
                     char *graph_desc,
                     Scheduler *sch, unsigned sched_idx_enc,
                     const OutputFilterOptions *opts)
{
    FilterGraph *fg;
    FilterGraphPriv *fgp;
    int ret;

    ret = fg_create(&ost->fg_simple, graph_desc, sch);
    if (ret < 0)
        return ret;
    fg  = ost->fg_simple;
    fgp = fgp_from_fg(fg);

    fgp->is_simple = 1;

    // log name: media-type letter + "f" + output name, e.g. "vfmain"
    snprintf(fgp->log_name, sizeof(fgp->log_name), "%cf%s",
             av_get_media_type_string(ost->type)[0], opts->name);

    if (fg->nb_inputs != 1 || fg->nb_outputs != 1) {
        av_log(fg, AV_LOG_ERROR, "Simple filtergraph '%s' was expected "
               "to have exactly 1 input and 1 output. "
               "However, it had %d input(s) and %d output(s). Please adjust, "
               "or use a complex filtergraph (-filter_complex) instead.\n",
               graph_desc, fg->nb_inputs, fg->nb_outputs);
        return AVERROR(EINVAL);
    }
    if (fg->outputs[0]->type != ost->type) {
        av_log(fg, AV_LOG_ERROR, "Filtergraph has a %s output, cannot connect "
               "it to %s output stream\n",
               // NOTE(review): the argument naming the filtergraph's
               // output type is elided in this view of the file.
               av_get_media_type_string(ost->type));
        return AVERROR(EINVAL);
    }

    ost->filter = fg->outputs[0];

    ret = ifilter_bind_ist(fg->inputs[0], ist, opts->vs);
    if (ret < 0)
        return ret;

    ret = ofilter_bind_ost(fg->outputs[0], ost, sched_idx_enc, opts);
    if (ret < 0)
        return ret;

    // per-output thread-count override for this graph
    if (opts->nb_threads) {
        av_freep(&fgp->nb_threads);
        fgp->nb_threads = av_strdup(opts->nb_threads);
        if (!fgp->nb_threads)
            return AVERROR(ENOMEM);
    }

    return 0;
}
1240 
1242 {
1243  FilterGraphPriv *fgp = fgp_from_fg(fg);
1244  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1245  InputStream *ist = NULL;
1246  enum AVMediaType type = ifp->type;
1248  const char *spec;
1249  char *p;
1250  int i, ret;
1251 
1252  if (ifp->linklabel && !strncmp(ifp->linklabel, "dec:", 4)) {
1253  // bind to a standalone decoder
1254  int dec_idx;
1255 
1256  dec_idx = strtol(ifp->linklabel + 4, &p, 0);
1257  if (dec_idx < 0 || dec_idx >= nb_decoders) {
1258  av_log(fg, AV_LOG_ERROR, "Invalid decoder index %d in filtergraph description %s\n",
1259  dec_idx, fgp->graph_desc);
1260  return AVERROR(EINVAL);
1261  }
1262 
1263  if (type == AVMEDIA_TYPE_VIDEO) {
1264  spec = *p == ':' ? p + 1 : p;
1265  ret = view_specifier_parse(&spec, &vs);
1266  if (ret < 0)
1267  return ret;
1268  }
1269 
1270  ret = ifilter_bind_dec(ifp, decoders[dec_idx], &vs);
1271  if (ret < 0)
1272  av_log(fg, AV_LOG_ERROR, "Error binding a decoder to filtergraph input %s\n",
1273  ifilter->name);
1274  return ret;
1275  } else if (ifp->linklabel) {
1277  AVFormatContext *s;
1278  AVStream *st = NULL;
1279  int file_idx;
1280 
1281  // try finding an unbound filtergraph output with this label
1282  for (int i = 0; i < nb_filtergraphs; i++) {
1283  FilterGraph *fg_src = filtergraphs[i];
1284 
1285  if (fg == fg_src)
1286  continue;
1287 
1288  for (int j = 0; j < fg_src->nb_outputs; j++) {
1289  OutputFilter *ofilter = fg_src->outputs[j];
1290 
1291  if (!ofilter->bound && ofilter->linklabel &&
1292  !strcmp(ofilter->linklabel, ifp->linklabel)) {
1293  av_log(fg, AV_LOG_VERBOSE,
1294  "Binding input with label '%s' to filtergraph output %d:%d\n",
1295  ifp->linklabel, i, j);
1296 
1297  ret = ifilter_bind_fg(ifp, fg_src, j);
1298  if (ret < 0)
1299  av_log(fg, AV_LOG_ERROR, "Error binding filtergraph input %s\n",
1300  ifp->linklabel);
1301  return ret;
1302  }
1303  }
1304  }
1305 
1306  // bind to an explicitly specified demuxer stream
1307  file_idx = strtol(ifp->linklabel, &p, 0);
1308  if (file_idx < 0 || file_idx >= nb_input_files) {
1309  av_log(fg, AV_LOG_FATAL, "Invalid file index %d in filtergraph description %s.\n",
1310  file_idx, fgp->graph_desc);
1311  return AVERROR(EINVAL);
1312  }
1313  s = input_files[file_idx]->ctx;
1314 
1315  ret = stream_specifier_parse(&ss, *p == ':' ? p + 1 : p, 1, fg);
1316  if (ret < 0) {
1317  av_log(fg, AV_LOG_ERROR, "Invalid stream specifier: %s\n", p);
1318  return ret;
1319  }
1320 
1321  if (type == AVMEDIA_TYPE_VIDEO) {
1322  spec = ss.remainder ? ss.remainder : "";
1323  ret = view_specifier_parse(&spec, &vs);
1324  if (ret < 0) {
1326  return ret;
1327  }
1328  }
1329 
1330  for (i = 0; i < s->nb_streams; i++) {
1331  enum AVMediaType stream_type = s->streams[i]->codecpar->codec_type;
1332  if (stream_type != type &&
1333  !(stream_type == AVMEDIA_TYPE_SUBTITLE &&
1334  type == AVMEDIA_TYPE_VIDEO /* sub2video hack */))
1335  continue;
1336  if (stream_specifier_match(&ss, s, s->streams[i], fg)) {
1337  st = s->streams[i];
1338  break;
1339  }
1340  }
1342  if (!st) {
1343  av_log(fg, AV_LOG_FATAL, "Stream specifier '%s' in filtergraph description %s "
1344  "matches no streams.\n", p, fgp->graph_desc);
1345  return AVERROR(EINVAL);
1346  }
1347  ist = input_files[file_idx]->streams[st->index];
1348 
1349  av_log(fg, AV_LOG_VERBOSE,
1350  "Binding input with label '%s' to input stream %d:%d\n",
1351  ifp->linklabel, ist->file->index, ist->index);
1352  } else {
1353  ist = ist_find_unused(type);
1354  if (!ist) {
1355  av_log(fg, AV_LOG_FATAL, "Cannot find a matching stream for "
1356  "unlabeled input pad %s\n", ifilter->name);
1357  return AVERROR(EINVAL);
1358  }
1359 
1360  av_log(fg, AV_LOG_VERBOSE,
1361  "Binding unlabeled input %d to input stream %d:%d\n",
1362  ifp->index, ist->file->index, ist->index);
1363  }
1364  av_assert0(ist);
1365 
1366  ret = ifilter_bind_ist(ifilter, ist, &vs);
1367  if (ret < 0) {
1368  av_log(fg, AV_LOG_ERROR,
1369  "Error binding an input stream to complex filtergraph input %s.\n",
1370  ifilter->name);
1371  return ret;
1372  }
1373 
1374  return 0;
1375 }
1376 
/* Bind every not-yet-bound input of the given filtergraph to its data source
 * (an input stream or another filtergraph's output) via fg_complex_bind_input().
 * Returns 0 on success, a negative AVERROR code on failure. */
1377 static int bind_inputs(FilterGraph *fg)
1378 {
1379  // bind filtergraph inputs to input streams or other filtergraphs
1380  for (int i = 0; i < fg->nb_inputs; i++) {
 // NOTE(review): extraction dropped original line 1381 here; it presumably
 // declared `ifp` (e.g. `InputFilterPriv *ifp = ifp_from_ifilter(fg->inputs[i]);`)
 // -- confirm against upstream.
1382  int ret;
1383 
1384  if (ifp->bound)
1385  continue;
1386 
1387  ret = fg_complex_bind_input(fg, &ifp->ifilter);
1388  if (ret < 0)
1389  return ret;
1390  }
1391 
1392  return 0;
1393 }
1394 
/* Bind the inputs of all filtergraphs, then verify that every filtergraph
 * output was connected to a consumer; returns AVERROR(EINVAL) if any output
 * is left unbound.
 * NOTE(review): the signature line (orig. 1395) was lost in extraction;
 * presumably `int fg_finalise_bindings(void)` -- confirm against upstream.
 * Orig. line 1400 (the call whose result `ret` holds, presumably
 * `ret = bind_inputs(filtergraphs[i]);`) is also missing. */
1396 {
1397  int ret;
1398 
1399  for (int i = 0; i < nb_filtergraphs; i++) {
1401  if (ret < 0)
1402  return ret;
1403  }
1404 
1405  // check that all outputs were bound
1406  for (int i = 0; i < nb_filtergraphs; i++) {
1407  FilterGraph *fg = filtergraphs[i];
1408 
1409  for (int j = 0; j < fg->nb_outputs; j++) {
1410  OutputFilter *output = fg->outputs[j];
1411  if (!output->bound) {
1412  av_log(fg, AV_LOG_FATAL,
1413  "Filter '%s' has output %d (%s) unconnected\n",
1414  output->name, j,
 // linklabel is an opaque buffer; cast to const char * for %s
1415  output->linklabel ? (const char *)output->linklabel : "unlabeled");
1416  return AVERROR(EINVAL);
1417  }
1418  }
1419  }
1420 
1421  return 0;
1422 }
1423 
/* Append a trim (video) or atrim (audio) filter after *last_filter to enforce
 * the output start time / recording duration, then advance *last_filter /
 * *pad_idx to the new filter. A no-op when neither limit is set.
 * NOTE(review): the first signature line (orig. 1424, presumably
 * `static int insert_trim(int64_t start_time, int64_t duration,`) and the
 * declaration of `ctx` (orig. 1429) were lost in extraction -- confirm. */
1425  AVFilterContext **last_filter, int *pad_idx,
1426  const char *filter_name)
1427 {
1428  AVFilterGraph *graph = (*last_filter)->graph;
1430  const AVFilter *trim;
 // pick trim vs atrim based on the media type of the pad we are extending
1431  enum AVMediaType type = avfilter_pad_get_type((*last_filter)->output_pads, *pad_idx);
1432  const char *name = (type == AVMEDIA_TYPE_VIDEO) ? "trim" : "atrim";
1433  int ret = 0;
1434 
 // nothing to limit -> leave the chain untouched
1435  if (duration == INT64_MAX && start_time == AV_NOPTS_VALUE)
1436  return 0;
1437 
1438  trim = avfilter_get_by_name(name);
1439  if (!trim) {
1440  av_log(NULL, AV_LOG_ERROR, "%s filter not present, cannot limit "
1441  "recording time.\n", name);
1442  return AVERROR_FILTER_NOT_FOUND;
1443  }
1444 
1445  ctx = avfilter_graph_alloc_filter(graph, trim, filter_name);
1446  if (!ctx)
1447  return AVERROR(ENOMEM);
1448 
1449  if (duration != INT64_MAX) {
 // "durationi"/"starti" are the integer (microsecond) forms of the trim options
 // NOTE(review): trailing argument lines (orig. 1451/1455, presumably
 // `AV_OPT_SEARCH_CHILDREN);`) were lost in extraction
1450  ret = av_opt_set_int(ctx, "durationi", duration,
1452  }
1453  if (ret >= 0 && start_time != AV_NOPTS_VALUE) {
1454  ret = av_opt_set_int(ctx, "starti", start_time,
1456  }
1457  if (ret < 0) {
1458  av_log(ctx, AV_LOG_ERROR, "Error configuring the %s filter", name);
1459  return ret;
1460  }
1461 
 // NOTE(review): orig. 1462 missing; presumably `ret = avfilter_init_str(ctx, NULL);`
1463  if (ret < 0)
1464  return ret;
1465 
1466  ret = avfilter_link(*last_filter, *pad_idx, ctx, 0);
1467  if (ret < 0)
1468  return ret;
1469 
1470  *last_filter = ctx;
1471  *pad_idx = 0;
1472  return 0;
1473 }
1474 
/* Create a named filter with the given args, link it after *last_filter, and
 * advance *last_filter / *pad_idx to the new filter. Returns 0 on success,
 * a negative AVERROR on failure; AVERROR_BUG if the filter name is unknown
 * (callers only pass built-in names). */
1475 static int insert_filter(AVFilterContext **last_filter, int *pad_idx,
1476  const char *filter_name, const char *args)
1477 {
1478  AVFilterGraph *graph = (*last_filter)->graph;
1479  const AVFilter *filter = avfilter_get_by_name(filter_name);
 // NOTE(review): extraction dropped orig. 1480, presumably `AVFilterContext *ctx;`
1481  int ret;
1482 
1483  if (!filter)
1484  return AVERROR_BUG;
1485 
 // NOTE(review): orig. 1486 missing; presumably
 // `ret = avfilter_graph_create_filter(&ctx,`
1487  filter,
1488  filter_name, args, NULL, graph);
1489  if (ret < 0)
1490  return ret;
1491 
1492  ret = avfilter_link(*last_filter, *pad_idx, ctx, 0);
1493  if (ret < 0)
1494  return ret;
1495 
1496  *last_filter = ctx;
1497  *pad_idx = 0;
1498  return 0;
1499 }
1500 
/* Terminate a filtergraph video output: create a buffersink, optionally insert
 * an auto-scaler and a format-constraining filter, insert the output trim, and
 * link the chain to the sink stored in ofp->filter.
 * NOTE(review): the first signature line (orig. 1501, presumably
 * `static int configure_output_video_filter(FilterGraphPriv *fgp, AVFilterGraph *graph,`)
 * and several creation-call lines (orig. 1512, 1521, 1532, 1542, 1544, 1552,
 * 1554, 1568) were lost in extraction -- confirm against upstream. */
1502  OutputFilter *ofilter, AVFilterInOut *out)
1503 {
1504  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
1505  AVFilterContext *last_filter = out->filter_ctx;
1506  AVBPrint bprint;
1507  int pad_idx = out->pad_idx;
1508  int ret;
1509  char name[255];
1510 
1511  snprintf(name, sizeof(name), "out_%s", ofp->name);
 // create the buffersink that terminates this output
1513  avfilter_get_by_name("buffersink"),
1514  name, NULL, NULL, graph);
1515 
1516  if (ret < 0)
1517  return ret;
1518 
 // auto-insert a scaler when a fixed output size was requested
1519  if ((ofp->width || ofp->height) && (ofp->flags & OFILTER_FLAG_AUTOSCALE)) {
1520  char args[255];
1522  const AVDictionaryEntry *e = NULL;
1523 
1524  snprintf(args, sizeof(args), "%d:%d",
1525  ofp->width, ofp->height);
1526 
 // forward all -sws_flags style options to the scaler
1527  while ((e = av_dict_iterate(ofp->sws_opts, e))) {
1528  av_strlcatf(args, sizeof(args), ":%s=%s", e->key, e->value);
1529  }
1530 
1531  snprintf(name, sizeof(name), "scaler_out_%s", ofp->name);
1533  name, args, NULL, graph)) < 0)
1534  return ret;
1535  if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
1536  return ret;
1537 
1538  last_filter = filter;
1539  pad_idx = 0;
1540  }
1541 
 // build the list of allowed pixel formats / color properties and constrain
 // the output with a "format" filter if anything was requested
1543  ofp->format != AV_PIX_FMT_NONE || !ofp->formats);
1545  choose_pix_fmts(ofp, &bprint);
1546  choose_color_spaces(ofp, &bprint);
1547  choose_color_ranges(ofp, &bprint);
1548  if (!av_bprint_is_complete(&bprint))
1549  return AVERROR(ENOMEM);
1550 
1551  if (bprint.len) {
1553 
1555  avfilter_get_by_name("format"),
1556  "format", bprint.str, NULL, graph);
1557  av_bprint_finalize(&bprint, NULL);
1558  if (ret < 0)
1559  return ret;
1560  if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
1561  return ret;
1562 
1563  last_filter = filter;
1564  pad_idx = 0;
1565  }
1566 
 // honor -ss/-t on the output side
1567  snprintf(name, sizeof(name), "trim_out_%s", ofp->name);
1569  &last_filter, &pad_idx, name);
1570  if (ret < 0)
1571  return ret;
1572 
1573 
1574  if ((ret = avfilter_link(last_filter, pad_idx, ofp->filter, 0)) < 0)
1575  return ret;
1576 
1577  return 0;
1578 }
1579 
/* Terminate a filtergraph audio output: create an abuffersink (accepting any
 * channel count), optionally insert an aformat filter constraining sample
 * format/rate/layout and an apad filter, insert the output trim, and link the
 * chain to ofp->filter.
 * NOTE(review): the first signature line (orig. 1580, presumably
 * `static int configure_output_audio_filter(FilterGraphPriv *fgp, AVFilterGraph *graph,`)
 * and a few call/declaration lines (orig. 1591, 1618, 1628, 1631, 1651) were
 * lost in extraction -- confirm against upstream. */
1581  OutputFilter *ofilter, AVFilterInOut *out)
1582 {
1583  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
1584  AVFilterContext *last_filter = out->filter_ctx;
1585  int pad_idx = out->pad_idx;
1586  AVBPrint args;
1587  char name[255];
1588  int ret;
1589 
1590  snprintf(name, sizeof(name), "out_%s", ofp->name);
1592  avfilter_get_by_name("abuffersink"),
1593  name, NULL, NULL, graph);
1594  if (ret < 0)
1595  return ret;
 // let the sink accept any channel count; layout is negotiated via aformat
1596  if ((ret = av_opt_set_int(ofp->filter, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
1597  return ret;
1598 
 /* Helper: create the named filter with the given argument, link it after
  * last_filter and advance the chain; jumps to fail on error. */
1599 #define AUTO_INSERT_FILTER(opt_name, filter_name, arg) do { \
1600  AVFilterContext *filt_ctx; \
1601  \
1602  av_log(ofilter, AV_LOG_INFO, opt_name " is forwarded to lavfi " \
1603  "similarly to -af " filter_name "=%s.\n", arg); \
1604  \
1605  ret = avfilter_graph_create_filter(&filt_ctx, \
1606  avfilter_get_by_name(filter_name), \
1607  filter_name, arg, NULL, graph); \
1608  if (ret < 0) \
1609  goto fail; \
1610  \
1611  ret = avfilter_link(last_filter, pad_idx, filt_ctx, 0); \
1612  if (ret < 0) \
1613  goto fail; \
1614  \
1615  last_filter = filt_ctx; \
1616  pad_idx = 0; \
1617 } while (0)
1619 
 // collect the allowed sample formats/rates/layouts for this output
1620  choose_sample_fmts(ofp, &args);
1621  choose_sample_rates(ofp, &args);
1622  choose_channel_layouts(ofp, &args);
1623  if (!av_bprint_is_complete(&args)) {
1624  ret = AVERROR(ENOMEM);
1625  goto fail;
1626  }
1627  if (args.len) {
1629 
1630  snprintf(name, sizeof(name), "format_out_%s", ofp->name);
1632  avfilter_get_by_name("aformat"),
1633  name, args.str, NULL, graph);
1634  if (ret < 0)
1635  goto fail;
1636 
1637  ret = avfilter_link(last_filter, pad_idx, format, 0);
1638  if (ret < 0)
1639  goto fail;
1640 
1641  last_filter = format;
1642  pad_idx = 0;
1643  }
1644 
1645  if (ofilter->apad) {
1646  AUTO_INSERT_FILTER("-apad", "apad", ofilter->apad);
 // apad generates data on its own, so the graph now has a source
1647  fgp->have_sources = 1;
1648  }
1649 
1650  snprintf(name, sizeof(name), "trim for output %s", ofp->name);
1652  &last_filter, &pad_idx, name);
1653  if (ret < 0)
1654  goto fail;
1655 
1656  if ((ret = avfilter_link(last_filter, pad_idx, ofp->filter, 0)) < 0)
1657  goto fail;
1658 fail:
1659  av_bprint_finalize(&args, NULL);
1660 
1661  return ret;
1662 }
1663 
1665  OutputFilter *ofilter, AVFilterInOut *out)
1666 {
1667  switch (ofilter->type) {
1668  case AVMEDIA_TYPE_VIDEO: return configure_output_video_filter(fgp, graph, ofilter, out);
1669  case AVMEDIA_TYPE_AUDIO: return configure_output_audio_filter(fgp, graph, ofilter, out);
1670  default: av_assert0(0); return 0;
1671  }
1672 }
1673 
/* Reset the sub2video state (subtitle-to-video conversion) so that timing
 * restarts with the next heartbeat.
 * NOTE(review): the signature line (orig. 1674, presumably
 * `static void sub2video_prepare(InputFilterPriv *ifp)`) was lost in
 * extraction -- confirm against upstream. */
1675 {
1676  ifp->sub2video.last_pts = INT64_MIN;
1677  ifp->sub2video.end_pts = INT64_MIN;
1678 
1679  /* sub2video structure has been (re-)initialized.
1680  Mark it as such so that the system will be
1681  initialized with the first received heartbeat. */
1682  ifp->sub2video.initialize = 1;
1683 }
1684 
/* Set up a filtergraph video input: create a buffer source configured from the
 * input's parameters, then optionally insert crop, autorotate (transpose /
 * hflip / vflip / rotate based on the stream's display matrix) and trim
 * filters before linking into the parsed graph.
 * NOTE(review): the first signature line (orig. 1685, presumably
 * `static int configure_input_video_filter(FilterGraph *fg, AVFilterGraph *graph,`)
 * and a few lines (orig. 1698 `par` allocation, 1708 av_bprint_init, 1731
 * desc lookup, 1785 insert_trim call) were lost in extraction -- confirm. */
1686  InputFilter *ifilter, AVFilterInOut *in)
1687 {
1688  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1689 
1690  AVFilterContext *last_filter;
1691  const AVFilter *buffer_filt = avfilter_get_by_name("buffer");
1692  const AVPixFmtDescriptor *desc;
1693  AVRational fr = ifp->opts.framerate;
1694  AVRational sar;
1695  AVBPrint args;
1696  char name[255];
1697  int ret, pad_idx = 0;
1699  if (!par)
1700  return AVERROR(ENOMEM);
1701 
 // subtitles feeding a video input: reset the sub2video machinery
1702  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE)
1703  sub2video_prepare(ifp);
1704 
1705  sar = ifp->sample_aspect_ratio;
 // buffersrc requires a valid (non-zero-den) aspect ratio; 0/1 = unknown
1706  if(!sar.den)
1707  sar = (AVRational){0,1};
1709  av_bprintf(&args,
1710  "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:"
1711  "pixel_aspect=%d/%d:colorspace=%d:range=%d",
1712  ifp->width, ifp->height, ifp->format,
1713  ifp->time_base.num, ifp->time_base.den, sar.num, sar.den,
1714  ifp->color_space, ifp->color_range);
1715  if (fr.num && fr.den)
1716  av_bprintf(&args, ":frame_rate=%d/%d", fr.num, fr.den);
1717  snprintf(name, sizeof(name), "graph %d input from stream %s", fg->index,
1718  ifp->opts.name);
1719 
1720 
1721  if ((ret = avfilter_graph_create_filter(&ifp->filter, buffer_filt, name,
1722  args.str, NULL, graph)) < 0)
1723  goto fail;
 // pass hw frames context separately; it cannot be expressed in the args string
1724  par->hw_frames_ctx = ifp->hw_frames_ctx;
1725  ret = av_buffersrc_parameters_set(ifp->filter, par);
1726  if (ret < 0)
1727  goto fail;
1728  av_freep(&par);
1729  last_filter = ifp->filter;
1730 
1732  av_assert0(desc);
1733 
 // user-requested crop applied right after the source
1734  if ((ifp->opts.flags & IFILTER_FLAG_CROP)) {
1735  char crop_buf[64];
1736  snprintf(crop_buf, sizeof(crop_buf), "w=iw-%u-%u:h=ih-%u-%u:x=%u:y=%u",
1737  ifp->opts.crop_left, ifp->opts.crop_right,
1738  ifp->opts.crop_top, ifp->opts.crop_bottom,
1739  ifp->opts.crop_left, ifp->opts.crop_top);
1740  ret = insert_filter(&last_filter, &pad_idx, "crop", crop_buf);
1741  if (ret < 0)
1742  return ret;
1743  }
1744 
1745  // TODO: insert hwaccel enabled filters like transpose_vaapi into the graph
1746  ifp->displaymatrix_applied = 0;
 // autorotation is only possible for software frames
1747  if ((ifp->opts.flags & IFILTER_FLAG_AUTOROTATE) &&
1748  !(desc->flags & AV_PIX_FMT_FLAG_HWACCEL)) {
1749  int32_t *displaymatrix = ifp->displaymatrix;
1750  double theta;
1751 
1752  theta = get_rotation(displaymatrix);
1753 
 // map the rotation angle (with ~1 degree tolerance) to the cheapest filter
1754  if (fabs(theta - 90) < 1.0) {
1755  ret = insert_filter(&last_filter, &pad_idx, "transpose",
1756  displaymatrix[3] > 0 ? "cclock_flip" : "clock");
1757  } else if (fabs(theta - 180) < 1.0) {
1758  if (displaymatrix[0] < 0) {
1759  ret = insert_filter(&last_filter, &pad_idx, "hflip", NULL);
1760  if (ret < 0)
1761  return ret;
1762  }
1763  if (displaymatrix[4] < 0) {
1764  ret = insert_filter(&last_filter, &pad_idx, "vflip", NULL);
1765  }
1766  } else if (fabs(theta - 270) < 1.0) {
1767  ret = insert_filter(&last_filter, &pad_idx, "transpose",
1768  displaymatrix[3] < 0 ? "clock_flip" : "cclock");
1769  } else if (fabs(theta) > 1.0) {
1770  char rotate_buf[64];
1771  snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
1772  ret = insert_filter(&last_filter, &pad_idx, "rotate", rotate_buf);
1773  } else if (fabs(theta) < 1.0) {
 // no rotation, but the matrix may still request a vertical flip
1774  if (displaymatrix && displaymatrix[4] < 0) {
1775  ret = insert_filter(&last_filter, &pad_idx, "vflip", NULL);
1776  }
1777  }
1778  if (ret < 0)
1779  return ret;
1780 
1781  ifp->displaymatrix_applied = 1;
1782  }
1783 
1784  snprintf(name, sizeof(name), "trim_in_%s", ifp->opts.name);
1786  &last_filter, &pad_idx, name);
1787  if (ret < 0)
1788  return ret;
1789 
1790  if ((ret = avfilter_link(last_filter, 0, in->filter_ctx, in->pad_idx)) < 0)
1791  return ret;
1792  return 0;
1793 fail:
1794  av_freep(&par);
1795 
1796  return ret;
1797 }
1798 
/* Set up a filtergraph audio input: create an abuffer source configured from
 * the input's time base, sample rate, format and channel layout, insert the
 * input trim, and link into the parsed graph.
 * NOTE(review): the first signature line (orig. 1799, presumably
 * `static int configure_input_audio_filter(FilterGraph *fg, AVFilterGraph *graph,`)
 * and a few lines (orig. 1809 av_bprint_init, 1813 sample-format name, 1815/1817
 * channel-layout describe, 1829 insert_trim call) were lost in extraction. */
1800  InputFilter *ifilter, AVFilterInOut *in)
1801 {
1802  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1803  AVFilterContext *last_filter;
1804  const AVFilter *abuffer_filt = avfilter_get_by_name("abuffer");
1805  AVBPrint args;
1806  char name[255];
1807  int ret, pad_idx = 0;
1808 
1810  av_bprintf(&args, "time_base=%d/%d:sample_rate=%d:sample_fmt=%s",
1811  ifp->time_base.num, ifp->time_base.den,
1812  ifp->sample_rate,
 // prefer an explicit channel layout when one is known and valid;
 // otherwise fall back to a bare channel count
1814  if (av_channel_layout_check(&ifp->ch_layout) &&
1816  av_bprintf(&args, ":channel_layout=");
1818  } else
1819  av_bprintf(&args, ":channels=%d", ifp->ch_layout.nb_channels);
1820  snprintf(name, sizeof(name), "graph_%d_in_%s", fg->index, ifp->opts.name);
1821 
1822  if ((ret = avfilter_graph_create_filter(&ifp->filter, abuffer_filt,
1823  name, args.str, NULL,
1824  graph)) < 0)
1825  return ret;
1826  last_filter = ifp->filter;
1827 
1828  snprintf(name, sizeof(name), "trim for input stream %s", ifp->opts.name);
1830  &last_filter, &pad_idx, name);
1831  if (ret < 0)
1832  return ret;
1833 
1834  if ((ret = avfilter_link(last_filter, 0, in->filter_ctx, in->pad_idx)) < 0)
1835  return ret;
1836 
1837  return 0;
1838 }
1839 
/* Dispatch input configuration to the video or audio variant based on the
 * input's media type; aborts on any other type.
 * NOTE(review): the first signature line (orig. 1840, presumably
 * `static int configure_input_filter(FilterGraph *fg, AVFilterGraph *graph,`)
 * was lost in extraction -- confirm against upstream. */
1841  InputFilter *ifilter, AVFilterInOut *in)
1842 {
1843  switch (ifp_from_ifilter(ifilter)->type) {
1844  case AVMEDIA_TYPE_VIDEO: return configure_input_video_filter(fg, graph, ifilter, in);
1845  case AVMEDIA_TYPE_AUDIO: return configure_input_audio_filter(fg, graph, ifilter, in);
1846  default: av_assert0(0); return 0;
1847  }
1848 }
1849 
/* Tear down the configured graph: clear the per-input/per-output filter
 * context pointers (they are owned by the graph) and free the graph itself.
 * NOTE(review): the signature line (orig. 1850, presumably
 * `static void cleanup_filtergraph(FilterGraph *fg, FilterGraphThread *fgt)`)
 * and the output-loop body (orig. 1853, presumably
 * `ofp_from_ofilter(fg->outputs[i])->filter = NULL;`) were lost in extraction. */
1851 {
1852  for (int i = 0; i < fg->nb_outputs; i++)
1854  for (int i = 0; i < fg->nb_inputs; i++)
1855  ifp_from_ifilter(fg->inputs[i])->filter = NULL;
1856  avfilter_graph_free(&fgt->graph);
1857 }
1858 
/* Return 1 if the filter context is a (a)buffer source: no inputs and its
 * filter is named "buffer" or "abuffer".
 * NOTE(review): the signature line (orig. 1859, presumably
 * `static int filter_is_buffersrc(const AVFilterContext *f)`) was lost in
 * extraction -- confirm against upstream. */
1860 {
1861  return f->nb_inputs == 0 &&
1862  (!strcmp(f->filter->name, "buffer") ||
1863  !strcmp(f->filter->name, "abuffer"));
1864 }
1865 
/* Return 1 if every filter in the graph is metadata-only, a sink, or a buffer
 * source -- i.e. the graph cannot modify frame data. */
1866 static int graph_is_meta(AVFilterGraph *graph)
1867 {
1868  for (unsigned i = 0; i < graph->nb_filters; i++) {
1869  const AVFilterContext *f = graph->filters[i];
1870 
1871  /* in addition to filters flagged as meta, also
1872  * disregard sinks and buffersources (but not other sources,
1873  * since they introduce data we are not aware of)
1874  */
1875  if (!((f->filter->flags & AVFILTER_FLAG_METADATA_ONLY) ||
1876  f->nb_outputs == 0 ||
 // NOTE(review): orig. 1877 missing; presumably `filter_is_buffersrc(f)))`
1878  return 0;
1879  }
1880  return 1;
1881 }
1882 
1883 static int sub2video_frame(InputFilter *ifilter, AVFrame *frame, int buffer);
1884 
/* (Re)configure the whole filtergraph: free any previous graph, parse the
 * graph description, configure all inputs and outputs, record the negotiated
 * output parameters, replay queued input frames, and send EOF for inputs that
 * already finished.
 * NOTE(review): the signature line (orig. 1885, presumably
 * `static int configure_filtergraph(FilterGraph *fg, FilterGraphThread *fgt)`)
 * and a number of body lines (orig. 1913, 1929, 1939-1940, 1943, 1948, 1952,
 * 1955, 1972-1973, 1979, 1985-1988, 1995, 2003, 2005, 2015, 2017, 2026) were
 * lost in extraction -- confirm against upstream before relying on details. */
1886 {
1887  FilterGraphPriv *fgp = fgp_from_fg(fg);
1888  AVBufferRef *hw_device;
1889  AVFilterInOut *inputs, *outputs, *cur;
1890  int ret, i, simple = filtergraph_is_simple(fg);
1891  int have_input_eof = 0;
1892  const char *graph_desc = fgp->graph_desc;
1893 
 // discard any previously configured graph before building a new one
1894  cleanup_filtergraph(fg, fgt);
1895  fgt->graph = avfilter_graph_alloc();
1896  if (!fgt->graph)
1897  return AVERROR(ENOMEM);
1898 
 // simple (1-in/1-out) graphs take thread/scale/resample options from the
 // single output's encoder-side settings
1899  if (simple) {
1900  OutputFilterPriv *ofp = ofp_from_ofilter(fg->outputs[0]);
1901 
1902  if (filter_nbthreads) {
1903  ret = av_opt_set(fgt->graph, "threads", filter_nbthreads, 0);
1904  if (ret < 0)
1905  goto fail;
1906  } else if (fgp->nb_threads) {
1907  ret = av_opt_set(fgt->graph, "threads", fgp->nb_threads, 0);
1908  if (ret < 0)
1909  return ret;
1910  }
1911 
1912  if (av_dict_count(ofp->sws_opts)) {
1914  &fgt->graph->scale_sws_opts,
1915  '=', ':');
1916  if (ret < 0)
1917  goto fail;
1918  }
1919 
1920  if (av_dict_count(ofp->swr_opts)) {
1921  char *args;
1922  ret = av_dict_get_string(ofp->swr_opts, &args, '=', ':');
1923  if (ret < 0)
1924  goto fail;
1925  av_opt_set(fgt->graph, "aresample_swr_opts", args, 0);
1926  av_free(args);
1927  }
1928  } else {
 // NOTE(review): orig. 1929 missing for the complex-graph branch
1930  }
1931 
1932  hw_device = hw_device_for_filter();
1933 
1934  if ((ret = graph_parse(fgt->graph, graph_desc, &inputs, &outputs, hw_device)) < 0)
1935  goto fail;
1936 
 // hook up all parsed graph inputs, in order
1937  for (cur = inputs, i = 0; cur; cur = cur->next, i++)
1938  if ((ret = configure_input_filter(fg, fgt->graph, fg->inputs[i], cur)) < 0) {
1941  goto fail;
1942  }
1944 
 // hook up all parsed graph outputs, in order
1945  for (cur = outputs, i = 0; cur; cur = cur->next, i++) {
1946  ret = configure_output_filter(fgp, fgt->graph, fg->outputs[i], cur);
1947  if (ret < 0) {
1949  goto fail;
1950  }
1951  }
1953 
1954  if (fgp->disable_conversions)
1956  if ((ret = avfilter_graph_config(fgt->graph, NULL)) < 0)
1957  goto fail;
1958 
1959  fgp->is_meta = graph_is_meta(fgt->graph);
1960 
1961  /* limit the lists of allowed formats to the ones selected, to
1962  * make sure they stay the same if the filtergraph is reconfigured later */
1963  for (int i = 0; i < fg->nb_outputs; i++) {
1964  OutputFilter *ofilter = fg->outputs[i];
1965  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
1966  AVFilterContext *sink = ofp->filter;
1967 
1968  ofp->format = av_buffersink_get_format(sink);
1969 
1970  ofp->width = av_buffersink_get_w(sink);
1971  ofp->height = av_buffersink_get_h(sink);
1974 
1975  // If the timing parameters are not locked yet, get the tentative values
1976  // here but don't lock them. They will only be used if no output frames
1977  // are ever produced.
1978  if (!ofp->tb_out_locked) {
1980  if (ofp->fps.framerate.num <= 0 && ofp->fps.framerate.den <= 0 &&
1981  fr.num > 0 && fr.den > 0)
1982  ofp->fps.framerate = fr;
1983  ofp->tb_out = av_buffersink_get_time_base(sink);
1984  }
1986 
1989  ret = av_buffersink_get_ch_layout(sink, &ofp->ch_layout);
1990  if (ret < 0)
1991  goto fail;
1992  }
1993 
 // replay frames that were queued while the graph was unconfigured
1994  for (int i = 0; i < fg->nb_inputs; i++) {
1996  AVFrame *tmp;
1997  while (av_fifo_read(ifp->frame_queue, &tmp, 1) >= 0) {
1998  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE) {
1999  sub2video_frame(&ifp->ifilter, tmp, !fgt->graph);
2000  } else {
2001  if (ifp->type_src == AVMEDIA_TYPE_VIDEO) {
2002  if (ifp->displaymatrix_applied)
2004  }
2006  }
2007  av_frame_free(&tmp);
2008  if (ret < 0)
2009  goto fail;
2010  }
2011  }
2012 
2013  /* send the EOFs for the finished inputs */
2014  for (int i = 0; i < fg->nb_inputs; i++) {
2016  if (fgt->eof_in[i]) {
2018  if (ret < 0)
2019  goto fail;
2020  have_input_eof = 1;
2021  }
2022  }
2023 
2024  if (have_input_eof) {
2025  // make sure the EOF propagates to the end of the graph
2027  if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
2028  goto fail;
2029  }
2030 
2031  return 0;
2032 fail:
2033  cleanup_filtergraph(fg, fgt);
2034  return ret;
2035 }
2036 
/* Copy stream parameters (format, dimensions, timing, color properties,
 * channel layout, hw frames context, display matrix) from a decoded frame
 * into the input filter's state, to be used when (re)configuring the graph.
 * NOTE(review): the signature line (orig. 2037, presumably
 * `static int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame)`)
 * and the side-data lookup (orig. 2064, presumably
 * `sd = av_frame_get_side_data(frame, AV_FRAME_DATA_DISPLAYMATRIX);`)
 * were lost in extraction -- confirm against upstream. */
2038 {
2039  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2040  AVFrameSideData *sd;
2041  int ret;
2042 
2043  ret = av_buffer_replace(&ifp->hw_frames_ctx, frame->hw_frames_ctx);
2044  if (ret < 0)
2045  return ret;
2046 
 // audio uses 1/sample_rate; CFR video uses the inverse framerate;
 // everything else keeps the frame's own time base
2047  ifp->time_base = (ifp->type == AVMEDIA_TYPE_AUDIO) ? (AVRational){ 1, frame->sample_rate } :
2048  (ifp->opts.flags & IFILTER_FLAG_CFR) ? av_inv_q(ifp->opts.framerate) :
2049  frame->time_base;
2050 
2051  ifp->format = frame->format;
2052 
2053  ifp->width = frame->width;
2054  ifp->height = frame->height;
2055  ifp->sample_aspect_ratio = frame->sample_aspect_ratio;
2056  ifp->color_space = frame->colorspace;
2057  ifp->color_range = frame->color_range;
2058 
2059  ifp->sample_rate = frame->sample_rate;
2060  ret = av_channel_layout_copy(&ifp->ch_layout, &frame->ch_layout);
2061  if (ret < 0)
2062  return ret;
2063 
2065  if (sd)
2066  memcpy(ifp->displaymatrix, sd->data, sizeof(ifp->displaymatrix));
2067  ifp->displaymatrix_present = !!sd;
2068 
2069  return 0;
2070 }
2071 
/* Return whether the filtergraph is "simple" (created implicitly for a single
 * stream, as opposed to a user-specified complex graph).
 * NOTE(review): the signature line (orig. 2072, presumably
 * `int filtergraph_is_simple(const FilterGraph *fg)`) was lost in extraction. */
2073 {
2074  const FilterGraphPriv *fgp = cfgp_from_cfg(fg);
2075  return fgp->is_simple;
2076 }
2077 
2078 static void send_command(FilterGraph *fg, AVFilterGraph *graph,
2079  double time, const char *target,
2080  const char *command, const char *arg, int all_filters)
2081 {
2082  int ret;
2083 
2084  if (!graph)
2085  return;
2086 
2087  if (time < 0) {
2088  char response[4096];
2089  ret = avfilter_graph_send_command(graph, target, command, arg,
2090  response, sizeof(response),
2091  all_filters ? 0 : AVFILTER_CMD_FLAG_ONE);
2092  fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s",
2093  fg->index, ret, response);
2094  } else if (!all_filters) {
2095  fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
2096  } else {
2097  ret = avfilter_graph_queue_command(graph, target, command, arg, 0, time);
2098  if (ret < 0)
2099  fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
2100  }
2101 }
2102 
2103 static int choose_input(const FilterGraph *fg, const FilterGraphThread *fgt)
2104 {
2105  int nb_requests, nb_requests_max = -1;
2106  int best_input = -1;
2107 
2108  for (int i = 0; i < fg->nb_inputs; i++) {
2109  InputFilter *ifilter = fg->inputs[i];
2110  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2111 
2112  if (fgt->eof_in[i])
2113  continue;
2114 
2115  nb_requests = av_buffersrc_get_nb_failed_requests(ifp->filter);
2116  if (nb_requests > nb_requests_max) {
2117  nb_requests_max = nb_requests;
2118  best_input = i;
2119  }
2120  }
2121 
2122  av_assert0(best_input >= 0);
2123 
2124  return best_input;
2125 }
2126 
/* Decide and lock the output time base (and, for video, the target framerate)
 * for an output, based on -enc_time_base, the configured/maximum framerate,
 * and the first filtered frame.
 * NOTE(review): the signature line (orig. 2127, presumably
 * `static int choose_out_timebase(OutputFilterPriv *ofp, AVFrame *frame)`)
 * and orig. 2159 (presumably
 * `AVRational fr_sink = av_buffersink_get_frame_rate(ofp->filter);`)
 * were lost in extraction -- confirm against upstream. */
2128 {
2129  OutputFilter *ofilter = &ofp->ofilter;
2130  FPSConvContext *fps = &ofp->fps;
2131  AVRational tb = (AVRational){ 0, 0 };
2132  AVRational fr;
2133  const FrameData *fd;
2134 
2135  fd = frame_data_c(frame);
2136 
2137  // apply -enc_time_base
 // ENC_TIME_BASE_DEMUX requires a valid demuxer time base in the frame data
2138  if (ofp->enc_timebase.num == ENC_TIME_BASE_DEMUX &&
2139  (fd->dec.tb.num <= 0 || fd->dec.tb.den <= 0)) {
2140  av_log(ofp, AV_LOG_ERROR,
2141  "Demuxing timebase not available - cannot use it for encoding\n");
2142  return AVERROR(EINVAL);
2143  }
2144 
2145  switch (ofp->enc_timebase.num) {
2146  case 0: break;
2147  case ENC_TIME_BASE_DEMUX: tb = fd->dec.tb; break;
2148  case ENC_TIME_BASE_FILTER: tb = frame->time_base; break;
2149  default: tb = ofp->enc_timebase; break;
2150  }
2151 
 // audio: default to 1/sample_rate, no framerate handling needed
2152  if (ofilter->type == AVMEDIA_TYPE_AUDIO) {
2153  tb = tb.num ? tb : (AVRational){ 1, frame->sample_rate };
2154  goto finish;
2155  }
2156 
2157  fr = fps->framerate;
 // no user-forced framerate: fall back to what the buffersink reports
2158  if (!fr.num) {
2160  if (fr_sink.num > 0 && fr_sink.den > 0)
2161  fr = fr_sink;
2162  }
2163 
2164  if (fps->vsync_method == VSYNC_CFR || fps->vsync_method == VSYNC_VSCFR) {
2165  if (!fr.num && !fps->framerate_max.num) {
2166  fr = (AVRational){25, 1};
2167  av_log(ofp, AV_LOG_WARNING,
2168  "No information "
2169  "about the input framerate is available. Falling "
2170  "back to a default value of 25fps. Use the -r option "
2171  "if you want a different framerate.\n");
2172  }
2173 
2174  if (fps->framerate_max.num &&
2175  (av_q2d(fr) > av_q2d(fps->framerate_max) ||
2176  !fr.den))
2177  fr = fps->framerate_max;
2178  }
2179 
2180  if (fr.num > 0) {
 // snap to the nearest framerate the encoder supports
2181  if (fps->framerate_supported) {
2182  int idx = av_find_nearest_q_idx(fr, fps->framerate_supported);
2183  fr = fps->framerate_supported[idx];
2184  }
2185  if (fps->framerate_clip) {
2186  av_reduce(&fr.num, &fr.den,
2187  fr.num, fr.den, fps->framerate_clip);
2188  }
2189  }
2190 
 // prefer the inverse framerate as time base, then the frame's own
2191  if (!(tb.num > 0 && tb.den > 0))
2192  tb = av_inv_q(fr);
2193  if (!(tb.num > 0 && tb.den > 0))
2194  tb = frame->time_base;
2195 
2196  fps->framerate = fr;
2197 finish:
2198  ofp->tb_out = tb;
 // from here on the output time base must not change
2199  ofp->tb_out_locked = 1;
2200 
2201  return 0;
2202 }
2203 
/* Rescale the frame's pts into the destination (encoder) time base and return
 * a higher-precision floating-point version of the same timestamp, used by the
 * framerate conversion code.
 * NOTE(review): the signature lines (orig. 2204-2205, presumably
 * `static double adjust_frame_pts_to_encoder_tb(AVFrame *frame, AVRational tb_dst, int64_t start_time)`)
 * and the subtrahend lines of the two rescales (orig. 2218/2227, presumably
 * rescaling `start_time` into the respective time base) were lost in
 * extraction -- confirm against upstream. */
2206 {
2207  double float_pts = AV_NOPTS_VALUE; // this is identical to frame.pts but with higher precision
2208 
2209  AVRational tb = tb_dst;
2210  AVRational filter_tb = frame->time_base;
 // widen the time base so the double conversion keeps extra fractional bits
2211  const int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
2212 
2213  if (frame->pts == AV_NOPTS_VALUE)
2214  goto early_exit;
2215 
2216  tb.den <<= extra_bits;
2217  float_pts = av_rescale_q(frame->pts, filter_tb, tb) -
2219  float_pts /= 1 << extra_bits;
2220  // when float_pts is not exactly an integer,
2221  // avoid exact midpoints to reduce the chance of rounding differences, this
2222  // can be removed in case the fps code is changed to work with integers
2223  if (float_pts != llrint(float_pts))
2224  float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
2225 
2226  frame->pts = av_rescale_q(frame->pts, filter_tb, tb_dst) -
2228  frame->time_base = tb_dst;
2229 
2230 early_exit:
2231 
2232  if (debug_ts) {
2233  av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
2234  frame ? av_ts2str(frame->pts) : "NULL",
2235  av_ts2timestr(frame->pts, &tb_dst),
2236  float_pts, tb_dst.num, tb_dst.den);
2237  }
2238 
2239  return float_pts;
2240 }
2241 
2242 /* Convert frame timestamps to the encoder timebase and decide how many times
2243  * should this (and possibly previous) frame be repeated in order to conform to
2244  * desired target framerate (if any).
2245  */
 /* NOTE(review): the first signature line (orig. 2246, presumably
  * `static void video_sync_process(OutputFilterPriv *ofp, AVFrame *frame,`)
  * and orig. 2282-2283 (the remaining conditions of the `if` below and the
  * `#if FFMPEG_OPT_VSYNC_DROP` matching the `#endif`) were lost in
  * extraction -- confirm against upstream. */
2247  int64_t *nb_frames, int64_t *nb_frames_prev)
2248 {
2249  OutputFilter *ofilter = &ofp->ofilter;
2250  FPSConvContext *fps = &ofp->fps;
2251  double delta0, delta, sync_ipts, duration;
2252 
 // EOF flush: repeat the previous frame based on recent duplication history
2253  if (!frame) {
2254  *nb_frames_prev = *nb_frames = mid_pred(fps->frames_prev_hist[0],
2255  fps->frames_prev_hist[1],
2256  fps->frames_prev_hist[2]);
2257 
2258  if (!*nb_frames && fps->last_dropped) {
2259  atomic_fetch_add(&ofilter->nb_frames_drop, 1);
2260  fps->last_dropped++;
2261  }
2262 
2263  goto finish;
2264  }
2265 
 // frame duration expressed in output time-base units
2266  duration = frame->duration * av_q2d(frame->time_base) / av_q2d(ofp->tb_out);
2267 
2268  sync_ipts = adjust_frame_pts_to_encoder_tb(frame, ofp->tb_out, ofp->ts_offset);
2269  /* delta0 is the "drift" between the input frame and
2270  * where it would fall in the output. */
2271  delta0 = sync_ipts - ofp->next_pts;
2272  delta = delta0 + duration;
2273 
2274  // tracks the number of times the PREVIOUS frame should be duplicated,
2275  // mostly for variable framerate (VFR)
2276  *nb_frames_prev = 0;
2277  /* by default, we output a single frame */
2278  *nb_frames = 1;
2279 
 // frame arrives slightly early but still overlaps its slot: clip it
2280  if (delta0 < 0 &&
2281  delta > 0 &&
2284  && fps->vsync_method != VSYNC_DROP
2285 #endif
2286  ) {
2287  if (delta0 < -0.6) {
2288  av_log(ofp, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
2289  } else
2290  av_log(ofp, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
2291  sync_ipts = ofp->next_pts;
2292  duration += delta0;
2293  delta0 = 0;
2294  }
2295 
2296  switch (fps->vsync_method) {
2297  case VSYNC_VSCFR:
 // first frame: do not duplicate to fill the gap before it
2298  if (fps->frame_number == 0 && delta0 >= 0.5) {
2299  av_log(ofp, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
2300  delta = duration;
2301  delta0 = 0;
2302  ofp->next_pts = llrint(sync_ipts);
2303  }
 /* fallthrough */
2304  case VSYNC_CFR:
2305  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
2306  if (frame_drop_threshold && delta < frame_drop_threshold && fps->frame_number) {
2307  *nb_frames = 0;
2308  } else if (delta < -1.1)
2309  *nb_frames = 0;
2310  else if (delta > 1.1) {
2311  *nb_frames = llrintf(delta);
2312  if (delta0 > 1.1)
2313  *nb_frames_prev = llrintf(delta0 - 0.6);
2314  }
2315  frame->duration = 1;
2316  break;
2317  case VSYNC_VFR:
2318  if (delta <= -0.6)
2319  *nb_frames = 0;
2320  else if (delta > 0.6)
2321  ofp->next_pts = llrint(sync_ipts);
2322  frame->duration = llrint(duration);
2323  break;
2324 #if FFMPEG_OPT_VSYNC_DROP
2325  case VSYNC_DROP:
2326 #endif
2327  case VSYNC_PASSTHROUGH:
2328  ofp->next_pts = llrint(sync_ipts);
2329  frame->duration = llrint(duration);
2330  break;
2331  default:
2332  av_assert0(0);
2333  }
2334 
2335 finish:
 // shift the duplication history and record this decision
2336  memmove(fps->frames_prev_hist + 1,
2337  fps->frames_prev_hist,
2338  sizeof(fps->frames_prev_hist[0]) * (FF_ARRAY_ELEMS(fps->frames_prev_hist) - 1));
2339  fps->frames_prev_hist[0] = *nb_frames_prev;
2340 
2341  if (*nb_frames_prev == 0 && fps->last_dropped) {
2342  atomic_fetch_add(&ofilter->nb_frames_drop, 1);
2343  av_log(ofp, AV_LOG_VERBOSE,
2344  "*** dropping frame %"PRId64" at ts %"PRId64"\n",
2345  fps->frame_number, fps->last_frame->pts);
2346  }
 // account for duplicated frames and warn on excessive duplication
2347  if (*nb_frames > (*nb_frames_prev && fps->last_dropped) + (*nb_frames > *nb_frames_prev)) {
2348  uint64_t nb_frames_dup;
2349  if (*nb_frames > dts_error_threshold * 30) {
2350  av_log(ofp, AV_LOG_ERROR, "%"PRId64" frame duplication too large, skipping\n", *nb_frames - 1);
2351  atomic_fetch_add(&ofilter->nb_frames_drop, 1);
2352  *nb_frames = 0;
2353  return;
2354  }
2355  nb_frames_dup = atomic_fetch_add(&ofilter->nb_frames_dup,
2356  *nb_frames - (*nb_frames_prev && fps->last_dropped) - (*nb_frames > *nb_frames_prev));
2357  av_log(ofp, AV_LOG_VERBOSE, "*** %"PRId64" dup!\n", *nb_frames - 1);
2358  if (nb_frames_dup > fps->dup_warning) {
2359  av_log(ofp, AV_LOG_WARNING, "More than %"PRIu64" frames duplicated\n", fps->dup_warning);
2360  fps->dup_warning *= 10;
2361  }
2362  }
2363 
2364  fps->last_dropped = *nb_frames == *nb_frames_prev && frame;
2365  fps->dropped_keyframe |= fps->last_dropped && (frame->flags & AV_FRAME_FLAG_KEY);
2366 }
2367 
/* Finalize an output that is shutting down: if no frame was ever produced,
 * send a parameter-only dummy frame so the encoder can still be initialized,
 * then mark the output as EOF and signal EOF to the scheduler.
 * NOTE(review): the signature line (orig. 2368, presumably
 * `static int close_output(OutputFilterPriv *ofp, FilterGraphThread *fgt)`)
 * and orig. 2370 (presumably the `fgp` declaration) were lost in extraction. */
2369 {
2371  int ret;
2372 
2373  // we are finished and no frames were ever seen at this output,
2374  // at least initialize the encoder with a dummy frame
2375  if (!fgt->got_frame) {
2376  AVFrame *frame = fgt->frame;
2377  FrameData *fd;
2378 
 // populate only the stream parameters; the frame carries no data buffer
2379  frame->time_base = ofp->tb_out;
2380  frame->format = ofp->format;
2381 
2382  frame->width = ofp->width;
2383  frame->height = ofp->height;
2384  frame->sample_aspect_ratio = ofp->sample_aspect_ratio;
2385 
2386  frame->sample_rate = ofp->sample_rate;
2387  if (ofp->ch_layout.nb_channels) {
2388  ret = av_channel_layout_copy(&frame->ch_layout, &ofp->ch_layout);
2389  if (ret < 0)
2390  return ret;
2391  }
2392 
2393  fd = frame_data(frame);
2394  if (!fd)
2395  return AVERROR(ENOMEM);
2396 
2397  fd->frame_rate_filter = ofp->fps.framerate;
2398 
2399  av_assert0(!frame->buf[0]);
2400 
2401  av_log(ofp, AV_LOG_WARNING,
2402  "No filtered frames for output stream, trying to "
2403  "initialize anyway.\n");
2404 
2405  ret = sch_filter_send(fgp->sch, fgp->sch_idx, ofp->index, frame);
2406  if (ret < 0) {
2408  return ret;
2409  }
2410  }
2411 
2412  fgt->eof_out[ofp->index] = 1;
2413 
 // NULL frame signals EOF to the scheduler; EOF from it is not an error here
2414  ret = sch_filter_send(fgp->sch, fgp->sch_idx, ofp->index, NULL);
2415  return (ret == AVERROR_EOF) ? 0 : ret;
2416 }
2417 
/* Emit one filtered frame (or EOF when frame is NULL) on an output: run video
 * sync to decide duplication/dropping, rescale timestamps to the output time
 * base, and hand the resulting frame(s) to the scheduler.
 * NOTE(review): the first signature line (orig. 2418, presumably
 * `static int fg_output_frame(OutputFilterPriv *ofp, FilterGraphThread *fgt,`),
 * orig. 2421 (presumably the `fgp` declaration) and orig. 2454 (the audio pts
 * subtrahend) were lost in extraction -- confirm against upstream. */
2419  AVFrame *frame)
2420 {
2422  AVFrame *frame_prev = ofp->fps.last_frame;
2423  enum AVMediaType type = ofp->ofilter.type;
2424 
 // NULL frame => EOF => at most the fps code may still emit duplicates
2425  int64_t nb_frames = !!frame, nb_frames_prev = 0;
2426 
2427  if (type == AVMEDIA_TYPE_VIDEO && (frame || fgt->got_frame))
2428  video_sync_process(ofp, frame, &nb_frames, &nb_frames_prev);
2429 
2430  for (int64_t i = 0; i < nb_frames; i++) {
2431  AVFrame *frame_out;
2432  int ret;
2433 
2434  if (type == AVMEDIA_TYPE_VIDEO) {
 // the first nb_frames_prev iterations re-send the previous frame
2435  AVFrame *frame_in = (i < nb_frames_prev && frame_prev->buf[0]) ?
2436  frame_prev : frame;
2437  if (!frame_in)
2438  break;
2439 
2440  frame_out = fgp->frame_enc;
2441  ret = av_frame_ref(frame_out, frame_in);
2442  if (ret < 0)
2443  return ret;
2444 
2445  frame_out->pts = ofp->next_pts;
2446 
 // a keyframe was dropped earlier: promote this frame to keyframe
2447  if (ofp->fps.dropped_keyframe) {
2448  frame_out->flags |= AV_FRAME_FLAG_KEY;
2449  ofp->fps.dropped_keyframe = 0;
2450  }
2451  } else {
 // audio: rescale pts into the output time base and derive duration
 // from the sample count
2452  frame->pts = (frame->pts == AV_NOPTS_VALUE) ? ofp->next_pts :
2453  av_rescale_q(frame->pts, frame->time_base, ofp->tb_out) -
2455 
2456  frame->time_base = ofp->tb_out;
2457  frame->duration = av_rescale_q(frame->nb_samples,
2458  (AVRational){ 1, frame->sample_rate },
2459  ofp->tb_out);
2460 
2461  ofp->next_pts = frame->pts + frame->duration;
2462 
2463  frame_out = frame;
2464  }
2465 
2466  // send the frame to consumers
2467  ret = sch_filter_send(fgp->sch, fgp->sch_idx, ofp->index, frame_out);
2468  if (ret < 0) {
2469  av_frame_unref(frame_out);
2470 
 // first failure on this output: count it as done
2471  if (!fgt->eof_out[ofp->index]) {
2472  fgt->eof_out[ofp->index] = 1;
2473  fgp->nb_outputs_done++;
2474  }
2475 
2476  return ret == AVERROR_EOF ? 0 : ret;
2477  }
2478 
2479  if (type == AVMEDIA_TYPE_VIDEO) {
2480  ofp->fps.frame_number++;
2481  ofp->next_pts++;
2482 
 // only the first copy of a duplicated frame keeps the keyframe flag
2483  if (i == nb_frames_prev && frame)
2484  frame->flags &= ~AV_FRAME_FLAG_KEY;
2485  }
2486 
2487  fgt->got_frame = 1;
2488  }
2489 
 // remember this frame so it can be duplicated by later sync decisions
2490  if (frame && frame_prev) {
2491  av_frame_unref(frame_prev);
2492  av_frame_move_ref(frame_prev, frame);
2493  }
2494 
2495  if (!frame)
2496  return close_output(ofp, fgt);
2497 
2498  return 0;
2499 }
2500 
// Pull a single frame from one buffersink and forward it via
// fg_output_frame(). Returns 1 when no (more) frames are available for this
// output, 0 when a frame was processed, <0 on error.
// NOTE(review): the first signature line was dropped by the extraction —
// upstream: `static int fg_output_step(OutputFilterPriv *ofp,
// FilterGraphThread *fgt, AVFrame *frame)`.
2502  AVFrame *frame)
2503 {
2505  AVFilterContext *filter = ofp->filter;
2506  FrameData *fd;
2507  int ret;
2508 
// NOTE(review): source lines 2509-2510 are missing from this extraction —
// presumably the av_buffersink_get_frame_flags() call that sets `ret`;
// confirm upstream.
2511  if (ret == AVERROR_EOF && !fgt->eof_out[ofp->index]) {
// sink reached EOF: flush/close this output exactly once
2512  ret = fg_output_frame(ofp, fgt, NULL);
2513  return (ret < 0) ? ret : 1;
2514  } else if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
2515  return 1;
2516  } else if (ret < 0) {
2517  av_log(ofp, AV_LOG_WARNING,
2518  "Error in retrieving a frame from the filtergraph: %s\n",
2519  av_err2str(ret));
2520  return ret;
2521  }
2522 
// output already closed: discard the frame
2523  if (fgt->eof_out[ofp->index]) {
// NOTE(review): source line 2524 missing — presumably av_frame_unref(frame).
2525  return 0;
2526  }
2527 
// NOTE(review): source line 2528 missing — presumably assigns
// frame->time_base from av_buffersink_get_time_base(); confirm upstream.
2529 
2530  if (debug_ts)
2531  av_log(ofp, AV_LOG_INFO, "filter_raw -> pts:%s pts_time:%s time_base:%d/%d\n",
2532  av_ts2str(frame->pts), av_ts2timestr(frame->pts, &frame->time_base),
2533  frame->time_base.num, frame->time_base.den);
2534 
2535  // Choose the output timebase the first time we get a frame.
2536  if (!ofp->tb_out_locked) {
2537  ret = choose_out_timebase(ofp, frame);
2538  if (ret < 0) {
2539  av_log(ofp, AV_LOG_ERROR, "Could not choose an output time base\n");
2541  return ret;
2542  }
2543  }
2544 
2545  fd = frame_data(frame);
2546  if (!fd) {
2548  return AVERROR(ENOMEM);
2549  }
2550 
2552 
2553  // only use bits_per_raw_sample passed through from the decoder
2554  // if the filtergraph did not touch the frame data
2555  if (!fgp->is_meta)
2556  fd->bits_per_raw_sample = 0;
2557 
2558  if (ofp->ofilter.type == AVMEDIA_TYPE_VIDEO) {
// derive a duration from the sink frame rate when the filter left it unset
// NOTE(review): source line 2560 missing — presumably declares `fr` from
// av_buffersink_get_frame_rate(); confirm upstream.
2559  if (!frame->duration) {
2561  if (fr.num > 0 && fr.den > 0)
2562  frame->duration = av_rescale_q(1, av_inv_q(fr), frame->time_base);
2563  }
2564 
2565  fd->frame_rate_filter = ofp->fps.framerate;
2566  }
2567 
2568  ret = fg_output_frame(ofp, fgt, frame);
2570  if (ret < 0)
2571  return ret;
2572 
2573  return 0;
2574 }
2575 
2576 /* retrieve all frames available at filtergraph outputs
2577  * and send them to consumers */
// NOTE(review): the first signature line was dropped by the extraction —
// upstream: `static int read_frames(FilterGraph *fg, FilterGraphThread *fgt,
// AVFrame *frame)`.
2579  AVFrame *frame)
2580 {
2581  FilterGraphPriv *fgp = fgp_from_fg(fg);
2582  int did_step = 0;
2583 
2584  // graph not configured, just select the input to request
2585  if (!fgt->graph) {
2586  for (int i = 0; i < fg->nb_inputs; i++) {
// NOTE(review): source line 2587 missing — presumably declares `ifp`
// via ifp_from_ifilter(fg->inputs[i]); confirm upstream.
2588  if (ifp->format < 0 && !fgt->eof_in[i]) {
2589  fgt->next_in = i;
2590  return 0;
2591  }
2592  }
2593 
2594  // This state - graph is not configured, but all inputs are either
2595  // initialized or EOF - should be unreachable because sending EOF to a
2596  // filter without even a fallback format should fail
2597  av_assert0(0);
2598  return AVERROR_BUG;
2599  }
2600 
2601  while (fgp->nb_outputs_done < fg->nb_outputs) {
2602  int ret;
2603 
// NOTE(review): source line 2604 missing — presumably the
// avfilter_graph_request_oldest() call that sets `ret`; confirm upstream.
2605  if (ret == AVERROR(EAGAIN)) {
// graph wants more input: pick which input to feed next
2606  fgt->next_in = choose_input(fg, fgt);
2607  break;
2608  } else if (ret < 0) {
2609  if (ret == AVERROR_EOF)
2610  av_log(fg, AV_LOG_VERBOSE, "Filtergraph returned EOF, finishing\n");
2611  else
2612  av_log(fg, AV_LOG_ERROR,
2613  "Error requesting a frame from the filtergraph: %s\n",
2614  av_err2str(ret));
2615  return ret;
2616  }
// nb_inputs acts as the sentinel "no particular input requested"
2617  fgt->next_in = fg->nb_inputs;
2618 
2619  // return after one iteration, so that scheduler can rate-control us
2620  if (did_step && fgp->have_sources)
2621  return 0;
2622 
2623  /* Reap all buffers present in the buffer sinks */
2624  for (int i = 0; i < fg->nb_outputs; i++) {
// NOTE(review): source line 2625 missing — presumably declares `ofp`
// via ofp_from_ofilter(fg->outputs[i]); confirm upstream.
2626 
// fg_output_step() returns 0 while frames keep coming, 1 when drained
2627  ret = 0;
2628  while (!ret) {
2629  ret = fg_output_step(ofp, fgt, frame);
2630  if (ret < 0)
2631  return ret;
2632  }
2633  }
2634  did_step = 1;
2635  }
2636 
2637  return (fgp->nb_outputs_done == fg->nb_outputs) ? AVERROR_EOF : 0;
2638 }
2639 
// Advance the sub2video state to a new timestamp: either refresh/clear the
// currently overlaid subpicture (when past its end or uninitialized) or
// re-push the existing one so the video filter chain keeps flowing.
// NOTE(review): the signature line was dropped by the extraction — upstream:
// `static void sub2video_heartbeat(InputFilter *ifilter, int64_t pts,
// AVRational tb)`.
2641 {
2642  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2643  int64_t pts2;
2644 
2645  /* subtitles seem to be usually muxed ahead of other streams;
2646  if not, subtracting a larger time here is necessary */
2647  pts2 = av_rescale_q(pts, tb, ifp->time_base) - 1;
2648 
2649  /* do not send the heartbeat frame if the subtitle is already ahead */
2650  if (pts2 <= ifp->sub2video.last_pts)
2651  return;
2652 
2653  if (pts2 >= ifp->sub2video.end_pts || ifp->sub2video.initialize)
2654  /* if we have hit the end of the current displayed subpicture,
2655  or if we need to initialize the system, update the
2656  overlayed subpicture and its start/end times */
2657  sub2video_update(ifp, pts2 + 1, NULL);
2658  else
2659  sub2video_push_ref(ifp, pts2);
2660 }
2661 
// Feed one subtitle-derived frame into the sub2video machinery.
// When `buffer` is set (graph not configured yet) the frame is queued for
// later; a frame without data buffers is a heartbeat; NULL means EOF.
2662 static int sub2video_frame(InputFilter *ifilter, AVFrame *frame, int buffer)
2663 {
2664  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2665  int ret;
2666 
2667  if (buffer) {
2668  AVFrame *tmp;
2669 
2670  if (!frame)
2671  return 0;
2672 
2673  tmp = av_frame_alloc();
2674  if (!tmp)
2675  return AVERROR(ENOMEM);
2676 
// NOTE(review): source line 2677 missing from this extraction —
// presumably av_frame_move_ref(tmp, frame); confirm upstream.
2678 
2679  ret = av_fifo_write(ifp->frame_queue, &tmp, 1);
2680  if (ret < 0) {
2681  av_frame_free(&tmp);
2682  return ret;
2683  }
2684 
2685  return 0;
2686  }
2687 
2688  // heartbeat frame
2689  if (frame && !frame->buf[0]) {
2690  sub2video_heartbeat(ifilter, frame->pts, frame->time_base);
2691  return 0;
2692  }
2693 
// EOF: finalize the current subpicture, then close the buffersrc
2694  if (!frame) {
2695  if (ifp->sub2video.end_pts < INT64_MAX)
2696  sub2video_update(ifp, INT64_MAX, NULL);
2697 
2698  return av_buffersrc_add_frame(ifp->filter, NULL);
2699  }
2700 
2701  ifp->width = frame->width ? frame->width : ifp->width;
2702  ifp->height = frame->height ? frame->height : ifp->height;
2703 
// the AVSubtitle payload travels inside the frame's first data buffer
2704  sub2video_update(ifp, INT64_MIN, (const AVSubtitle*)frame->buf[0]->data);
2705 
2706  return 0;
2707 }
2708 
// Signal EOF on one filtergraph input. If the input's buffersrc exists the
// EOF (with a final pts) is forwarded to it; otherwise the input is
// initialized from its fallback parameters so the graph can still be
// configured, failing if no format can be determined at all.
2709 static int send_eof(FilterGraphThread *fgt, InputFilter *ifilter,
2710  int64_t pts, AVRational tb)
2711 {
2712  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2713  int ret;
2714 
// EOF already delivered for this input — nothing to do
2715  if (fgt->eof_in[ifp->index])
2716  return 0;
2717 
2718  fgt->eof_in[ifp->index] = 1;
2719 
2720  if (ifp->filter) {
// NOTE(review): source line 2722 (the rounding-flags argument) and line
// 2724 (presumably the av_buffersrc_close() call that sets `ret`) are
// missing from this extraction — confirm upstream.
2721  pts = av_rescale_q_rnd(pts, tb, ifp->time_base,
2723 
2725  if (ret < 0)
2726  return ret;
2727  } else {
2728  if (ifp->format < 0) {
2729  // the filtergraph was never configured, use the fallback parameters
2730  ifp->format = ifp->opts.fallback->format;
2731  ifp->sample_rate = ifp->opts.fallback->sample_rate;
2732  ifp->width = ifp->opts.fallback->width;
2733  ifp->height = ifp->opts.fallback->height;
// NOTE(review): source line 2734 missing — likely copies
// sample_aspect_ratio from the fallback; confirm upstream.
2735  ifp->color_space = ifp->opts.fallback->colorspace;
2736  ifp->color_range = ifp->opts.fallback->color_range;
2737  ifp->time_base = ifp->opts.fallback->time_base;
2738 
// NOTE(review): source line 2739 missing — presumably the
// av_channel_layout_copy(&ifp->ch_layout, ...) call; confirm upstream.
2740  &ifp->opts.fallback->ch_layout);
2741  if (ret < 0)
2742  return ret;
2743 
2744  if (ifilter_has_all_input_formats(ifilter->graph)) {
2745  ret = configure_filtergraph(ifilter->graph, fgt);
2746  if (ret < 0) {
2747  av_log(NULL, AV_LOG_ERROR, "Error initializing filters!\n");
2748  return ret;
2749  }
2750  }
2751  }
2752 
2753  if (ifp->format < 0) {
// NOTE(review): source line 2754 (the av_log call head) missing —
// confirm upstream.
2755  "Cannot determine format of input %s after EOF\n",
2756  ifp->opts.name);
2757  return AVERROR_INVALIDDATA;
2758  }
2759  }
2760 
2761  return 0;
2762 }
2763 
// Bitmask of input-parameter-change reasons that force a filtergraph
// reconfiguration (consumed in send_frame()).
// NOTE(review): the opening `enum ... {` line was dropped by the extraction.
2765  VIDEO_CHANGED = (1 << 0),
2766  AUDIO_CHANGED = (1 << 1),
2767  MATRIX_CHANGED = (1 << 2),
2768  HWACCEL_CHANGED = (1 << 3)
2769 };
2770 
/* Substitute the literal "unknown" for a NULL string pointer, so callers
 * can pass possibly-NULL names straight into log format strings. */
static const char *unknown_if_null(const char *str)
{
    if (str)
        return str;
    return "unknown";
}
2775 
// Push one decoded frame into its filtergraph input. Detects parameter
// changes (format/size/layout/display matrix/hwaccel) that require a graph
// reconfiguration, drains or buffers pending frames around the reinit, then
// submits the frame to the buffersrc.
// NOTE(review): the first signature line was dropped by the extraction —
// upstream: `static int send_frame(FilterGraph *fg, FilterGraphThread *fgt,
// InputFilter *ifilter, AVFrame *frame)`.
2777  InputFilter *ifilter, AVFrame *frame)
2778 {
2779  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2780  FrameData *fd;
2781  AVFrameSideData *sd;
2782  int need_reinit = 0, ret;
2783 
2784  /* determine if the parameters for this input changed */
2785  switch (ifp->type) {
2786  case AVMEDIA_TYPE_AUDIO:
2787  if (ifp->format != frame->format ||
2788  ifp->sample_rate != frame->sample_rate ||
2789  av_channel_layout_compare(&ifp->ch_layout, &frame->ch_layout))
2790  need_reinit |= AUDIO_CHANGED;
2791  break;
2792  case AVMEDIA_TYPE_VIDEO:
2793  if (ifp->format != frame->format ||
2794  ifp->width != frame->width ||
2795  ifp->height != frame->height ||
2796  ifp->color_space != frame->colorspace ||
2797  ifp->color_range != frame->color_range)
2798  need_reinit |= VIDEO_CHANGED;
2799  break;
2800  }
2801 
// NOTE(review): source line 2802 missing — presumably the
// `if ((sd = av_frame_get_side_data(frame, AV_FRAME_DATA_DISPLAYMATRIX))) {`
// head of this block; confirm upstream.
2803  if (!ifp->displaymatrix_present ||
2804  memcmp(sd->data, ifp->displaymatrix, sizeof(ifp->displaymatrix)))
2805  need_reinit |= MATRIX_CHANGED;
2806  } else if (ifp->displaymatrix_present)
2807  need_reinit |= MATRIX_CHANGED;
2808 
// once the graph exists, reinit only when the caller opted in
2809  if (!(ifp->opts.flags & IFILTER_FLAG_REINIT) && fgt->graph)
2810  need_reinit = 0;
2811 
2812  if (!!ifp->hw_frames_ctx != !!frame->hw_frames_ctx ||
2813  (ifp->hw_frames_ctx && ifp->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2814  need_reinit |= HWACCEL_CHANGED;
2815 
2816  if (need_reinit) {
// NOTE(review): source line 2817 missing — presumably
// `ret = ifilter_parameters_from_frame(ifilter, frame);` — confirm upstream.
2818  if (ret < 0)
2819  return ret;
2820  }
2821 
2822  /* (re)init the graph if possible, otherwise buffer the frame and return */
2823  if (need_reinit || !fgt->graph) {
2824  AVFrame *tmp = av_frame_alloc();
2825 
2826  if (!tmp)
2827  return AVERROR(ENOMEM);
2828 
// not all inputs known yet: queue the frame for after configuration
2829  if (!ifilter_has_all_input_formats(fg)) {
// NOTE(review): source line 2830 missing — presumably
// av_frame_move_ref(tmp, frame); confirm upstream.
2831 
2832  ret = av_fifo_write(ifp->frame_queue, &tmp, 1);
2833  if (ret < 0)
2834  av_frame_free(&tmp);
2835 
2836  return ret;
2837  }
2838 
// drain the old graph's outputs before tearing it down
2839  ret = fgt->graph ? read_frames(fg, fgt, tmp) : 0;
2840  av_frame_free(&tmp);
2841  if (ret < 0)
2842  return ret;
2843 
// log a human-readable reason for the reconfiguration
2844  if (fgt->graph) {
2845  AVBPrint reason;
// NOTE(review): source line 2846 missing — presumably
// av_bprint_init(&reason, ...); confirm upstream.
2847  if (need_reinit & AUDIO_CHANGED) {
2848  const char *sample_format_name = av_get_sample_fmt_name(frame->format);
2849  av_bprintf(&reason, "audio parameters changed to %d Hz, ", frame->sample_rate);
2850  av_channel_layout_describe_bprint(&frame->ch_layout, &reason);
2851  av_bprintf(&reason, ", %s, ", unknown_if_null(sample_format_name));
2852  }
2853  if (need_reinit & VIDEO_CHANGED) {
2854  const char *pixel_format_name = av_get_pix_fmt_name(frame->format);
2855  const char *color_space_name = av_color_space_name(frame->colorspace);
2856  const char *color_range_name = av_color_range_name(frame->color_range);
2857  av_bprintf(&reason, "video parameters changed to %s(%s, %s), %dx%d, ",
2858  unknown_if_null(pixel_format_name), unknown_if_null(color_range_name),
2859  unknown_if_null(color_space_name), frame->width, frame->height);
2860  }
2861  if (need_reinit & MATRIX_CHANGED)
2862  av_bprintf(&reason, "display matrix changed, ");
2863  if (need_reinit & HWACCEL_CHANGED)
2864  av_bprintf(&reason, "hwaccel changed, ");
2865  if (reason.len > 1)
2866  reason.str[reason.len - 2] = '\0'; // remove last comma
2867  av_log(fg, AV_LOG_INFO, "Reconfiguring filter graph%s%s\n", reason.len ? " because " : "", reason.str);
2868  }
2869 
2870  ret = configure_filtergraph(fg, fgt);
2871  if (ret < 0) {
2872  av_log(fg, AV_LOG_ERROR, "Error reinitializing filters!\n");
2873  return ret;
2874  }
2875  }
2876 
// bring the frame into the input's timebase before submission
2877  frame->pts = av_rescale_q(frame->pts, frame->time_base, ifp->time_base);
2878  frame->duration = av_rescale_q(frame->duration, frame->time_base, ifp->time_base);
2879  frame->time_base = ifp->time_base;
2880 
2881  if (ifp->displaymatrix_applied)
// NOTE(review): source line 2882 missing — presumably removes the
// display-matrix side data from the frame; confirm upstream.
2883 
2884  fd = frame_data(frame);
2885  if (!fd)
2886  return AVERROR(ENOMEM);
// NOTE(review): source lines 2887-2890 missing — presumably a FrameData
// update and the av_buffersrc_add_frame_flags() call that sets `ret`;
// confirm upstream.
2888 
2891  if (ret < 0) {
// NOTE(review): source line 2892 missing — presumably av_frame_unref(frame).
2893  if (ret != AVERROR_EOF)
2894  av_log(fg, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2895  return ret;
2896  }
2897 
2898  return 0;
2899 }
2900 
// Build a short thread name for this filtergraph — simple graphs are named
// after their (single) output, complex graphs after the graph index — and
// apply it to the current thread.
2901 static void fg_thread_set_name(const FilterGraph *fg)
2902 {
2903  char name[16];
2904  if (filtergraph_is_simple(fg)) {
2905  OutputFilterPriv *ofp = ofp_from_ofilter(fg->outputs[0]);
// NOTE(review): source line 2907 (the %c argument, presumably derived from
// the output's media type string) is missing from this extraction —
// confirm upstream.
2906  snprintf(name, sizeof(name), "%cf%s",
2908  ofp->name);
2909  } else {
2910  snprintf(name, sizeof(name), "fc%d", fg->index);
2911  }
2912 
// NOTE(review): source line 2913 (the call that applies `name` to the
// thread, presumably ff_thread_setname(name)) is missing — confirm upstream.
2914 }
2915 
// Release all per-thread filtergraph state: drain and free the queued output
// frames, free the scratch frame and EOF arrays, destroy the graph, and zero
// the struct so it can be reused.
// NOTE(review): the signature line was dropped by the extraction — upstream:
// `static void fg_thread_uninit(FilterGraphThread *fgt)`.
2917 {
2918  if (fgt->frame_queue_out) {
2919  AVFrame *frame;
2920  while (av_fifo_read(fgt->frame_queue_out, &frame, 1) >= 0)
2921  av_frame_free(&frame);
// NOTE(review): source line 2922 missing — presumably frees the FIFO
// itself (av_fifo_freep2(&fgt->frame_queue_out)); confirm upstream.
2923  }
2924 
2925  av_frame_free(&fgt->frame);
2926  av_freep(&fgt->eof_in);
2927  av_freep(&fgt->eof_out);
2928 
2929  avfilter_graph_free(&fgt->graph);
2930 
2931  memset(fgt, 0, sizeof(*fgt));
2932 }
2933 
// Initialize per-thread filtergraph state: a scratch frame, per-input and
// per-output EOF flag arrays, and the queue of output frames. On any
// allocation failure everything is released via fg_thread_uninit() and
// AVERROR(ENOMEM) is returned.
2934 static int fg_thread_init(FilterGraphThread *fgt, const FilterGraph *fg)
2935 {
2936  memset(fgt, 0, sizeof(*fgt));
2937 
2938  fgt->frame = av_frame_alloc();
2939  if (!fgt->frame)
2940  goto fail;
2941 
2942  fgt->eof_in = av_calloc(fg->nb_inputs, sizeof(*fgt->eof_in));
2943  if (!fgt->eof_in)
2944  goto fail;
2945 
2946  fgt->eof_out = av_calloc(fg->nb_outputs, sizeof(*fgt->eof_out));
2947  if (!fgt->eof_out)
2948  goto fail;
2949 
// NOTE(review): source line 2950 missing — presumably allocates
// fgt->frame_queue_out (av_fifo_alloc2 with AV_FIFO_FLAG_AUTO_GROW);
// confirm upstream.
2951  if (!fgt->frame_queue_out)
2952  goto fail;
2953 
2954  return 0;
2955 
2956 fail:
2957  fg_thread_uninit(fgt);
2958  return AVERROR(ENOMEM);
2959 }
2960 
// Main loop of a filtergraph worker thread: receives input frames, EOFs and
// control-stream commands from the scheduler, feeds them into the graph,
// and forwards filtered output frames to the consumers until all inputs and
// outputs are finished.
2961 static int filter_thread(void *arg)
2962 {
2963  FilterGraphPriv *fgp = arg;
2964  FilterGraph *fg = &fgp->fg;
2965 
2966  FilterGraphThread fgt;
2967  int ret = 0, input_status = 0;
2968 
2969  ret = fg_thread_init(&fgt, fg);
2970  if (ret < 0)
2971  goto finish;
2972 
2973  fg_thread_set_name(fg);
2974 
2975  // if we have all input parameters the graph can now be configured
// NOTE(review): source line 2976 missing — presumably the
// `if (ifilter_has_all_input_formats(fg)) {` guard; confirm upstream.
2977  ret = configure_filtergraph(fg, &fgt);
2978  if (ret < 0) {
2979  av_log(fg, AV_LOG_ERROR, "Error configuring filter graph: %s\n",
2980  av_err2str(ret));
2981  goto finish;
2982  }
2983  }
2984 
2985  while (1) {
2986  InputFilter *ifilter;
2987  InputFilterPriv *ifp;
2988  enum FrameOpaque o;
2989  unsigned input_idx = fgt.next_in;
2990 
2991  input_status = sch_filter_receive(fgp->sch, fgp->sch_idx,
2992  &input_idx, fgt.frame);
2993  if (input_status == AVERROR_EOF) {
2994  av_log(fg, AV_LOG_VERBOSE, "Filtering thread received EOF\n");
2995  break;
2996  } else if (input_status == AVERROR(EAGAIN)) {
2997  // should only happen when we didn't request any input
2998  av_assert0(input_idx == fg->nb_inputs);
2999  goto read_frames;
3000  }
3001  av_assert0(input_status >= 0);
3002 
3003  o = (intptr_t)fgt.frame->opaque;
3004 
// NOTE(review): the line below duplicates the assignment above — likely
// an extraction artifact (upstream has it once); harmless but confirm.
3005  o = (intptr_t)fgt.frame->opaque;
3006 
3007  // message on the control stream
3008  if (input_idx == fg->nb_inputs) {
3009  FilterCommand *fc;
3010 
3011  av_assert0(o == FRAME_OPAQUE_SEND_COMMAND && fgt.frame->buf[0]);
3012 
3013  fc = (FilterCommand*)fgt.frame->buf[0]->data;
3014  send_command(fg, fgt.graph, fc->time, fc->target, fc->command, fc->arg,
3015  fc->all_filters);
3016  av_frame_unref(fgt.frame);
3017  continue;
3018  }
3019 
3020  // we received an input frame or EOF
3021  ifilter = fg->inputs[input_idx];
3022  ifp = ifp_from_ifilter(ifilter);
3023 
3024  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE) {
3025  int hb_frame = input_status >= 0 && o == FRAME_OPAQUE_SUB_HEARTBEAT;
3026  ret = sub2video_frame(ifilter, (fgt.frame->buf[0] || hb_frame) ? fgt.frame : NULL,
3027  !fgt.graph);
3028  } else if (fgt.frame->buf[0]) {
3029  ret = send_frame(fg, &fgt, ifilter, fgt.frame);
3030  } else {
// bufferless frame on a non-subtitle input signals EOF with a final pts
3032  ret = send_eof(&fgt, ifilter, fgt.frame->pts, fgt.frame->time_base);
3033  }
3034  av_frame_unref(fgt.frame);
3035  if (ret == AVERROR_EOF) {
3036  av_log(fg, AV_LOG_VERBOSE, "Input %u no longer accepts new data\n",
3037  input_idx);
3038  sch_filter_receive_finish(fgp->sch, fgp->sch_idx, input_idx);
3039  continue;
3040  }
3041  if (ret < 0)
3042  goto finish;
3043 
3044 read_frames:
3045  // retrieve all newly available frames
3046  ret = read_frames(fg, &fgt, fgt.frame);
3047  if (ret == AVERROR_EOF) {
3048  av_log(fg, AV_LOG_VERBOSE, "All consumers returned EOF\n");
3049  break;
3050  } else if (ret < 0) {
3051  av_log(fg, AV_LOG_ERROR, "Error sending frames to consumers: %s\n",
3052  av_err2str(ret));
3053  goto finish;
3054  }
3055  }
3056 
// flush any outputs that have not been closed yet
3057  for (unsigned i = 0; i < fg->nb_outputs; i++) {
// NOTE(review): source line 3058 missing — presumably declares `ofp`
// via ofp_from_ofilter(fg->outputs[i]); confirm upstream.
3059 
3060  if (fgt.eof_out[i] || !fgt.graph)
3061  continue;
3062 
3063  ret = fg_output_frame(ofp, &fgt, NULL);
3064  if (ret < 0)
3065  goto finish;
3066  }
3067 
3068 finish:
3069  // EOF is normal termination
3070  if (ret == AVERROR_EOF)
3071  ret = 0;
3072 
3073  fg_thread_uninit(&fgt);
3074 
3075  return ret;
3076 }
3077 
3078 void fg_send_command(FilterGraph *fg, double time, const char *target,
3079  const char *command, const char *arg, int all_filters)
3080 {
3081  FilterGraphPriv *fgp = fgp_from_fg(fg);
3082  AVBufferRef *buf;
3083  FilterCommand *fc;
3084 
3085  fc = av_mallocz(sizeof(*fc));
3086  if (!fc)
3087  return;
3088 
3089  buf = av_buffer_create((uint8_t*)fc, sizeof(*fc), filter_command_free, NULL, 0);
3090  if (!buf) {
3091  av_freep(&fc);
3092  return;
3093  }
3094 
3095  fc->target = av_strdup(target);
3096  fc->command = av_strdup(command);
3097  fc->arg = av_strdup(arg);
3098  if (!fc->target || !fc->command || !fc->arg) {
3099  av_buffer_unref(&buf);
3100  return;
3101  }
3102 
3103  fc->time = time;
3104  fc->all_filters = all_filters;
3105 
3106  fgp->frame->buf[0] = buf;
3107  fgp->frame->opaque = (void*)(intptr_t)FRAME_OPAQUE_SEND_COMMAND;
3108 
3109  sch_filter_command(fgp->sch, fgp->sch_idx, fgp->frame);
3110 }
AV_OPT_SEARCH_CHILDREN
#define AV_OPT_SEARCH_CHILDREN
Search in possible children of the given object first.
Definition: opt.h:605
SCH_FILTER_OUT
#define SCH_FILTER_OUT(filter, output)
Definition: ffmpeg_sched.h:129
AVSubtitle
Definition: avcodec.h:2238
formats
formats
Definition: signature.h:47
configure_input_filter
static int configure_input_filter(FilterGraph *fg, AVFilterGraph *graph, InputFilter *ifilter, AVFilterInOut *in)
Definition: ffmpeg_filter.c:1840
FilterGraphThread::next_in
unsigned next_in
Definition: ffmpeg_filter.c:95
AVMEDIA_TYPE_SUBTITLE
@ AVMEDIA_TYPE_SUBTITLE
Definition: avutil.h:204
av_gettime_relative
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
Definition: time.c:56
AVFILTER_CMD_FLAG_ONE
#define AVFILTER_CMD_FLAG_ONE
Stop once a filter understood the command (for target=all for example), fast filters are favored auto...
Definition: avfilter.h:640
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
AVFrame::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: frame.h:668
AV_BPRINT_SIZE_UNLIMITED
#define AV_BPRINT_SIZE_UNLIMITED
av_buffersink_get_ch_layout
int av_buffersink_get_ch_layout(const AVFilterContext *ctx, AVChannelLayout *out)
Definition: buffersink.c:210
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
av_buffersink_get_sample_aspect_ratio
AVRational av_buffersink_get_sample_aspect_ratio(const AVFilterContext *ctx)
name
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option name
Definition: writing_filters.txt:88
avfilter_filter_pad_count
unsigned avfilter_filter_pad_count(const AVFilter *filter, int is_output)
Get the number of elements in an AVFilter's inputs or outputs array.
Definition: avfilter.c:625
extra_bits
#define extra_bits(eb)
Definition: intrax8.c:120
OutputFilter::graph
struct FilterGraph * graph
Definition: ffmpeg.h:355
av_clip
#define av_clip
Definition: common.h:100
InputFilterPriv::type
enum AVMediaType type
Definition: ffmpeg_filter.c:121
sch_filter_send
int sch_filter_send(Scheduler *sch, unsigned fg_idx, unsigned out_idx, AVFrame *frame)
Called by filtergraph tasks to send a filtered frame or EOF to consumers.
Definition: ffmpeg_sched.c:2440
OutputFilter::class
const AVClass * class
Definition: ffmpeg.h:353
view_specifier_parse
int view_specifier_parse(const char **pspec, ViewSpecifier *vs)
Definition: ffmpeg_opt.c:232
VSYNC_VFR
@ VSYNC_VFR
Definition: ffmpeg.h:69
OutputFilterPriv::filter
AVFilterContext * filter
Definition: ffmpeg_filter.c:198
av_bprint_is_complete
static int av_bprint_is_complete(const AVBPrint *buf)
Test if the print buffer is complete (not truncated).
Definition: bprint.h:218
r
const char * r
Definition: vf_curves.c:127
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
nb_input_files
int nb_input_files
Definition: ffmpeg.c:105
AVSubtitle::rects
AVSubtitleRect ** rects
Definition: avcodec.h:2243
opt.h
choose_input
static int choose_input(const FilterGraph *fg, const FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:2103
get_rotation
double get_rotation(const int32_t *displaymatrix)
Definition: cmdutils.c:1475
FilterGraphPriv::frame
AVFrame * frame
Definition: ffmpeg_filter.c:65
FilterGraphPriv::sch
Scheduler * sch
Definition: ffmpeg_filter.c:69
AVCodecParameters::codec_type
enum AVMediaType codec_type
General type of the encoded data.
Definition: codec_par.h:51
FilterGraphThread::got_frame
int got_frame
Definition: ffmpeg_filter.c:97
AVFilterGraph::nb_threads
int nb_threads
Maximum number of threads used by filters in this graph.
Definition: avfilter.h:786
InputFilterPriv::ch_layout
AVChannelLayout ch_layout
Definition: ffmpeg_filter.c:138
avfilter_pad_get_name
const char * avfilter_pad_get_name(const AVFilterPad *pads, int pad_idx)
Get the name of an AVFilterPad.
Definition: avfilter.c:976
FrameData
Definition: ffmpeg.h:673
send_command
static void send_command(FilterGraph *fg, AVFilterGraph *graph, double time, const char *target, const char *command, const char *arg, int all_filters)
Definition: ffmpeg_filter.c:2078
InputFilterPriv::last_pts
int64_t last_pts
Definition: ffmpeg_filter.c:153
avfilter_graph_segment_create_filters
int avfilter_graph_segment_create_filters(AVFilterGraphSegment *seg, int flags)
Create filters specified in a graph segment.
Definition: graphparser.c:516
InputFilterOptions::crop_right
unsigned crop_right
Definition: ffmpeg.h:282
OutputFilter::apad
char * apad
Definition: ffmpeg.h:363
out
FILE * out
Definition: movenc.c:55
av_frame_get_buffer
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:292
av_bprint_init
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:69
av_frame_get_side_data
AVFrameSideData * av_frame_get_side_data(const AVFrame *frame, enum AVFrameSideDataType type)
Definition: frame.c:951
InputFilterPriv::filter
AVFilterContext * filter
Definition: ffmpeg_filter.c:111
atomic_fetch_add
#define atomic_fetch_add(object, operand)
Definition: stdatomic.h:137
sample_fmts
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:948
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2965
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:90
FilterGraph::inputs
InputFilter ** inputs
Definition: ffmpeg.h:375
av_buffersink_get_frame_flags
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
Definition: buffersink.c:120
InputFilterOptions::crop_bottom
unsigned crop_bottom
Definition: ffmpeg.h:280
av_dict_count
int av_dict_count(const AVDictionary *m)
Get number of entries in dictionary.
Definition: dict.c:39
ifilter_parameters_from_frame
static int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame)
Definition: ffmpeg_filter.c:2037
stream_specifier_parse
int stream_specifier_parse(StreamSpecifier *ss, const char *spec, int allow_remainder, void *logctx)
Parse a stream specifier string into a form suitable for matching.
Definition: cmdutils.c:1007
ofilter_class
static const AVClass ofilter_class
Definition: ffmpeg_filter.c:629
HWACCEL_CHANGED
@ HWACCEL_CHANGED
Definition: ffmpeg_filter.c:2768
frame_drop_threshold
float frame_drop_threshold
Definition: ffmpeg_opt.c:62
AV_TIME_BASE_Q
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:264
ist_filter_add
int ist_filter_add(InputStream *ist, InputFilter *ifilter, int is_simple, const ViewSpecifier *vs, InputFilterOptions *opts, SchedulerNode *src)
Definition: ffmpeg_demux.c:1006
InputFilterPriv::time_base
AVRational time_base
Definition: ffmpeg_filter.c:140
int64_t
long long int64_t
Definition: coverity.c:34
output
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce output
Definition: filter_design.txt:225
configure_output_filter
static int configure_output_filter(FilterGraphPriv *fgp, AVFilterGraph *graph, OutputFilter *ofilter, AVFilterInOut *out)
Definition: ffmpeg_filter.c:1664
FilterCommand::arg
char * arg
Definition: ffmpeg_filter.c:247
AVSubtitleRect
Definition: avcodec.h:2211
av_asprintf
char * av_asprintf(const char *fmt,...)
Definition: avstring.c:115
AVSubtitle::num_rects
unsigned num_rects
Definition: avcodec.h:2242
dec_filter_add
int dec_filter_add(Decoder *dec, InputFilter *ifilter, InputFilterOptions *opts, const ViewSpecifier *vs, SchedulerNode *src)
Definition: ffmpeg_dec.c:1748
fg_free
void fg_free(FilterGraph **pfg)
Definition: ffmpeg_filter.c:982
FPSConvContext::frames_prev_hist
int64_t frames_prev_hist[3]
Definition: ffmpeg_filter.c:173
AV_CODEC_ID_MPEG4
@ AV_CODEC_ID_MPEG4
Definition: codec_id.h:64
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:162
AVFrame::opaque
void * opaque
Frame owner's private data.
Definition: frame.h:537
AVFrame::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: frame.h:679
InputFile::index
int index
Definition: ffmpeg.h:477
sample_rates
static const int sample_rates[]
Definition: dcaenc.h:34
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:389
tmp
static uint8_t tmp[11]
Definition: aes_ctr.c:28
AVFilterInOut::next
struct AVFilterInOut * next
next input/input in the list, NULL if this is the last
Definition: avfilter.h:914
pixdesc.h
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:501
AVFrame::width
int width
Definition: frame.h:461
FilterGraphPriv::log_name
char log_name[32]
Definition: ffmpeg_filter.c:48
w
uint8_t w
Definition: llviddspenc.c:38
FilterGraphPriv::have_sources
int have_sources
Definition: ffmpeg_filter.c:55
StreamSpecifier
Definition: cmdutils.h:113
AVOption
AVOption.
Definition: opt.h:429
fg_output_frame
static int fg_output_frame(OutputFilterPriv *ofp, FilterGraphThread *fgt, AVFrame *frame)
Definition: ffmpeg_filter.c:2418
av_buffersrc_add_frame
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
Definition: buffersrc.c:173
FilterGraph::index
int index
Definition: ffmpeg.h:373
InputFilterPriv::sample_rate
int sample_rate
Definition: ffmpeg_filter.c:137
AV_BUFFERSRC_FLAG_KEEP_REF
@ AV_BUFFERSRC_FLAG_KEEP_REF
Keep a reference to the frame.
Definition: buffersrc.h:53
data
const char data[16]
Definition: mxf.c:148
FPSConvContext::last_dropped
int last_dropped
Definition: ffmpeg_filter.c:177
OutputFilterPriv::ts_offset
int64_t ts_offset
Definition: ffmpeg_filter.c:232
cleanup_filtergraph
static void cleanup_filtergraph(FilterGraph *fg, FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:1850
ffmpeg.h
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:196
filter
void(* filter)(uint8_t *src, int stride, int qscale)
Definition: h263dsp.c:29
fc
#define fc(width, name, range_min, range_max)
Definition: cbs_av1.c:472
FilterGraph::nb_inputs
int nb_inputs
Definition: ffmpeg.h:376
VIDEO_CHANGED
@ VIDEO_CHANGED
Definition: ffmpeg_filter.c:2765
AV_FRAME_DATA_DISPLAYMATRIX
@ AV_FRAME_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: frame.h:85
ViewSpecifier
Definition: ffmpeg.h:128
AVDictionary
Definition: dict.c:34
AVFrame::flags
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:661
ofp_from_ofilter
static OutputFilterPriv * ofp_from_ofilter(OutputFilter *ofilter)
Definition: ffmpeg_filter.c:239
AVChannelLayout::order
enum AVChannelOrder order
Channel order used in this layout.
Definition: channel_layout.h:316
av_buffer_ref
AVBufferRef * av_buffer_ref(const AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:103
IFILTER_FLAG_AUTOROTATE
@ IFILTER_FLAG_AUTOROTATE
Definition: ffmpeg.h:260
AVChannelLayout::nb_channels
int nb_channels
Number of channels in this layout.
Definition: channel_layout.h:321
configure_output_audio_filter
static int configure_output_audio_filter(FilterGraphPriv *fgp, AVFilterGraph *graph, OutputFilter *ofilter, AVFilterInOut *out)
Definition: ffmpeg_filter.c:1580
AVFrame::buf
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:602
avio_size
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:323
av_strlcatf
size_t av_strlcatf(char *dst, size_t size, const char *fmt,...)
Definition: avstring.c:103
ost
static AVStream * ost
Definition: vaapi_transcode.c:42
fg_output_step
static int fg_output_step(OutputFilterPriv *ofp, FilterGraphThread *fgt, AVFrame *frame)
Definition: ffmpeg_filter.c:2501
FilterGraphPriv
Definition: ffmpeg_filter.c:44
av_channel_layout_describe_bprint
int av_channel_layout_describe_bprint(const AVChannelLayout *channel_layout, AVBPrint *bp)
bprint variant of av_channel_layout_describe().
Definition: channel_layout.c:594
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:30
FilterGraphThread::eof_in
uint8_t * eof_in
Definition: ffmpeg_filter.c:100
avfilter_graph_free
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
Definition: avfiltergraph.c:117
configure_filtergraph
static int configure_filtergraph(FilterGraph *fg, FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:1885
OutputFilterPriv::log_name
char log_name[32]
Definition: ffmpeg_filter.c:194
AUTO_INSERT_FILTER
#define AUTO_INSERT_FILTER(opt_name, filter_name, arg)
stream_specifier_uninit
void stream_specifier_uninit(StreamSpecifier *ss)
Definition: cmdutils.c:998
InputStream
Definition: ffmpeg.h:432
filter_nbthreads
char * filter_nbthreads
Definition: ffmpeg_opt.c:76
debug_ts
int debug_ts
Definition: ffmpeg_opt.c:70
OutputFilterOptions
Definition: ffmpeg.h:300
InputFilterOptions::trim_start_us
int64_t trim_start_us
Definition: ffmpeg.h:267
InputFilterOptions::flags
unsigned flags
Definition: ffmpeg.h:288
avfilter_graph_create_filter
int avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt, const char *name, const char *args, void *opaque, AVFilterGraph *graph_ctx)
Create and add a filter instance into an existing graph.
Definition: avfiltergraph.c:138
avfilter_graph_alloc_filter
AVFilterContext * avfilter_graph_alloc_filter(AVFilterGraph *graph, const AVFilter *filter, const char *name)
Create a new filter instance in a filter graph.
Definition: avfiltergraph.c:165
finish
static void finish(void)
Definition: movenc.c:374
AV_OPT_TYPE_BINARY
@ AV_OPT_TYPE_BINARY
Underlying C type is a uint8_t* that is either NULL or points to an array allocated with the av_mallo...
Definition: opt.h:286
av_color_space_name
const char * av_color_space_name(enum AVColorSpace space)
Definition: pixdesc.c:3341
FRAME_OPAQUE_SUB_HEARTBEAT
@ FRAME_OPAQUE_SUB_HEARTBEAT
Definition: ffmpeg.h:88
OutputFilterPriv
Definition: ffmpeg_filter.c:188
fg_thread_uninit
static void fg_thread_uninit(FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:2916
fail
#define fail()
Definition: checkasm.h:188
av_fifo_write
int av_fifo_write(AVFifo *f, const void *buf, size_t nb_elems)
Write data into a FIFO.
Definition: fifo.c:188
OutputFilterPriv::name
char * name
Definition: ffmpeg_filter.c:196
sub2video_push_ref
static void sub2video_push_ref(InputFilterPriv *ifp, int64_t pts)
Definition: ffmpeg_filter.c:317
avfilter_graph_alloc
AVFilterGraph * avfilter_graph_alloc(void)
Allocate a filter graph.
Definition: avfiltergraph.c:83
AV_PIX_FMT_FLAG_HWACCEL
#define AV_PIX_FMT_FLAG_HWACCEL
Pixel format is an HW accelerated format.
Definition: pixdesc.h:128
FFSIGN
#define FFSIGN(a)
Definition: common.h:75
samplefmt.h
AVERROR_OPTION_NOT_FOUND
#define AVERROR_OPTION_NOT_FOUND
Option not found.
Definition: error.h:63
avfilter_graph_segment_free
void avfilter_graph_segment_free(AVFilterGraphSegment **seg)
Free the provided AVFilterGraphSegment and everything associated with it.
Definition: graphparser.c:276
sub2video_get_blank_frame
static int sub2video_get_blank_frame(InputFilterPriv *ifp)
Definition: ffmpeg_filter.c:264
AV_BPRINT_SIZE_AUTOMATIC
#define AV_BPRINT_SIZE_AUTOMATIC
ifilter_has_all_input_formats
static int ifilter_has_all_input_formats(FilterGraph *fg)
Definition: ffmpeg_filter.c:599
val
static double val(void *priv, double ch)
Definition: aeval.c:77
OutputFilterPriv::index
int index
Definition: ffmpeg_filter.c:191
AVFrame::ch_layout
AVChannelLayout ch_layout
Channel layout of the audio data.
Definition: frame.h:790
SCH_ENC
#define SCH_ENC(encoder)
Definition: ffmpeg_sched.h:123
configure_input_video_filter
static int configure_input_video_filter(FilterGraph *fg, AVFilterGraph *graph, InputFilter *ifilter, AVFilterInOut *in)
Definition: ffmpeg_filter.c:1685
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
avfilter_graph_segment_parse
int avfilter_graph_segment_parse(AVFilterGraph *graph, const char *graph_str, int flags, AVFilterGraphSegment **seg)
Parse a textual filtergraph description into an intermediate form.
Definition: graphparser.c:460
pts
static int64_t pts
Definition: transcode_aac.c:644
av_opt_set
int av_opt_set(void *obj, const char *name, const char *val, int search_flags)
Definition: opt.c:748
graph_is_meta
static int graph_is_meta(AVFilterGraph *graph)
Definition: ffmpeg_filter.c:1866
FilterGraphThread::frame
AVFrame * frame
Definition: ffmpeg_filter.c:87
ss
#define ss(width, name, subs,...)
Definition: cbs_vp9.c:202
AVFILTER_FLAG_DYNAMIC_INPUTS
#define AVFILTER_FLAG_DYNAMIC_INPUTS
The number of the filter inputs is not determined just by AVFilter.inputs.
Definition: avfilter.h:141
FrameData::tb
AVRational tb
Definition: ffmpeg.h:683
OutputFilterPriv::sws_opts
AVDictionary * sws_opts
Definition: ffmpeg_filter.c:217
fgp_from_fg
static FilterGraphPriv * fgp_from_fg(FilterGraph *fg)
Definition: ffmpeg_filter.c:73
OutputFilterPriv::sample_rate
int sample_rate
Definition: ffmpeg_filter.c:203
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
InputFilterPriv::sub2video
struct InputFilterPriv::@8 sub2video
FPSConvContext::dropped_keyframe
int dropped_keyframe
Definition: ffmpeg_filter.c:178
AVRational::num
int num
Numerator.
Definition: rational.h:59
OutputFilter::bound
int bound
Definition: ffmpeg.h:360
LATENCY_PROBE_FILTER_PRE
@ LATENCY_PROBE_FILTER_PRE
Definition: ffmpeg.h:102
InputFilterOptions::trim_end_us
int64_t trim_end_us
Definition: ffmpeg.h:268
AVFilterPad
A filter pad used for either input or output.
Definition: filters.h:38
sch_add_filtergraph
int sch_add_filtergraph(Scheduler *sch, unsigned nb_inputs, unsigned nb_outputs, SchThreadFunc func, void *ctx)
Add a filtergraph to the scheduler.
Definition: ffmpeg_sched.c:829
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:150
sub2video_heartbeat
static void sub2video_heartbeat(InputFilter *ifilter, int64_t pts, AVRational tb)
Definition: ffmpeg_filter.c:2640
avfilter_inout_free
void avfilter_inout_free(AVFilterInOut **inout)
Free the supplied list of AVFilterInOut and set *inout to NULL.
Definition: graphparser.c:76
avassert.h
OutputFilterPriv::trim_start_us
int64_t trim_start_us
Definition: ffmpeg_filter.c:229
FrameData::frame_rate_filter
AVRational frame_rate_filter
Definition: ffmpeg.h:686
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
send_eof
static int send_eof(FilterGraphThread *fgt, InputFilter *ifilter, int64_t pts, AVRational tb)
Definition: ffmpeg_filter.c:2709
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
InputFilterPriv
Definition: ffmpeg_filter.c:104
av_fifo_read
int av_fifo_read(AVFifo *f, void *buf, size_t nb_elems)
Read data from a FIFO.
Definition: fifo.c:240
fg_complex_bind_input
static int fg_complex_bind_input(FilterGraph *fg, InputFilter *ifilter)
Definition: ffmpeg_filter.c:1241
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:640
duration
int64_t duration
Definition: movenc.c:65
av_buffersink_get_frame_rate
AVRational av_buffersink_get_frame_rate(const AVFilterContext *ctx)
Definition: buffersink.c:190
ifilter_alloc
static InputFilter * ifilter_alloc(FilterGraph *fg)
Definition: ffmpeg_filter.c:954
AVFilterChain::filters
AVFilterParams ** filters
Definition: avfilter.h:1090
filter_command_free
static void filter_command_free(void *opaque, uint8_t *data)
Definition: ffmpeg_filter.c:253
VSYNC_VSCFR
@ VSYNC_VSCFR
Definition: ffmpeg.h:70
llrintf
#define llrintf(x)
Definition: libm.h:399
s
#define s(width, name)
Definition: cbs_vp9.c:198
ifilter_bind_ist
static int ifilter_bind_ist(InputFilter *ifilter, InputStream *ist, const ViewSpecifier *vs)
Definition: ffmpeg_filter.c:662
FilterGraphPriv::frame_enc
AVFrame * frame_enc
Definition: ffmpeg_filter.c:67
InputFilterPriv::frame
AVFrame * frame
Definition: ffmpeg_filter.c:114
FilterGraph::outputs
OutputFilter ** outputs
Definition: ffmpeg.h:377
ofilter_item_name
static const char * ofilter_item_name(void *obj)
Definition: ffmpeg_filter.c:623
format
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample format(the sample packing is implied by the sample format) and sample rate. The lists are not just lists
AVDictionaryEntry::key
char * key
Definition: dict.h:90
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
VIEW_SPECIFIER_TYPE_NONE
@ VIEW_SPECIFIER_TYPE_NONE
Definition: ffmpeg.h:117
AV_CHANNEL_ORDER_UNSPEC
@ AV_CHANNEL_ORDER_UNSPEC
Only the channel count is specified, without any further information about the channel order.
Definition: channel_layout.h:116
ifilter_bind_dec
static int ifilter_bind_dec(InputFilterPriv *ifp, Decoder *dec, const ViewSpecifier *vs)
Definition: ffmpeg_filter.c:717
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
OutputFilter::linklabel
uint8_t * linklabel
Definition: ffmpeg.h:361
InputFilter
Definition: ffmpeg.h:347
FilterGraphPriv::nb_outputs_done
unsigned nb_outputs_done
Definition: ffmpeg_filter.c:58
av_buffersink_get_format
int av_buffersink_get_format(const AVFilterContext *ctx)
av_buffersink_get_time_base
AVRational av_buffersink_get_time_base(const AVFilterContext *ctx)
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:304
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
FrameData::dec
struct FrameData::@4 dec
ctx
AVFormatContext * ctx
Definition: movenc.c:49
OFILTER_FLAG_AUTOSCALE
@ OFILTER_FLAG_AUTOSCALE
Definition: ffmpeg.h:297
av_rescale_q
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
AVSubtitle::pts
int64_t pts
Same as packet pts, in AV_TIME_BASE.
Definition: avcodec.h:2244
fg_thread_init
static int fg_thread_init(FilterGraphThread *fgt, const FilterGraph *fg)
Definition: ffmpeg_filter.c:2934
InputFilterOptions::name
uint8_t * name
Definition: ffmpeg.h:270
graph_opts_apply
static int graph_opts_apply(AVFilterGraphSegment *seg)
Definition: ffmpeg_filter.c:531
InputFilterOptions::crop_top
unsigned crop_top
Definition: ffmpeg.h:279
init_simple_filtergraph
int init_simple_filtergraph(InputStream *ist, OutputStream *ost, char *graph_desc, Scheduler *sch, unsigned sched_idx_enc, const OutputFilterOptions *opts)
Definition: ffmpeg_filter.c:1185
InputFilter::graph
struct FilterGraph * graph
Definition: ffmpeg.h:348
av_get_sample_fmt_name
const char * av_get_sample_fmt_name(enum AVSampleFormat sample_fmt)
Return the name of sample_fmt, or NULL if sample_fmt is not recognized.
Definition: samplefmt.c:51
key
const char * key
Definition: hwcontext_opencl.c:189
color_range
color_range
Definition: vf_selectivecolor.c:43
AV_ROUND_NEAR_INF
@ AV_ROUND_NEAR_INF
Round to nearest and halfway cases away from zero.
Definition: mathematics.h:135
fsize
static int64_t fsize(FILE *f)
Definition: audiomatch.c:29
OutputFilterPriv::fps
FPSConvContext fps
Definition: ffmpeg_filter.c:234
fg_item_name
static const char * fg_item_name(void *obj)
Definition: ffmpeg_filter.c:1038
AV_ROUND_PASS_MINMAX
@ AV_ROUND_PASS_MINMAX
Flag telling rescaling functions to pass INT64_MIN/MAX through unchanged, avoiding special cases for ...
Definition: mathematics.h:159
command
static int command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
Definition: vf_drawtext.c:1186
arg
const char * arg
Definition: jacosubdec.c:67
OutputFilterPriv::ch_layouts
const AVChannelLayout * ch_layouts
Definition: ffmpeg_filter.c:223
OutputFilterPriv::width
int width
Definition: ffmpeg_filter.c:202
InputFilterOptions::crop_left
unsigned crop_left
Definition: ffmpeg.h:281
av_color_range_name
const char * av_color_range_name(enum AVColorRange range)
Definition: pixdesc.c:3281
AVFormatContext
Format I/O context.
Definition: avformat.h:1287
avfilter_get_by_name
const AVFilter * avfilter_get_by_name(const char *name)
Get a filter definition matching the given name.
Definition: allfilters.c:634
opts
AVDictionary * opts
Definition: movenc.c:51
AVStream::codecpar
AVCodecParameters * codecpar
Codec parameters associated with this stream.
Definition: avformat.h:771
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
FilterGraphPriv::nb_threads
char * nb_threads
Definition: ffmpeg_filter.c:62
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
OutputFilter::name
uint8_t * name
Definition: ffmpeg.h:356
fabs
static __device__ float fabs(float a)
Definition: cuda_runtime.h:182
avfilter_graph_config
int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx)
Check validity and configure all the links and formats in the graph.
Definition: avfiltergraph.c:1294
OutputFilterPriv::enc_timebase
AVRational enc_timebase
Definition: ffmpeg_filter.c:228
avfilter_graph_segment_apply
int avfilter_graph_segment_apply(AVFilterGraphSegment *seg, int flags, AVFilterInOut **inputs, AVFilterInOut **outputs)
Apply all filter/link descriptions from a graph segment to the associated filtergraph.
Definition: graphparser.c:881
InputFilterPriv::color_space
enum AVColorSpace color_space
Definition: ffmpeg_filter.c:134
NULL
#define NULL
Definition: coverity.c:32
av_opt_set_bin
int av_opt_set_bin(void *obj, const char *name, const uint8_t *val, int len, int search_flags)
Definition: opt.c:815
set_channel_layout
static int set_channel_layout(OutputFilterPriv *f, const AVChannelLayout *layouts_allowed, const AVChannelLayout *layout_requested)
Definition: ffmpeg_filter.c:746
OutputFilterPriv::ch_layout
AVChannelLayout ch_layout
Definition: ffmpeg_filter.c:204
AVFilterParams
Parameters describing a filter to be created in a filtergraph.
Definition: avfilter.h:1022
FPSConvContext::dup_warning
uint64_t dup_warning
Definition: ffmpeg_filter.c:175
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:139
InputStream::st
AVStream * st
Definition: ffmpeg.h:440
avfilter_graph_set_auto_convert
void avfilter_graph_set_auto_convert(AVFilterGraph *graph, unsigned flags)
Enable or disable automatic format conversion inside the graph.
Definition: avfiltergraph.c:160
InputFilterPriv::displaymatrix_present
int displaymatrix_present
Definition: ffmpeg_filter.c:146
Decoder
Definition: ffmpeg.h:418
AVFilterParams::filter
AVFilterContext * filter
The filter context.
Definition: avfilter.h:1033
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
OFILTER_FLAG_AUDIO_24BIT
@ OFILTER_FLAG_AUDIO_24BIT
Definition: ffmpeg.h:296
AVFilterChain::nb_filters
size_t nb_filters
Definition: avfilter.h:1091
AVFilterGraph::filters
AVFilterContext ** filters
Definition: avfilter.h:762
InputFilterPriv::linklabel
uint8_t * linklabel
Definition: ffmpeg_filter.c:118
ofilter_bind_ifilter
static int ofilter_bind_ifilter(OutputFilter *ofilter, InputFilterPriv *ifp, const OutputFilterOptions *opts)
Definition: ffmpeg_filter.c:897
filter_opt_apply
static int filter_opt_apply(AVFilterContext *f, const char *key, const char *val)
Definition: ffmpeg_filter.c:476
AVFILTER_AUTO_CONVERT_NONE
@ AVFILTER_AUTO_CONVERT_NONE
all automatic conversions disabled
Definition: avfilter.h:876
OutputFilterPriv::sample_aspect_ratio
AVRational sample_aspect_ratio
Definition: ffmpeg_filter.c:215
ofilter_alloc
static OutputFilter * ofilter_alloc(FilterGraph *fg, enum AVMediaType type)
Definition: ffmpeg_filter.c:637
close_output
static int close_output(OutputFilterPriv *ofp, FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:2368
FilterGraphThread::frame_queue_out
AVFifo * frame_queue_out
Definition: ffmpeg_filter.c:92
mathops.h
FilterGraphPriv::sch_idx
unsigned sch_idx
Definition: ffmpeg_filter.c:70
FrameData::wallclock
int64_t wallclock[LATENCY_PROBE_NB]
Definition: ffmpeg.h:690
avfilter_graph_request_oldest
int avfilter_graph_request_oldest(AVFilterGraph *graph)
Request a frame on the oldest sink link.
Definition: avfiltergraph.c:1424
time.h
AVFilterGraphSegment::chains
AVFilterChain ** chains
A list of filter chain contained in this segment.
Definition: avfilter.h:1114
stream_specifier_match
unsigned stream_specifier_match(const StreamSpecifier *ss, const AVFormatContext *s, const AVStream *st, void *logctx)
Definition: cmdutils.c:1222
AVFilterGraph
Definition: avfilter.h:760
inputs
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
Definition: filter_design.txt:243
InputFilterOptions
Definition: ffmpeg.h:266
InputFilterPriv::sample_aspect_ratio
AVRational sample_aspect_ratio
Definition: ffmpeg_filter.c:133
AVCOL_RANGE_UNSPECIFIED
@ AVCOL_RANGE_UNSPECIFIED
Definition: pixfmt.h:652
FilterGraph::nb_outputs
int nb_outputs
Definition: ffmpeg.h:378
OutputFilterPriv::formats
const int * formats
Definition: ffmpeg_filter.c:222
av_opt_set_int
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
Definition: opt.c:800
InputStream::par
AVCodecParameters * par
Codec parameters - to be used by the decoding/streamcopy code.
Definition: ffmpeg.h:448
av_buffer_create
AVBufferRef * av_buffer_create(uint8_t *data, size_t size, void(*free)(void *opaque, uint8_t *data), void *opaque, int flags)
Create an AVBuffer from an existing array.
Definition: buffer.c:55
input_files
InputFile ** input_files
Definition: ffmpeg.c:104
AV_CLASS_CATEGORY_FILTER
@ AV_CLASS_CATEGORY_FILTER
Definition: log.h:36
Scheduler
Definition: ffmpeg_sched.c:275
FilterGraphPriv::fg
FilterGraph fg
Definition: ffmpeg_filter.c:45
OutputFilterPriv::ofilter
OutputFilter ofilter
Definition: ffmpeg_filter.c:189
FilterGraph
Definition: ffmpeg.h:371
AVFilterGraphSegment
A parsed representation of a filtergraph segment.
Definition: avfilter.h:1103
file_read
char * file_read(const char *filename)
Definition: cmdutils.c:1493
ENC_TIME_BASE_DEMUX
@ ENC_TIME_BASE_DEMUX
Definition: ffmpeg.h:77
InputFilterOptions::sub2video_width
int sub2video_width
Definition: ffmpeg.h:284
AVFilterInOut::pad_idx
int pad_idx
index of the filt_ctx pad to use for linking
Definition: avfilter.h:911
av_buffersrc_close
int av_buffersrc_close(AVFilterContext *ctx, int64_t pts, unsigned flags)
Close the buffer source after EOF.
Definition: buffersrc.c:277
AVFilterGraph::scale_sws_opts
char * scale_sws_opts
sws options to use for the auto-inserted scale filters
Definition: avfilter.h:765
filtergraph_is_simple
int filtergraph_is_simple(const FilterGraph *fg)
Definition: ffmpeg_filter.c:2072
VideoSyncMethod
VideoSyncMethod
Definition: ffmpeg.h:65
av_opt_find
const AVOption * av_opt_find(void *obj, const char *name, const char *unit, int opt_flags, int search_flags)
Look for an option in an object.
Definition: opt.c:1957
IFILTER_FLAG_REINIT
@ IFILTER_FLAG_REINIT
Definition: ffmpeg.h:261
f
f
Definition: af_crystalizer.c:122
AVIOContext
Bytestream IO Context.
Definition: avio.h:160
av_ts2timestr
#define av_ts2timestr(ts, tb)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:83
filter_thread
static int filter_thread(void *arg)
Definition: ffmpeg_filter.c:2961
AVMediaType
AVMediaType
Definition: avutil.h:199
InputFilterPriv::hw_frames_ctx
AVBufferRef * hw_frames_ctx
Definition: ffmpeg_filter.c:144
AVFifo
Definition: fifo.c:35
FRAME_OPAQUE_SEND_COMMAND
@ FRAME_OPAQUE_SEND_COMMAND
Definition: ffmpeg.h:90
FilterGraphThread
Definition: ffmpeg_filter.c:84
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:388
av_bprint_finalize
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:240
InputFilterPriv::displaymatrix
int32_t displaymatrix[9]
Definition: ffmpeg_filter.c:148
AVChannelLayout
An AVChannelLayout holds information about the channel layout of audio data.
Definition: channel_layout.h:311
OutputFilterPriv::color_ranges
enum AVColorRange * color_ranges
Definition: ffmpeg_filter.c:226
FilterGraphThread::graph
AVFilterGraph * graph
Definition: ffmpeg_filter.c:85
av_buffersrc_parameters_alloc
AVBufferSrcParameters * av_buffersrc_parameters_alloc(void)
Allocate a new AVBufferSrcParameters instance.
Definition: buffersrc.c:104
AVFilterInOut::filter_ctx
AVFilterContext * filter_ctx
filter context associated to this input/output
Definition: avfilter.h:908
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:83
av_err2str
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:122
AVFrame::sample_rate
int sample_rate
Sample rate of the audio data.
Definition: frame.h:588
OutputFilterPriv::tb_out_locked
int tb_out_locked
Definition: ffmpeg_filter.c:213
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
avfilter_link
int avfilter_link(AVFilterContext *src, unsigned srcpad, AVFilterContext *dst, unsigned dstpad)
Link two filters together.
Definition: avfilter.c:148
AVBufferSrcParameters::hw_frames_ctx
AVBufferRef * hw_frames_ctx
Video with a hwaccel pixel format only.
Definition: buffersrc.h:106
start_time
static int64_t start_time
Definition: ffplay.c:326
AVFILTER_FLAG_HWDEVICE
#define AVFILTER_FLAG_HWDEVICE
The filter can create hardware frames using AVFilterContext.hw_device_ctx.
Definition: avfilter.h:173
adjust_frame_pts_to_encoder_tb
static double adjust_frame_pts_to_encoder_tb(AVFrame *frame, AVRational tb_dst, int64_t start_time)
Definition: ffmpeg_filter.c:2204
InputFilterPriv::color_range
enum AVColorRange color_range
Definition: ffmpeg_filter.c:135
AV_SAMPLE_FMT_NONE
@ AV_SAMPLE_FMT_NONE
Definition: samplefmt.h:56
MATRIX_CHANGED
@ MATRIX_CHANGED
Definition: ffmpeg_filter.c:2767
FilterCommand::time
double time
Definition: ffmpeg_filter.c:249
insert_trim
static int insert_trim(int64_t start_time, int64_t duration, AVFilterContext **last_filter, int *pad_idx, const char *filter_name)
Definition: ffmpeg_filter.c:1424
InputFilterPriv::initialize
unsigned int initialize
Definition: ffmpeg_filter.c:157
InputFilterPriv::displaymatrix_applied
int displaymatrix_applied
Definition: ffmpeg_filter.c:147
graph_parse
static int graph_parse(AVFilterGraph *graph, const char *desc, AVFilterInOut **inputs, AVFilterInOut **outputs, AVBufferRef *hw_device)
Definition: ffmpeg_filter.c:555
avfilter_graph_queue_command
int avfilter_graph_queue_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, int flags, double ts)
Queue a command for one or more filter instances.
Definition: avfiltergraph.c:1342
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
AVFrame::time_base
AVRational time_base
Time base for the timestamps in this frame.
Definition: frame.h:516
AVFrameSideData::data
uint8_t * data
Definition: frame.h:267
read_binary
static int read_binary(const char *path, uint8_t **data, int *len)
Definition: ffmpeg_filter.c:428
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:476
FilterGraphPriv::disable_conversions
int disable_conversions
Definition: ffmpeg_filter.c:56
frame_data
FrameData * frame_data(AVFrame *frame)
Get our axiliary frame data attached to the frame, allocating it if needed.
Definition: ffmpeg.c:453
AVSubtitle::end_display_time
uint32_t end_display_time
Definition: avcodec.h:2241
FilterGraphThread::eof_out
uint8_t * eof_out
Definition: ffmpeg_filter.c:101
FilterGraphPriv::graph_desc
const char * graph_desc
Definition: ffmpeg_filter.c:60
allocate_array_elem
void * allocate_array_elem(void *ptr, size_t elem_size, int *nb_elems)
Atomically add a new element to an array of pointers, i.e.
Definition: cmdutils.c:1465
FPSConvContext::vsync_method
enum VideoSyncMethod vsync_method
Definition: ffmpeg_filter.c:180
av_frame_remove_side_data
void av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type)
Remove and free all side data instances of the given type.
Definition: frame.c:1017
InputFilterPriv::width
int width
Definition: ffmpeg_filter.c:132
AV_PIX_FMT_RGB32
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:451
filter_is_buffersrc
static int filter_is_buffersrc(const AVFilterContext *f)
Definition: ffmpeg_filter.c:1859
fg_finalise_bindings
int fg_finalise_bindings(void)
Definition: ffmpeg_filter.c:1395
AUDIO_CHANGED
@ AUDIO_CHANGED
Definition: ffmpeg_filter.c:2766
sch_filter_receive
int sch_filter_receive(Scheduler *sch, unsigned fg_idx, unsigned *in_idx, AVFrame *frame)
Called by filtergraph tasks to obtain frames for filtering.
Definition: ffmpeg_sched.c:2375
av_dict_free
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
Definition: dict.c:223
unknown_if_null
static const char * unknown_if_null(const char *str)
Definition: ffmpeg_filter.c:2771
InputFilterOptions::sub2video_height
int sub2video_height
Definition: ffmpeg.h:285
decoders
Decoder ** decoders
Definition: ffmpeg.c:113
OutputFilterPriv::log_parent
void * log_parent
Definition: ffmpeg_filter.c:193
nb_decoders
int nb_decoders
Definition: ffmpeg.c:114
OutputFilter::type
enum AVMediaType type
Definition: ffmpeg.h:365
read_frames
static int read_frames(FilterGraph *fg, FilterGraphThread *fgt, AVFrame *frame)
Definition: ffmpeg_filter.c:2578
av_channel_layout_compare
int av_channel_layout_compare(const AVChannelLayout *chl, const AVChannelLayout *chl1)
Check whether two channel layouts are semantically the same, i.e.
Definition: channel_layout.c:804
SUBTITLE_BITMAP
@ SUBTITLE_BITMAP
A bitmap, pict will be set.
Definition: avcodec.h:2194
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:191
send_frame
static int send_frame(FilterGraph *fg, FilterGraphThread *fgt, InputFilter *ifilter, AVFrame *frame)
Definition: ffmpeg_filter.c:2776
avfilter_init_str
int avfilter_init_str(AVFilterContext *filter, const char *args)
Initialize a filter with the supplied parameters.
Definition: avfilter.c:948
buffersink.h
av_channel_layout_default
void av_channel_layout_default(AVChannelLayout *ch_layout, int nb_channels)
Get the default channel layout for a given number of channels.
Definition: channel_layout.c:834
av_find_nearest_q_idx
int av_find_nearest_q_idx(AVRational q, const AVRational *q_list)
Find the value in a list of rationals nearest a given reference rational.
Definition: rational.c:142
OutputFilterPriv::color_range
enum AVColorRange color_range
Definition: ffmpeg_filter.c:206
av_buffersink_get_w
int av_buffersink_get_w(const AVFilterContext *ctx)
FilterCommand::all_filters
int all_filters
Definition: ffmpeg_filter.c:250
FPSConvContext::framerate_clip
int framerate_clip
Definition: ffmpeg_filter.c:185
bprint.h
FPSConvContext::frame_number
int64_t frame_number
Definition: ffmpeg_filter.c:169
av_buffersrc_parameters_set
int av_buffersrc_parameters_set(AVFilterContext *ctx, AVBufferSrcParameters *param)
Initialize the buffersrc or abuffersrc filter with the provided parameters.
Definition: buffersrc.c:117
FPSConvContext
Definition: ffmpeg_filter.c:166
lrintf
#define lrintf(x)
Definition: libm_mips.h:72
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
InputFilterPriv::index
int index
Definition: ffmpeg_filter.c:109
FrameData::bits_per_raw_sample
int bits_per_raw_sample
Definition: ffmpeg.h:688
fg_send_command
void fg_send_command(FilterGraph *fg, double time, const char *target, const char *command, const char *arg, int all_filters)
Definition: ffmpeg_filter.c:3078
FilterGraphPriv::is_simple
int is_simple
Definition: ffmpeg_filter.c:50
InputFilterOptions::fallback
AVFrame * fallback
Definition: ffmpeg.h:290
av_buffersrc_add_frame_flags
int attribute_align_arg av_buffersrc_add_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Add a frame to the buffer source.
Definition: buffersrc.c:192
VSYNC_CFR
@ VSYNC_CFR
Definition: ffmpeg.h:68
src2
const pixel * src2
Definition: h264pred_template.c:422
configure_input_audio_filter
static int configure_input_audio_filter(FilterGraph *fg, AVFilterGraph *graph, InputFilter *ifilter, AVFilterInOut *in)
Definition: ffmpeg_filter.c:1799
AVColorSpace
AVColorSpace
YUV colorspace type.
Definition: pixfmt.h:609
FPSConvContext::framerate_max
AVRational framerate_max
Definition: ffmpeg_filter.c:183
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:56
AVSampleFormat
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:55
delta
float delta
Definition: vorbis_enc_data.h:430
FRAME_OPAQUE_EOF
@ FRAME_OPAQUE_EOF
Definition: ffmpeg.h:89
InputFile::ctx
AVFormatContext * ctx
Definition: ffmpeg.h:479
av_frame_move_ref
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:637
cfgp_from_cfg
static const FilterGraphPriv * cfgp_from_cfg(const FilterGraph *fg)
Definition: ffmpeg_filter.c:78
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:610
InputFilterPriv::eof
int eof
Definition: ffmpeg_filter.c:126
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:256
av_inv_q
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
av_buffer_replace
int av_buffer_replace(AVBufferRef **pdst, const AVBufferRef *src)
Ensure dst refers to the same data as src.
Definition: buffer.c:233
len
int len
Definition: vorbis_enc_data.h:426
SchedulerNode
Definition: ffmpeg_sched.h:103
AVCOL_SPC_UNSPECIFIED
@ AVCOL_SPC_UNSPECIFIED
Definition: pixfmt.h:612
filtergraphs
FilterGraph ** filtergraphs
Definition: ffmpeg.c:110
int_cb
const AVIOInterruptCB int_cb
Definition: ffmpeg.c:307
OutputFilterPriv::color_space
enum AVColorSpace color_space
Definition: ffmpeg_filter.c:205
ofilter_bind_ost
int ofilter_bind_ost(OutputFilter *ofilter, OutputStream *ost, unsigned sched_idx_enc, const OutputFilterOptions *opts)
Definition: ffmpeg_filter.c:786
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
outputs
static const AVFilterPad outputs[]
Definition: af_aap.c:310
sch_connect
int sch_connect(Scheduler *sch, SchedulerNode src, SchedulerNode dst)
Definition: ffmpeg_sched.c:927
FFMPEG_OPT_VSYNC_DROP
#define FFMPEG_OPT_VSYNC_DROP
Definition: ffmpeg.h:59
av_buffersink_get_h
int av_buffersink_get_h(const AVFilterContext *ctx)
sch_filter_command
int sch_filter_command(Scheduler *sch, unsigned fg_idx, AVFrame *frame)
Definition: ffmpeg_sched.c:2485
AVFilter
Filter definition.
Definition: avfilter.h:201
video_sync_process
static void video_sync_process(OutputFilterPriv *ofp, AVFrame *frame, int64_t *nb_frames, int64_t *nb_frames_prev)
Definition: ffmpeg_filter.c:2246
ifp_from_ifilter
static InputFilterPriv * ifp_from_ifilter(InputFilter *ifilter)
Definition: ffmpeg_filter.c:161
fg_create
int fg_create(FilterGraph **pfg, char *graph_desc, Scheduler *sch)
Create a new filtergraph in the global filtergraph list.
Definition: ffmpeg_filter.c:1052
mid_pred
#define mid_pred
Definition: mathops.h:96
AV_BUFFERSINK_FLAG_NO_REQUEST
#define AV_BUFFERSINK_FLAG_NO_REQUEST
Tell av_buffersink_get_buffer_ref() not to request a frame from its input.
Definition: buffersink.h:97
ret
ret
Definition: filter_design.txt:187
AVStream
Stream structure.
Definition: avformat.h:748
AV_LOG_FATAL
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:174
pixfmt.h
AV_BUFFERSRC_FLAG_PUSH
@ AV_BUFFERSRC_FLAG_PUSH
Immediately push the frame to the output.
Definition: buffersrc.h:46
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:71
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
InputFilter::name
uint8_t * name
Definition: ffmpeg.h:349
VSYNC_DROP
@ VSYNC_DROP
Definition: ffmpeg.h:72
av_channel_layout_check
int av_channel_layout_check(const AVChannelLayout *channel_layout)
Check whether a channel layout is valid, i.e.
Definition: channel_layout.c:778
FPSConvContext::last_frame
AVFrame * last_frame
Definition: ffmpeg_filter.c:167
InputFile::streams
InputStream ** streams
Definition: ffmpeg.h:493
insert_filter
static int insert_filter(AVFilterContext **last_filter, int *pad_idx, const char *filter_name, const char *args)
Definition: ffmpeg_filter.c:1475
AVFilterParams::opts
AVDictionary * opts
Options to be apllied to the filter.
Definition: avfilter.h:1074
OutputFilterPriv::next_pts
int64_t next_pts
Definition: ffmpeg_filter.c:233
av_bprintf
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:99
ReinitReason
ReinitReason
Definition: ffmpeg_filter.c:2764
av_fifo_alloc2
AVFifo * av_fifo_alloc2(size_t nb_elems, size_t elem_size, unsigned int flags)
Allocate and initialize an AVFifo with a given element size.
Definition: fifo.c:47
AVOption::type
enum AVOptionType type
Definition: opt.h:445
AVFrame::sample_aspect_ratio
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:496
avfilter_pad_get_type
enum AVMediaType avfilter_pad_get_type(const AVFilterPad *pads, int pad_idx)
Get the type of an AVFilterPad.
Definition: avfilter.c:981
av_dynarray_add_nofree
int av_dynarray_add_nofree(void *tab_ptr, int *nb_ptr, void *elem)
Add an element to a dynamic array.
Definition: mem.c:315
FrameOpaque
FrameOpaque
Definition: ffmpeg.h:87
OutputFilterPriv::swr_opts
AVDictionary * swr_opts
Definition: ffmpeg_filter.c:218
av_get_media_type_string
const char * av_get_media_type_string(enum AVMediaType media_type)
Return a string describing the media_type enum, NULL if media_type is unknown.
Definition: utils.c:28
AVFrame::height
int height
Definition: frame.h:461
AVStream::index
int index
stream index in AVFormatContext
Definition: avformat.h:754
IFILTER_FLAG_CROP
@ IFILTER_FLAG_CROP
Definition: ffmpeg.h:263
DEF_CHOOSE_FORMAT
#define DEF_CHOOSE_FORMAT(name, type, var, supported_list, none, printf_format, get_name)
Definition: ffmpeg_filter.c:373
channel_layout.h
AVBufferSrcParameters
This structure contains the parameters describing the frames that will be passed to this filter.
Definition: buffersrc.h:73
av_buffersink_get_sample_rate
int av_buffersink_get_sample_rate(const AVFilterContext *ctx)
describe_filter_link
static char * describe_filter_link(FilterGraph *fg, AVFilterInOut *inout, int in)
Definition: ffmpeg_filter.c:611
buffer
the frame and frame reference mechanism is intended to as much as expensive copies of that data while still allowing the filters to produce correct results The data is stored in buffers represented by AVFrame structures Several references can point to the same frame buffer
Definition: filter_design.txt:49
InputFilterPriv::bound
int bound
Definition: ffmpeg_filter.c:127
AVRational::den
int den
Denominator.
Definition: rational.h:60
InputStream::file
struct InputFile * file
Definition: ffmpeg.h:436
AVFilterChain
A filterchain is a list of filter specifications.
Definition: avfilter.h:1089
InputFilterPriv::frame_queue
AVFifo * frame_queue
Definition: ffmpeg_filter.c:142
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
avfilter.h
InputFilterPriv::type_src
enum AVMediaType type_src
Definition: ffmpeg_filter.c:124
av_channel_layout_uninit
void av_channel_layout_uninit(AVChannelLayout *channel_layout)
Free any allocated data in the channel layout and reset the channel count to 0.
Definition: channel_layout.c:437
FilterGraphPriv::is_meta
int is_meta
Definition: ffmpeg_filter.c:53
IFILTER_FLAG_CFR
@ IFILTER_FLAG_CFR
Definition: ffmpeg.h:262
AVFILTER_FLAG_METADATA_ONLY
#define AVFILTER_FLAG_METADATA_ONLY
The filter is a "metadata" filter - it does not modify the frame data in any way.
Definition: avfilter.h:168
avio_read
int avio_read(AVIOContext *s, unsigned char *buf, int size)
Read size bytes from AVIOContext into buf.
Definition: aviobuf.c:612
ifilter_bind_fg
static int ifilter_bind_fg(InputFilterPriv *ifp, FilterGraph *fg_src, int out_idx)
Definition: ffmpeg_filter.c:917
choose_out_timebase
static int choose_out_timebase(OutputFilterPriv *ofp, AVFrame *frame)
Definition: ffmpeg_filter.c:2127
OutputFilterPriv::flags
unsigned flags
Definition: ffmpeg_filter.c:236
OutputFilterPriv::sample_rates
const int * sample_rates
Definition: ffmpeg_filter.c:224
AVERROR_FILTER_NOT_FOUND
#define AVERROR_FILTER_NOT_FOUND
Filter not found.
Definition: error.h:60
sub2video_copy_rect
static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h, AVSubtitleRect *r)
Definition: ffmpeg_filter.c:286
AVFilterGraphSegment::nb_chains
size_t nb_chains
Definition: avfilter.h:1115
AVFilterContext
An instance of a filter.
Definition: avfilter.h:457
FilterGraph::class
const AVClass * class
Definition: ffmpeg.h:372
av_channel_layout_copy
int av_channel_layout_copy(AVChannelLayout *dst, const AVChannelLayout *src)
Make a copy of a channel layout.
Definition: channel_layout.c:444
OutputFilter
Definition: ffmpeg.h:352
sub2video_frame
static int sub2video_frame(InputFilter *ifilter, AVFrame *frame, int buffer)
Definition: ffmpeg_filter.c:2662
InputFilterPriv::ifilter
InputFilter ifilter
Definition: ffmpeg_filter.c:105
AVIO_FLAG_READ
#define AVIO_FLAG_READ
read-only
Definition: avio.h:617
av_strdup
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:272
desc
const char * desc
Definition: libsvtav1.c:79
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
configure_output_video_filter
static int configure_output_video_filter(FilterGraphPriv *fgp, AVFilterGraph *graph, OutputFilter *ofilter, AVFilterInOut *out)
Definition: ffmpeg_filter.c:1501
ViewSpecifier::type
enum ViewSpecifierType type
Definition: ffmpeg.h:129
av_buffersrc_get_nb_failed_requests
unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src)
Get the number of failed requests.
Definition: buffersrc.c:312
OutputFilterPriv::color_spaces
enum AVColorSpace * color_spaces
Definition: ffmpeg_filter.c:225
mem.h
AVBufferRef
A reference to a data buffer.
Definition: buffer.h:82
avio_open2
int avio_open2(AVIOContext **s, const char *filename, int flags, const AVIOInterruptCB *int_cb, AVDictionary **options)
Create and initialize a AVIOContext for accessing the resource indicated by url.
Definition: avio.c:491
av_buffersink_get_colorspace
enum AVColorSpace av_buffersink_get_colorspace(const AVFilterContext *ctx)
OutputFilter::nb_frames_drop
atomic_uint_least64_t nb_frames_drop
Definition: ffmpeg.h:368
auto_conversion_filters
int auto_conversion_filters
Definition: ffmpeg_opt.c:79
llrint
#define llrint(x)
Definition: libm.h:394
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:265
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
InputStream::index
int index
Definition: ffmpeg.h:438
sch_filter_receive_finish
void sch_filter_receive_finish(Scheduler *sch, unsigned fg_idx, unsigned in_idx)
Called by filter tasks to signal that a filter input will no longer accept input.
Definition: ffmpeg_sched.c:2419
av_free
#define av_free(p)
Definition: tableprint_vlc.h:33
AVDictionaryEntry
Definition: dict.h:89
ENC_TIME_BASE_FILTER
@ ENC_TIME_BASE_FILTER
Definition: ffmpeg.h:78
FilterCommand::target
char * target
Definition: ffmpeg_filter.c:245
fg_class
static const AVClass fg_class
Definition: ffmpeg_filter.c:1045
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
av_dict_set
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:88
av_dict_get_string
int av_dict_get_string(const AVDictionary *m, char **buffer, const char key_val_sep, const char pairs_sep)
Get dictionary entries as a string.
Definition: dict.c:250
OFILTER_FLAG_DISABLE_CONVERT
@ OFILTER_FLAG_DISABLE_CONVERT
Definition: ffmpeg.h:294
av_dict_copy
int av_dict_copy(AVDictionary **dst, const AVDictionary *src, int flags)
Copy entries from one AVDictionary struct into another.
Definition: dict.c:237
Decoder::type
enum AVMediaType type
Definition: ffmpeg.h:421
InputFilterPriv::format
int format
Definition: ffmpeg_filter.c:130
InputFilterPriv::end_pts
int64_t end_pts
marks if sub2video_update should force an initialization
Definition: ffmpeg_filter.c:154
nb_filtergraphs
int nb_filtergraphs
Definition: ffmpeg.c:111
int32_t
int32_t
Definition: audioconvert.c:56
sub2video_update
static void sub2video_update(InputFilterPriv *ifp, int64_t heartbeat_pts, const AVSubtitle *sub)
Definition: ffmpeg_filter.c:332
timestamp.h
OutputStream
Definition: mux.c:53
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
avio_close
int avio_close(AVIOContext *s)
Close the resource accessed by the AVIOContext s and free it.
Definition: avio.c:616
OutputFilterPriv::format
int format
Definition: ffmpeg_filter.c:201
av_strlcpy
size_t av_strlcpy(char *dst, const char *src, size_t size)
Copy the string src to dst, but no more than size - 1 bytes, and null-terminate dst.
Definition: avstring.c:85
avfilter_graph_send_command
int avfilter_graph_send_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, char *res, int res_len, int flags)
Send a command to one or more filter instances.
Definition: avfiltergraph.c:1312
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
InputFilterPriv::opts
InputFilterOptions opts
Definition: ffmpeg_filter.c:107
dts_error_threshold
float dts_error_threshold
Definition: ffmpeg_opt.c:57
OutputFilterPriv::trim_duration_us
int64_t trim_duration_us
Definition: ffmpeg_filter.c:230
av_fifo_freep2
void av_fifo_freep2(AVFifo **f)
Free an AVFifo and reset pointer to NULL.
Definition: fifo.c:286
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
av_ts2str
#define av_ts2str(ts)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:54
h
h
Definition: vp9dsp_template.c:2070
av_bprint_chars
void av_bprint_chars(AVBPrint *buf, char c, unsigned n)
Append char c n times to a print buffer.
Definition: bprint.c:145
hw_device_for_filter
AVBufferRef * hw_device_for_filter(void)
Get a hardware device to be used with this filtergraph.
Definition: ffmpeg_hw.c:298
AVDictionaryEntry::value
char * value
Definition: dict.h:91
bind_inputs
static int bind_inputs(FilterGraph *fg)
Definition: ffmpeg_filter.c:1377
AVFilterGraph::nb_filters
unsigned nb_filters
Definition: avfilter.h:763
avstring.h
AVFilterContext::filter
const AVFilter * filter
the AVFilter of which this is an instance
Definition: avfilter.h:460
AVColorRange
AVColorRange
Visual content value range.
Definition: pixfmt.h:651
frame_data_c
const FrameData * frame_data_c(AVFrame *frame)
Definition: ffmpeg.c:459
OutputFilterPriv::tb_out
AVRational tb_out
Definition: ffmpeg_filter.c:210
AVFilterInOut
A linked-list of the inputs/outputs of the filter chain.
Definition: avfilter.h:903
VSYNC_PASSTHROUGH
@ VSYNC_PASSTHROUGH
Definition: ffmpeg.h:67
OutputFilterPriv::height
int height
Definition: ffmpeg_filter.c:202
snprintf
#define snprintf
Definition: snprintf.h:34
SCH_FILTER_IN
#define SCH_FILTER_IN(filter, input)
Definition: ffmpeg_sched.h:126
FPSConvContext::framerate
AVRational framerate
Definition: ffmpeg_filter.c:182
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
buffersrc.h
fg_thread_set_name
static void fg_thread_set_name(const FilterGraph *fg)
Definition: ffmpeg_filter.c:2901
ist_find_unused
InputStream * ist_find_unused(enum AVMediaType type)
Find an unused input stream of given type.
Definition: ffmpeg_demux.c:157
sub2video_prepare
static void sub2video_prepare(InputFilterPriv *ifp)
Definition: ffmpeg_filter.c:1674
av_rescale_q_rnd
int64_t av_rescale_q_rnd(int64_t a, AVRational bq, AVRational cq, enum AVRounding rnd)
Rescale a 64-bit integer by 2 rational numbers with specified rounding.
Definition: mathematics.c:134
av_dict_iterate
const AVDictionaryEntry * av_dict_iterate(const AVDictionary *m, const AVDictionaryEntry *prev)
Iterate over a dictionary.
Definition: dict.c:44
AVSubtitle::start_display_time
uint32_t start_display_time
Definition: avcodec.h:2240
FilterCommand::command
char * command
Definition: ffmpeg_filter.c:246
src
#define src
Definition: vp8dsp.c:248
FilterCommand
Definition: ffmpeg_filter.c:244
AV_FIFO_FLAG_AUTO_GROW
#define AV_FIFO_FLAG_AUTO_GROW
Automatically resize the FIFO on writes, so that the data fits.
Definition: fifo.h:63
InputFilterPriv::height
int height
Definition: ffmpeg_filter.c:132
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2885
OutputFilter::nb_frames_dup
atomic_uint_least64_t nb_frames_dup
Definition: ffmpeg.h:367
filter_complex_nbthreads
int filter_complex_nbthreads
Definition: ffmpeg_opt.c:77
InputFilterOptions::framerate
AVRational framerate
Definition: ffmpeg.h:277
av_buffersink_get_color_range
enum AVColorRange av_buffersink_get_color_range(const AVFilterContext *ctx)
ff_thread_setname
static int ff_thread_setname(const char *name)
Definition: thread.h:216
LATENCY_PROBE_FILTER_POST
@ LATENCY_PROBE_FILTER_POST
Definition: ffmpeg.h:103
FPSConvContext::framerate_supported
const AVRational * framerate_supported
Definition: ffmpeg_filter.c:184