FFmpeg
All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Modules Pages
ffmpeg_filter.c
Go to the documentation of this file.
1 /*
2  * ffmpeg filter configuration
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <stdint.h>
22 
23 #include "ffmpeg.h"
24 
25 #include "libavfilter/avfilter.h"
26 #include "libavfilter/buffersink.h"
27 #include "libavfilter/buffersrc.h"
28 
29 #include "libavutil/avassert.h"
30 #include "libavutil/avstring.h"
31 #include "libavutil/bprint.h"
33 #include "libavutil/downmix_info.h"
34 #include "libavutil/mem.h"
35 #include "libavutil/opt.h"
36 #include "libavutil/pixdesc.h"
37 #include "libavutil/pixfmt.h"
38 #include "libavutil/samplefmt.h"
39 #include "libavutil/time.h"
40 #include "libavutil/timestamp.h"
41 
42 // FIXME private header, used for mid_pred()
43 #include "libavcodec/mathops.h"
44 
45 typedef struct FilterGraphPriv {
47 
48  // name used for logging
49  char log_name[32];
50 
51  int is_simple;
52  // true when the filtergraph contains only meta filters
53  // that do not modify the frame data
54  int is_meta;
55  // source filters are present in the graph
58 
59  unsigned nb_outputs_done;
60 
61  const char *graph_desc;
62 
64 
65  // frame for temporarily holding output from the filtergraph
67  // frame for sending output to the encoder
69 
71  unsigned sch_idx;
73 
75 {
76  return (FilterGraphPriv*)fg;
77 }
78 
79 static const FilterGraphPriv *cfgp_from_cfg(const FilterGraph *fg)
80 {
81  return (const FilterGraphPriv*)fg;
82 }
83 
84 // data that is local to the filter thread and not visible outside of it
85 typedef struct FilterGraphThread {
87 
89 
90  // Temporary buffer for output frames, since on filtergraph reset
91  // we cannot send them to encoders immediately.
92  // The output index is stored in frame opaque.
94 
95  // index of the next input to request from the scheduler
96  unsigned next_in;
97  // set to 1 after at least one frame passed through this output
98  int got_frame;
99 
100  // EOF status of each input/output, as received by the thread
101  uint8_t *eof_in;
102  uint8_t *eof_out;
104 
105 typedef struct InputFilterPriv {
107 
109 
110  int index;
111 
113 
114  // used to hold submitted input
116 
117  /* for filters that are not yet bound to an input stream,
118  * this stores the input linklabel, if any */
119  uint8_t *linklabel;
120 
121  // filter data type
123  // source data type: AVMEDIA_TYPE_SUBTITLE for sub2video,
124  // same as type otherwise
126 
127  int eof;
128  int bound;
130  uint64_t nb_dropped;
131 
132  // parameters configured for this input
133  int format;
134 
135  int width, height;
139 
142 
144 
147 
149 
151 
155 
158 
159  struct {
160  AVFrame *frame;
161 
164 
165  /// marks if sub2video_update should force an initialization
166  unsigned int initialize;
167  } sub2video;
169 
171 {
172  return (InputFilterPriv*)ifilter;
173 }
174 
175 typedef struct FPSConvContext {
177  /* number of frames emitted by the video-encoding sync code */
179  /* history of nb_frames_prev, i.e. the number of times the
180  * previous frame was duplicated by vsync code in recent
181  * do_video_out() calls */
183 
184  uint64_t dup_warning;
185 
188 
190 
196 
197 typedef struct OutputFilterPriv {
199 
200  int index;
201 
202  void *log_parent;
203  char log_name[32];
204 
205  char *name;
206 
208 
209  /* desired output stream properties */
210  int format;
211  int width, height;
216 
219 
220  // time base in which the output is sent to our downstream
221  // does not need to match the filtersink's timebase
223  // at least one frame with the above timebase was sent
224  // to our downstream, so it cannot change anymore
226 
228 
231 
232  // those are only set if no format is specified and the encoder gives us multiple options
233  // They point directly to the relevant lists of the encoder.
234  const int *formats;
236  const int *sample_rates;
239 
243  // offset for output timestamps, in AV_TIME_BASE_Q
247 
248  unsigned flags;
250 
252 {
253  return (OutputFilterPriv*)ofilter;
254 }
255 
// A filter command to be injected into the graph at a given time,
// carried inside an AVBufferRef and released by filter_command_free().
256 typedef struct FilterCommand {
// target filter (instance) name the command is addressed to
257  char *target;
// the command name itself
258  char *command;
// argument string for the command
259  char *arg;
260 
// time at which to deliver the command -- presumably in seconds; confirm
// against the send_command caller
261  double time;
// NOTE(review): the listing is missing original line 262 here (one more
// member, likely a flag such as 'all_filters') -- confirm against the
// actual source before relying on this struct's layout.
263 } FilterCommand;
264 
// AVBufferRef free callback for buffers carrying a FilterCommand:
// releases the owned strings, then the allocation itself.
// 'opaque' is unused; it is required by the av_buffer free-callback
// signature.
265 static void filter_command_free(void *opaque, uint8_t *data)
266 {
268 
// NOTE(review): the listing is missing original line 267, which must derive
// 'fc' from 'data' (presumably FilterCommand *fc = (FilterCommand*)data;)
// -- confirm against the actual source.
269  av_freep(&fc->target);
270  av_freep(&fc->command);
271  av_freep(&fc->arg);
272 
273  av_free(data);
274 }
275 
277 {
278  AVFrame *frame = ifp->sub2video.frame;
279  int ret;
280 
282 
283  frame->width = ifp->width;
284  frame->height = ifp->height;
285  frame->format = ifp->format;
286  frame->colorspace = ifp->color_space;
287  frame->color_range = ifp->color_range;
288 
290  if (ret < 0)
291  return ret;
292 
293  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
294 
295  return 0;
296 }
297 
298 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
299  AVSubtitleRect *r)
300 {
301  uint32_t *pal, *dst2;
302  uint8_t *src, *src2;
303  int x, y;
304 
305  if (r->type != SUBTITLE_BITMAP) {
306  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
307  return;
308  }
309  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
310  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
311  r->x, r->y, r->w, r->h, w, h
312  );
313  return;
314  }
315 
316  dst += r->y * dst_linesize + r->x * 4;
317  src = r->data[0];
318  pal = (uint32_t *)r->data[1];
319  for (y = 0; y < r->h; y++) {
320  dst2 = (uint32_t *)dst;
321  src2 = src;
322  for (x = 0; x < r->w; x++)
323  *(dst2++) = pal[*(src2++)];
324  dst += dst_linesize;
325  src += r->linesize[0];
326  }
327 }
328 
330 {
331  AVFrame *frame = ifp->sub2video.frame;
332  int ret;
333 
334  av_assert1(frame->data[0]);
335  ifp->sub2video.last_pts = frame->pts = pts;
339  if (ret != AVERROR_EOF && ret < 0)
341  "Error while add the frame to buffer source(%s).\n",
342  av_err2str(ret));
343 }
344 
// Render the given subtitle onto the sub2video canvas frame and push the
// result into the buffer source feeding this input.  When sub == NULL this
// is a "heartbeat": a blank (or expiring) canvas is emitted so that video
// keeps flowing even without new subtitles.
345 static void sub2video_update(InputFilterPriv *ifp, int64_t heartbeat_pts,
346  const AVSubtitle *sub)
347 {
348  AVFrame *frame = ifp->sub2video.frame;
// NOTE(review): dst is declared int8_t* but is assigned frame->data[0]
// (uint8_t*) below and passed to sub2video_copy_rect(uint8_t*, ...) --
// a signedness mismatch worth normalizing to uint8_t*.
349  int8_t *dst;
350  int dst_linesize;
351  int num_rects;
352  int64_t pts, end_pts;
353 
354  if (sub) {
// display times are in milliseconds relative to sub->pts (AV_TIME_BASE_Q),
// hence the * 1000LL before rescaling into the input's own time base
355  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
356  AV_TIME_BASE_Q, ifp->time_base);
357  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
358  AV_TIME_BASE_Q, ifp->time_base);
359  num_rects = sub->num_rects;
360  } else {
361  /* If we are initializing the system, utilize current heartbeat
362  PTS as the start time, and show until the following subpicture
363  is received. Otherwise, utilize the previous subpicture's end time
364  as the fall-back value. */
365  pts = ifp->sub2video.initialize ?
366  heartbeat_pts : ifp->sub2video.end_pts;
367  end_pts = INT64_MAX;
368  num_rects = 0;
369  }
370  if (sub2video_get_blank_frame(ifp) < 0) {
// NOTE(review): the listing is missing original line 371 here -- presumably
// the av_log(...) call whose string argument continues on the next line.
372  "Impossible to get a blank canvas.\n");
373  return;
374  }
375  dst = frame->data [0];
376  dst_linesize = frame->linesize[0];
// blit every rectangle of the subtitle onto the blank canvas
377  for (int i = 0; i < num_rects; i++)
378  sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
379  sub2video_push_ref(ifp, pts);
380  ifp->sub2video.end_pts = end_pts;
381  ifp->sub2video.initialize = 0;
382 
383 
/* DEF_CHOOSE_FORMAT(name, type, var, supported_list, none, printf_format, get_name)
 * expands to a helper choose_<name>() that appends a "<name>=<values>"
 * option to an AVBPrint building a filtergraph sink description:
 *  - if ofp->var is set (!= none), that single value is printed;
 *  - otherwise ofp->supported_list (terminated by 'none') is printed as a
 *    '|'-separated list, with the trailing '|' stripped by rewinding the
 *    bprint length;
 *  - if neither is available, nothing is appended at all.
 * Each emitted entry is terminated with ':' so multiple choosers can be
 * chained into one options string. */
386 #define DEF_CHOOSE_FORMAT(name, type, var, supported_list, none, printf_format, get_name) \
387 static void choose_ ## name (OutputFilterPriv *ofp, AVBPrint *bprint) \
388 { \
389  if (ofp->var == none && !ofp->supported_list) \
390  return; \
391  av_bprintf(bprint, #name "="); \
392  if (ofp->var != none) { \
393  av_bprintf(bprint, printf_format, get_name(ofp->var)); \
394  } else { \
395  const type *p; \
396  \
397  for (p = ofp->supported_list; *p != none; p++) { \
398  av_bprintf(bprint, printf_format "|", get_name(*p)); \
399  } \
400  if (bprint->len > 0) \
401  bprint->str[--bprint->len] = '\0'; \
402  } \
403  av_bprint_chars(bprint, ':', 1); \
404 }
405 
408 
411 
413  "%d", )
414 
415 DEF_CHOOSE_FORMAT(color_spaces, enum AVColorSpace, color_space, color_spaces,
417 
418 DEF_CHOOSE_FORMAT(color_ranges, enum AVColorRange, color_range, color_ranges,
420 
// Append a "channel_layouts=..." entry to the sink options string: either
// the single configured layout, or the '|'-separated list of supported
// layouts (trailing '|' stripped), followed by ':'.  Appends nothing when
// neither is available.  Hand-written counterpart of the
// DEF_CHOOSE_FORMAT()-generated choosers, since layouts need
// av_channel_layout_describe_bprint() rather than a printf format.
421 static void choose_channel_layouts(OutputFilterPriv *ofp, AVBPrint *bprint)
422 {
423  if (av_channel_layout_check(&ofp->ch_layout)) {
424  av_bprintf(bprint, "channel_layouts=");
425  av_channel_layout_describe_bprint(&ofp->ch_layout, bprint);
426  } else if (ofp->ch_layouts) {
427  const AVChannelLayout *p;
428 
429  av_bprintf(bprint, "channel_layouts=");
430  for (p = ofp->ch_layouts; p->nb_channels; p++) {
// NOTE(review): the listing is missing original line 431 here -- presumably
// av_channel_layout_describe_bprint(p, bprint); i.e. the call that prints
// each list entry before the '|' separator.  Confirm against the source.
432  av_bprintf(bprint, "|");
433  }
434  if (bprint->len > 0)
435  bprint->str[--bprint->len] = '\0';
436  } else
437  return;
438  av_bprint_chars(bprint, ':', 1);
439 }
440 
441 static int read_binary(void *logctx, const char *path,
442  uint8_t **data, int *len)
443 {
444  AVIOContext *io = NULL;
445  int64_t fsize;
446  int ret;
447 
448  *data = NULL;
449  *len = 0;
450 
451  ret = avio_open2(&io, path, AVIO_FLAG_READ, &int_cb, NULL);
452  if (ret < 0) {
453  av_log(logctx, AV_LOG_ERROR, "Cannot open file '%s': %s\n",
454  path, av_err2str(ret));
455  return ret;
456  }
457 
458  fsize = avio_size(io);
459  if (fsize < 0 || fsize > INT_MAX) {
460  av_log(logctx, AV_LOG_ERROR, "Cannot obtain size of file %s\n", path);
461  ret = AVERROR(EIO);
462  goto fail;
463  }
464 
465  *data = av_malloc(fsize);
466  if (!*data) {
467  ret = AVERROR(ENOMEM);
468  goto fail;
469  }
470 
471  ret = avio_read(io, *data, fsize);
472  if (ret != fsize) {
473  av_log(logctx, AV_LOG_ERROR, "Error reading file %s\n", path);
474  ret = ret < 0 ? ret : AVERROR(EIO);
475  goto fail;
476  }
477 
478  *len = fsize;
479 
480  ret = 0;
481 fail:
482  avio_close(io);
483  if (ret < 0) {
484  av_freep(data);
485  *len = 0;
486  }
487  return ret;
488 }
489 
490 static int filter_opt_apply(void *logctx, AVFilterContext *f,
491  const char *key, const char *val)
492 {
493  const AVOption *o = NULL;
494  int ret;
495 
497  if (ret >= 0)
498  return 0;
499 
500  if (ret == AVERROR_OPTION_NOT_FOUND && key[0] == '/')
502  if (!o)
503  goto err_apply;
504 
505  // key is a valid option name prefixed with '/'
506  // interpret value as a path from which to load the actual option value
507  key++;
508 
509  if (o->type == AV_OPT_TYPE_BINARY) {
510  uint8_t *data;
511  int len;
512 
513  ret = read_binary(logctx, val, &data, &len);
514  if (ret < 0)
515  goto err_load;
516 
518  av_freep(&data);
519  } else {
520  char *data = file_read(val);
521  if (!data) {
522  ret = AVERROR(EIO);
523  goto err_load;
524  }
525 
527  av_freep(&data);
528  }
529  if (ret < 0)
530  goto err_apply;
531 
532  return 0;
533 
534 err_apply:
535  av_log(logctx, AV_LOG_ERROR,
536  "Error applying option '%s' to filter '%s': %s\n",
537  key, f->filter->name, av_err2str(ret));
538  return ret;
539 err_load:
540  av_log(logctx, AV_LOG_ERROR,
541  "Error loading value for option '%s' from file '%s'\n",
542  key, val);
543  return ret;
544 }
545 
546 static int graph_opts_apply(void *logctx, AVFilterGraphSegment *seg)
547 {
548  for (size_t i = 0; i < seg->nb_chains; i++) {
549  AVFilterChain *ch = seg->chains[i];
550 
551  for (size_t j = 0; j < ch->nb_filters; j++) {
552  AVFilterParams *p = ch->filters[j];
553  const AVDictionaryEntry *e = NULL;
554 
555  av_assert0(p->filter);
556 
557  while ((e = av_dict_iterate(p->opts, e))) {
558  int ret = filter_opt_apply(logctx, p->filter, e->key, e->value);
559  if (ret < 0)
560  return ret;
561  }
562 
563  av_dict_free(&p->opts);
564  }
565  }
566 
567  return 0;
568 }
569 
570 static int graph_parse(void *logctx,
571  AVFilterGraph *graph, const char *desc,
573  AVBufferRef *hw_device)
574 {
576  int ret;
577 
578  *inputs = NULL;
579  *outputs = NULL;
580 
581  ret = avfilter_graph_segment_parse(graph, desc, 0, &seg);
582  if (ret < 0)
583  return ret;
584 
586  if (ret < 0)
587  goto fail;
588 
589  if (hw_device) {
590  for (int i = 0; i < graph->nb_filters; i++) {
591  AVFilterContext *f = graph->filters[i];
592 
593  if (!(f->filter->flags & AVFILTER_FLAG_HWDEVICE))
594  continue;
595  f->hw_device_ctx = av_buffer_ref(hw_device);
596  if (!f->hw_device_ctx) {
597  ret = AVERROR(ENOMEM);
598  goto fail;
599  }
600  }
601  }
602 
603  ret = graph_opts_apply(logctx, seg);
604  if (ret < 0)
605  goto fail;
606 
608 
609 fail:
611  return ret;
612 }
613 
614 // Filters can be configured only if the formats of all inputs are known.
616 {
617  for (int i = 0; i < fg->nb_inputs; i++) {
619  if (ifp->format < 0)
620  return 0;
621  }
622  return 1;
623 }
624 
625 static int filter_thread(void *arg);
626 
627 static char *describe_filter_link(FilterGraph *fg, AVFilterInOut *inout, int in)
628 {
629  AVFilterContext *ctx = inout->filter_ctx;
630  AVFilterPad *pads = in ? ctx->input_pads : ctx->output_pads;
631  int nb_pads = in ? ctx->nb_inputs : ctx->nb_outputs;
632 
633  if (nb_pads > 1)
634  return av_strdup(ctx->filter->name);
635  return av_asprintf("%s:%s", ctx->filter->name,
636  avfilter_pad_get_name(pads, inout->pad_idx));
637 }
638 
639 static const char *ofilter_item_name(void *obj)
640 {
641  OutputFilterPriv *ofp = obj;
642  return ofp->log_name;
643 }
644 
// AVClass describing OutputFilter(Priv) objects for av_log(): entries are
// labeled via ofilter_item_name() and chained to the parent log context
// stored at OutputFilterPriv.log_parent (when set).
645 static const AVClass ofilter_class = {
646  .class_name = "OutputFilter",
647  .version = LIBAVUTIL_VERSION_INT,
648  .item_name = ofilter_item_name,
649  .parent_log_context_offset = offsetof(OutputFilterPriv, log_parent),
650  .category = AV_CLASS_CATEGORY_FILTER,
651 };
652 
654 {
655  OutputFilterPriv *ofp;
656  OutputFilter *ofilter;
657 
658  ofp = allocate_array_elem(&fg->outputs, sizeof(*ofp), &fg->nb_outputs);
659  if (!ofp)
660  return NULL;
661 
662  ofilter = &ofp->ofilter;
663  ofilter->class = &ofilter_class;
664  ofp->log_parent = fg;
665  ofilter->graph = fg;
666  ofilter->type = type;
667  ofp->format = -1;
670  ofp->index = fg->nb_outputs - 1;
671 
672  snprintf(ofp->log_name, sizeof(ofp->log_name), "%co%d",
674 
675  return ofilter;
676 }
677 
678 static int ifilter_bind_ist(InputFilter *ifilter, InputStream *ist,
679  const ViewSpecifier *vs)
680 {
681  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
682  FilterGraphPriv *fgp = fgp_from_fg(ifilter->graph);
684  int ret;
685 
686  av_assert0(!ifp->bound);
687  ifp->bound = 1;
688 
689  if (ifp->type != ist->par->codec_type &&
691  av_log(fgp, AV_LOG_ERROR, "Tried to connect %s stream to %s filtergraph input\n",
693  return AVERROR(EINVAL);
694  }
695 
696  ifp->type_src = ist->st->codecpar->codec_type;
697 
698  ifp->opts.fallback = av_frame_alloc();
699  if (!ifp->opts.fallback)
700  return AVERROR(ENOMEM);
701 
702  ret = ist_filter_add(ist, ifilter, filtergraph_is_simple(ifilter->graph),
703  vs, &ifp->opts, &src);
704  if (ret < 0)
705  return ret;
706 
707  ret = sch_connect(fgp->sch,
708  src, SCH_FILTER_IN(fgp->sch_idx, ifp->index));
709  if (ret < 0)
710  return ret;
711 
712  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE) {
713  ifp->sub2video.frame = av_frame_alloc();
714  if (!ifp->sub2video.frame)
715  return AVERROR(ENOMEM);
716 
717  ifp->width = ifp->opts.sub2video_width;
718  ifp->height = ifp->opts.sub2video_height;
719 
720  /* rectangles are AV_PIX_FMT_PAL8, but we have no guarantee that the
721  palettes for all rectangles are identical or compatible */
722  ifp->format = AV_PIX_FMT_RGB32;
723 
724  ifp->time_base = AV_TIME_BASE_Q;
725 
726  av_log(fgp, AV_LOG_VERBOSE, "sub2video: using %dx%d canvas\n",
727  ifp->width, ifp->height);
728  }
729 
730  return 0;
731 }
732 
734  const ViewSpecifier *vs)
735 {
738  int ret;
739 
740  av_assert0(!ifp->bound);
741  ifp->bound = 1;
742 
743  if (ifp->type != dec->type) {
744  av_log(fgp, AV_LOG_ERROR, "Tried to connect %s decoder to %s filtergraph input\n",
746  return AVERROR(EINVAL);
747  }
748 
749  ifp->type_src = ifp->type;
750 
751  ret = dec_filter_add(dec, &ifp->ifilter, &ifp->opts, vs, &src);
752  if (ret < 0)
753  return ret;
754 
755  ret = sch_connect(fgp->sch, src, SCH_FILTER_IN(fgp->sch_idx, ifp->index));
756  if (ret < 0)
757  return ret;
758 
759  return 0;
760 }
761 
762 static int set_channel_layout(OutputFilterPriv *f, const AVChannelLayout *layouts_allowed,
763  const AVChannelLayout *layout_requested)
764 {
765  int i, err;
766 
767  if (layout_requested->order != AV_CHANNEL_ORDER_UNSPEC) {
768  /* Pass the layout through for all orders but UNSPEC */
769  err = av_channel_layout_copy(&f->ch_layout, layout_requested);
770  if (err < 0)
771  return err;
772  return 0;
773  }
774 
775  /* Requested layout is of order UNSPEC */
776  if (!layouts_allowed) {
777  /* Use the default native layout for the requested amount of channels when the
778  encoder doesn't have a list of supported layouts */
779  av_channel_layout_default(&f->ch_layout, layout_requested->nb_channels);
780  return 0;
781  }
782  /* Encoder has a list of supported layouts. Pick the first layout in it with the
783  same amount of channels as the requested layout */
784  for (i = 0; layouts_allowed[i].nb_channels; i++) {
785  if (layouts_allowed[i].nb_channels == layout_requested->nb_channels)
786  break;
787  }
788  if (layouts_allowed[i].nb_channels) {
789  /* Use it if one is found */
790  err = av_channel_layout_copy(&f->ch_layout, &layouts_allowed[i]);
791  if (err < 0)
792  return err;
793  return 0;
794  }
795  /* If no layout for the amount of channels requested was found, use the default
796  native layout for it. */
797  av_channel_layout_default(&f->ch_layout, layout_requested->nb_channels);
798 
799  return 0;
800 }
801 
// Bind a filtergraph output to an encoder: copies the desired output
// parameters (format/size/rate/layout or the encoder's supported lists when
// unset) into the private output context, sets up logging names, and
// connects the scheduler edge from this filtergraph output to the encoder.
// Returns 0 on success or a negative AVERROR code.
802 int ofilter_bind_enc(OutputFilter *ofilter, unsigned sched_idx_enc,
803  const OutputFilterOptions *opts)
804 {
805  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
806  FilterGraph *fg = ofilter->graph;
807  FilterGraphPriv *fgp = fgp_from_fg(fg);
808  int ret;
809 
// an output may be bound at most once, and only to an encoder of its type
810  av_assert0(!ofilter->bound);
811  av_assert0(!opts->enc ||
812  ofilter->type == opts->enc->type);
813 
814  ofilter->bound = 1;
815  av_freep(&ofilter->linklabel);
816 
817  ofp->flags = opts->flags;
818  ofp->ts_offset = opts->ts_offset;
819  ofp->enc_timebase = opts->output_tb;
820 
821  ofp->trim_start_us = opts->trim_start_us;
822  ofp->trim_duration_us = opts->trim_duration_us;
823 
824  ofp->name = av_strdup(opts->name);
825  if (!ofp->name)
// NOTE(review): this is an allocation failure -- conventionally it should
// return AVERROR(ENOMEM) rather than EINVAL.
826  return AVERROR(EINVAL);
827 
828  ret = av_dict_copy(&ofp->sws_opts, opts->sws_opts, 0);
829  if (ret < 0)
830  return ret;
831 
832  ret = av_dict_copy(&ofp->swr_opts, opts->swr_opts, 0);
833  if (ret < 0)
834  return ret;
835 
836  if (opts->flags & OFILTER_FLAG_AUDIO_24BIT)
837  av_dict_set(&ofp->swr_opts, "output_sample_bits", "24", 0);
838 
839  if (fgp->is_simple) {
840  // for simple filtergraph there is just one output,
841  // so use only graph-level information for logging
842  ofp->log_parent = NULL;
843  av_strlcpy(ofp->log_name, fgp->log_name, sizeof(ofp->log_name));
844  } else
845  av_strlcatf(ofp->log_name, sizeof(ofp->log_name), "->%s", ofp->name);
846 
// per-media-type parameters: fixed values from opts take precedence,
// otherwise fall back to the encoder's supported-value lists
847  switch (ofilter->type) {
848  case AVMEDIA_TYPE_VIDEO:
849  ofp->width = opts->width;
850  ofp->height = opts->height;
851  if (opts->format != AV_PIX_FMT_NONE) {
852  ofp->format = opts->format;
853  } else
854  ofp->formats = opts->formats;
855 
856  if (opts->color_space != AVCOL_SPC_UNSPECIFIED)
857  ofp->color_space = opts->color_space;
858  else
859  ofp->color_spaces = opts->color_spaces;
860 
861  if (opts->color_range != AVCOL_RANGE_UNSPECIFIED)
862  ofp->color_range = opts->color_range;
863  else
864  ofp->color_ranges = opts->color_ranges;
865 
// NOTE(review): the listing is missing original line 866 here -- confirm
// what statement stood between the color_range handling and the fps setup.
867 
868  ofp->fps.last_frame = av_frame_alloc();
869  if (!ofp->fps.last_frame)
870  return AVERROR(ENOMEM);
871 
872  ofp->fps.vsync_method = opts->vsync_method;
873  ofp->fps.framerate = opts->frame_rate;
874  ofp->fps.framerate_max = opts->max_frame_rate;
875  ofp->fps.framerate_supported = opts->frame_rates;
876 
877  // reduce frame rate for mpeg4 to be within the spec limits
878  if (opts->enc && opts->enc->id == AV_CODEC_ID_MPEG4)
879  ofp->fps.framerate_clip = 65535;
880 
881  ofp->fps.dup_warning = 1000;
882 
883  break;
884  case AVMEDIA_TYPE_AUDIO:
885  if (opts->format != AV_SAMPLE_FMT_NONE) {
886  ofp->format = opts->format;
887  } else {
888  ofp->formats = opts->formats;
889  }
890  if (opts->sample_rate) {
891  ofp->sample_rate = opts->sample_rate;
892  } else
893  ofp->sample_rates = opts->sample_rates;
894  if (opts->ch_layout.nb_channels) {
895  int ret = set_channel_layout(ofp, opts->ch_layouts, &opts->ch_layout);
896  if (ret < 0)
897  return ret;
898  } else {
899  ofp->ch_layouts = opts->ch_layouts;
900  }
901  break;
902  }
903 
// register the dataflow edge: this filtergraph output feeds the encoder
904  ret = sch_connect(fgp->sch, SCH_FILTER_OUT(fgp->sch_idx, ofp->index),
905  SCH_ENC(sched_idx_enc));
906  if (ret < 0)
907  return ret;
908 
909  return 0;
910 }
911 
913  const OutputFilterOptions *opts)
914 {
915  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
916 
917  av_assert0(!ofilter->bound);
918  av_assert0(ofilter->type == ifp->type);
919 
920  ofilter->bound = 1;
921  av_freep(&ofilter->linklabel);
922 
923  ofp->name = av_strdup(opts->name);
924  if (!ofp->name)
925  return AVERROR(EINVAL);
926 
927  av_strlcatf(ofp->log_name, sizeof(ofp->log_name), "->%s", ofp->name);
928 
929  return 0;
930 }
931 
932 static int ifilter_bind_fg(InputFilterPriv *ifp, FilterGraph *fg_src, int out_idx)
933 {
935  OutputFilter *ofilter_src = fg_src->outputs[out_idx];
937  char name[32];
938  int ret;
939 
940  av_assert0(!ifp->bound);
941  ifp->bound = 1;
942 
943  if (ifp->type != ofilter_src->type) {
944  av_log(fgp, AV_LOG_ERROR, "Tried to connect %s output to %s input\n",
945  av_get_media_type_string(ofilter_src->type),
947  return AVERROR(EINVAL);
948  }
949 
950  ifp->type_src = ifp->type;
951 
952  memset(&opts, 0, sizeof(opts));
953 
954  snprintf(name, sizeof(name), "fg:%d:%d", fgp->fg.index, ifp->index);
955  opts.name = name;
956 
957  ret = ofilter_bind_ifilter(ofilter_src, ifp, &opts);
958  if (ret < 0)
959  return ret;
960 
961  ret = sch_connect(fgp->sch, SCH_FILTER_OUT(fg_src->index, out_idx),
962  SCH_FILTER_IN(fgp->sch_idx, ifp->index));
963  if (ret < 0)
964  return ret;
965 
966  return 0;
967 }
968 
970 {
971  InputFilterPriv *ifp;
972  InputFilter *ifilter;
973 
974  ifp = allocate_array_elem(&fg->inputs, sizeof(*ifp), &fg->nb_inputs);
975  if (!ifp)
976  return NULL;
977 
978  ifilter = &ifp->ifilter;
979  ifilter->graph = fg;
980 
981  ifp->frame = av_frame_alloc();
982  if (!ifp->frame)
983  return NULL;
984 
985  ifp->index = fg->nb_inputs - 1;
986  ifp->format = -1;
989 
991  if (!ifp->frame_queue)
992  return NULL;
993 
994  return ifilter;
995 }
996 
// Free a FilterGraph and everything it owns: all input filters (including
// their queued frames and sub2video state), all output filters, the graph
// description string and the scratch frames.  *pfg is set to NULL; a NULL
// *pfg is a no-op.
997 void fg_free(FilterGraph **pfg)
998 {
999  FilterGraph *fg = *pfg;
1000  FilterGraphPriv *fgp;
1001 
1002  if (!fg)
1003  return;
1004  fgp = fgp_from_fg(fg);
1005 
1006  for (int j = 0; j < fg->nb_inputs; j++) {
1007  InputFilter *ifilter = fg->inputs[j];
1008  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1009 
// drain and free any frames still queued for this input
1010  if (ifp->frame_queue) {
1011  AVFrame *frame;
1012  while (av_fifo_read(ifp->frame_queue, &frame, 1) >= 0)
1013  av_frame_free(&frame);
1014  av_fifo_freep2(&ifp->frame_queue);
1015  }
1016  av_frame_free(&ifp->sub2video.frame);
1017 
1018  av_frame_free(&ifp->frame);
1019  av_frame_free(&ifp->opts.fallback);
1020 
// NOTE(review): the listing is missing original line 1021 here (likely
// further per-input cleanup, e.g. a channel-layout uninit) -- confirm.
1022  av_freep(&ifp->linklabel);
1023  av_freep(&ifp->opts.name);
// NOTE(review): original line 1024 is also missing from the listing.
1025  av_freep(&ifilter->name);
1026  av_freep(&fg->inputs[j]);
1027  }
1028  av_freep(&fg->inputs);
1029  for (int j = 0; j < fg->nb_outputs; j++) {
1030  OutputFilter *ofilter = fg->outputs[j];
1031  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
1032 
1033  av_frame_free(&ofp->fps.last_frame);
1034  av_dict_free(&ofp->sws_opts);
1035  av_dict_free(&ofp->swr_opts);
1036 
1037  av_freep(&ofilter->linklabel);
1038  av_freep(&ofilter->name);
1039  av_freep(&ofilter->apad);
1040  av_freep(&ofp->name);
// NOTE(review): original lines 1041-1042 are missing from the listing
// (likely further per-output cleanup, e.g. a channel-layout uninit).
1043  av_freep(&fg->outputs[j]);
1044  }
1045  av_freep(&fg->outputs);
1046  av_freep(&fgp->graph_desc);
1047 
1048  av_frame_free(&fgp->frame);
1049  av_frame_free(&fgp->frame_enc);
1050 
1051  av_freep(pfg);
1052 }
1053 
1054 static const char *fg_item_name(void *obj)
1055 {
1056  const FilterGraphPriv *fgp = obj;
1057 
1058  return fgp->log_name;
1059 }
1060 
// AVClass describing FilterGraph(Priv) objects for av_log(): entries are
// labeled via fg_item_name().
1061 static const AVClass fg_class = {
1062  .class_name = "FilterGraph",
1063  .version = LIBAVUTIL_VERSION_INT,
1064  .item_name = fg_item_name,
1065  .category = AV_CLASS_CATEGORY_FILTER,
1066 };
1067 
1068 int fg_create(FilterGraph **pfg, char *graph_desc, Scheduler *sch)
1069 {
1070  FilterGraphPriv *fgp;
1071  FilterGraph *fg;
1072 
1074  AVFilterGraph *graph;
1075  int ret = 0;
1076 
1077  fgp = av_mallocz(sizeof(*fgp));
1078  if (!fgp) {
1079  av_freep(&graph_desc);
1080  return AVERROR(ENOMEM);
1081  }
1082  fg = &fgp->fg;
1083 
1084  if (pfg) {
1085  *pfg = fg;
1086  fg->index = -1;
1087  } else {
1089  if (ret < 0) {
1090  av_freep(&graph_desc);
1091  av_freep(&fgp);
1092  return ret;
1093  }
1094 
1095  fg->index = nb_filtergraphs - 1;
1096  }
1097 
1098  fg->class = &fg_class;
1099  fgp->graph_desc = graph_desc;
1101  fgp->nb_threads = -1;
1102  fgp->sch = sch;
1103 
1104  snprintf(fgp->log_name, sizeof(fgp->log_name), "fc#%d", fg->index);
1105 
1106  fgp->frame = av_frame_alloc();
1107  fgp->frame_enc = av_frame_alloc();
1108  if (!fgp->frame || !fgp->frame_enc)
1109  return AVERROR(ENOMEM);
1110 
1111  /* this graph is only used for determining the kinds of inputs
1112  * and outputs we have, and is discarded on exit from this function */
1113  graph = avfilter_graph_alloc();
1114  if (!graph)
1115  return AVERROR(ENOMEM);;
1116  graph->nb_threads = 1;
1117 
1118  ret = graph_parse(fg, graph, fgp->graph_desc, &inputs, &outputs,
1120  if (ret < 0)
1121  goto fail;
1122 
1123  for (unsigned i = 0; i < graph->nb_filters; i++) {
1124  const AVFilter *f = graph->filters[i]->filter;
1125  if ((!avfilter_filter_pad_count(f, 0) &&
1126  !(f->flags & AVFILTER_FLAG_DYNAMIC_INPUTS)) ||
1127  !strcmp(f->name, "apad")) {
1128  fgp->have_sources = 1;
1129  break;
1130  }
1131  }
1132 
1133  for (AVFilterInOut *cur = inputs; cur; cur = cur->next) {
1134  InputFilter *const ifilter = ifilter_alloc(fg);
1135  InputFilterPriv *ifp;
1136 
1137  if (!ifilter) {
1138  ret = AVERROR(ENOMEM);
1139  goto fail;
1140  }
1141 
1142  ifp = ifp_from_ifilter(ifilter);
1143  ifp->linklabel = cur->name;
1144  cur->name = NULL;
1145 
1146  ifp->type = avfilter_pad_get_type(cur->filter_ctx->input_pads,
1147  cur->pad_idx);
1148 
1149  if (ifp->type != AVMEDIA_TYPE_VIDEO && ifp->type != AVMEDIA_TYPE_AUDIO) {
1150  av_log(fg, AV_LOG_FATAL, "Only video and audio filters supported "
1151  "currently.\n");
1152  ret = AVERROR(ENOSYS);
1153  goto fail;
1154  }
1155 
1156  ifilter->name = describe_filter_link(fg, cur, 1);
1157  if (!ifilter->name) {
1158  ret = AVERROR(ENOMEM);
1159  goto fail;
1160  }
1161  }
1162 
1163  for (AVFilterInOut *cur = outputs; cur; cur = cur->next) {
1164  const enum AVMediaType type = avfilter_pad_get_type(cur->filter_ctx->output_pads,
1165  cur->pad_idx);
1166  OutputFilter *const ofilter = ofilter_alloc(fg, type);
1167 
1168  if (!ofilter) {
1169  ret = AVERROR(ENOMEM);
1170  goto fail;
1171  }
1172 
1173  ofilter->linklabel = cur->name;
1174  cur->name = NULL;
1175 
1176  ofilter->name = describe_filter_link(fg, cur, 0);
1177  if (!ofilter->name) {
1178  ret = AVERROR(ENOMEM);
1179  goto fail;
1180  }
1181  }
1182 
1183  if (!fg->nb_outputs) {
1184  av_log(fg, AV_LOG_FATAL, "A filtergraph has zero outputs, this is not supported\n");
1185  ret = AVERROR(ENOSYS);
1186  goto fail;
1187  }
1188 
1189  ret = sch_add_filtergraph(sch, fg->nb_inputs, fg->nb_outputs,
1190  filter_thread, fgp);
1191  if (ret < 0)
1192  goto fail;
1193  fgp->sch_idx = ret;
1194 
1195 fail:
1198  avfilter_graph_free(&graph);
1199 
1200  if (ret < 0)
1201  return ret;
1202 
1203  return 0;
1204 }
1205 
1207  InputStream *ist,
1208  char *graph_desc,
1209  Scheduler *sch, unsigned sched_idx_enc,
1210  const OutputFilterOptions *opts)
1211 {
1212  const enum AVMediaType type = ist->par->codec_type;
1213  FilterGraph *fg;
1214  FilterGraphPriv *fgp;
1215  int ret;
1216 
1217  ret = fg_create(pfg, graph_desc, sch);
1218  if (ret < 0)
1219  return ret;
1220  fg = *pfg;
1221  fgp = fgp_from_fg(fg);
1222 
1223  fgp->is_simple = 1;
1224 
1225  snprintf(fgp->log_name, sizeof(fgp->log_name), "%cf%s",
1226  av_get_media_type_string(type)[0], opts->name);
1227 
1228  if (fg->nb_inputs != 1 || fg->nb_outputs != 1) {
1229  av_log(fg, AV_LOG_ERROR, "Simple filtergraph '%s' was expected "
1230  "to have exactly 1 input and 1 output. "
1231  "However, it had %d input(s) and %d output(s). Please adjust, "
1232  "or use a complex filtergraph (-filter_complex) instead.\n",
1233  graph_desc, fg->nb_inputs, fg->nb_outputs);
1234  return AVERROR(EINVAL);
1235  }
1236  if (fg->outputs[0]->type != type) {
1237  av_log(fg, AV_LOG_ERROR, "Filtergraph has a %s output, cannot connect "
1238  "it to %s output stream\n",
1241  return AVERROR(EINVAL);
1242  }
1243 
1244  ret = ifilter_bind_ist(fg->inputs[0], ist, opts->vs);
1245  if (ret < 0)
1246  return ret;
1247 
1248  ret = ofilter_bind_enc(fg->outputs[0], sched_idx_enc, opts);
1249  if (ret < 0)
1250  return ret;
1251 
1252  if (opts->nb_threads >= 0)
1253  fgp->nb_threads = opts->nb_threads;
1254 
1255  return 0;
1256 }
1257 
/* NOTE(review): doxygen export — the signature (orig. line 1258) and several
 * interior lines (1264, 1293, 1342, 1358) were dropped. Presumably this is
 * static int fg_complex_bind_input(FilterGraph *fg, InputFilter *ifilter)
 * (see the call in bind_inputs() below — confirm against upstream source).
 * Binds one complex-filtergraph input pad to its data source, trying in order:
 *   1. a standalone decoder, for "dec:N" link labels;
 *   2. an unbound output of another filtergraph with a matching label;
 *   3. an explicitly specified demuxer stream ("file:stream" specifier);
 *   4. any still-unused input stream of the matching media type.
 * Returns 0 on success, a negative AVERROR code on failure. */
1259 {
1260  FilterGraphPriv *fgp = fgp_from_fg(fg);
1261  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1262  InputStream *ist = NULL;
1263  enum AVMediaType type = ifp->type;
1265  const char *spec;
1266  char *p;
1267  int i, ret;
1268 
      /* Case 1: "dec:N" — bind to standalone decoder with index N. */
1269  if (ifp->linklabel && !strncmp(ifp->linklabel, "dec:", 4)) {
1270  // bind to a standalone decoder
1271  int dec_idx;
1272 
1273  dec_idx = strtol(ifp->linklabel + 4, &p, 0);
1274  if (dec_idx < 0 || dec_idx >= nb_decoders) {
1275  av_log(fg, AV_LOG_ERROR, "Invalid decoder index %d in filtergraph description %s\n",
1276  dec_idx, fgp->graph_desc);
1277  return AVERROR(EINVAL);
1278  }
1279 
      /* Video inputs may carry a trailing view specifier after ':'. */
1280  if (type == AVMEDIA_TYPE_VIDEO) {
1281  spec = *p == ':' ? p + 1 : p;
1282  ret = view_specifier_parse(&spec, &vs);
1283  if (ret < 0)
1284  return ret;
1285  }
1286 
1287  ret = ifilter_bind_dec(ifp, decoders[dec_idx], &vs);
1288  if (ret < 0)
1289  av_log(fg, AV_LOG_ERROR, "Error binding a decoder to filtergraph input %s\n",
1290  ifilter->name);
1291  return ret;
      /* Cases 2 and 3: any other link label. */
1292  } else if (ifp->linklabel) {
1294  AVFormatContext *s;
1295  AVStream *st = NULL;
1296  int file_idx;
1297 
1298  // try finding an unbound filtergraph output with this label
1299  for (int i = 0; i < nb_filtergraphs; i++) {
1300  FilterGraph *fg_src = filtergraphs[i];
1301 
1302  if (fg == fg_src)
1303  continue;
1304 
1305  for (int j = 0; j < fg_src->nb_outputs; j++) {
1306  OutputFilter *ofilter = fg_src->outputs[j];
1307 
1308  if (!ofilter->bound && ofilter->linklabel &&
1309  !strcmp(ofilter->linklabel, ifp->linklabel)) {
1310  av_log(fg, AV_LOG_VERBOSE,
1311  "Binding input with label '%s' to filtergraph output %d:%d\n",
1312  ifp->linklabel, i, j);
1313 
1314  ret = ifilter_bind_fg(ifp, fg_src, j);
1315  if (ret < 0)
1316  av_log(fg, AV_LOG_ERROR, "Error binding filtergraph input %s\n",
1317  ifp->linklabel);
1318  return ret;
1319  }
1320  }
1321  }
1322 
1323  // bind to an explicitly specified demuxer stream
1324  file_idx = strtol(ifp->linklabel, &p, 0);
1325  if (file_idx < 0 || file_idx >= nb_input_files) {
1326  av_log(fg, AV_LOG_FATAL, "Invalid file index %d in filtergraph description %s.\n",
1327  file_idx, fgp->graph_desc);
1328  return AVERROR(EINVAL);
1329  }
1330  s = input_files[file_idx]->ctx;
1331 
1332  ret = stream_specifier_parse(&ss, *p == ':' ? p + 1 : p, 1, fg);
1333  if (ret < 0) {
1334  av_log(fg, AV_LOG_ERROR, "Invalid stream specifier: %s\n", p);
1335  return ret;
1336  }
1337 
1338  if (type == AVMEDIA_TYPE_VIDEO) {
1339  spec = ss.remainder ? ss.remainder : "";
1340  ret = view_specifier_parse(&spec, &vs);
1341  if (ret < 0) {
1343  return ret;
1344  }
1345  }
1346 
      /* Find the first stream in the file matching the specifier; subtitle
       * streams are accepted for video inputs (sub2video). */
1347  for (i = 0; i < s->nb_streams; i++) {
1348  enum AVMediaType stream_type = s->streams[i]->codecpar->codec_type;
1349  if (stream_type != type &&
1350  !(stream_type == AVMEDIA_TYPE_SUBTITLE &&
1351  type == AVMEDIA_TYPE_VIDEO /* sub2video hack */))
1352  continue;
1353  if (stream_specifier_match(&ss, s, s->streams[i], fg)) {
1354  st = s->streams[i];
1355  break;
1356  }
1357  }
1359  if (!st) {
1360  av_log(fg, AV_LOG_FATAL, "Stream specifier '%s' in filtergraph description %s "
1361  "matches no streams.\n", p, fgp->graph_desc);
1362  return AVERROR(EINVAL);
1363  }
1364  ist = input_files[file_idx]->streams[st->index];
1365 
1366  av_log(fg, AV_LOG_VERBOSE,
1367  "Binding input with label '%s' to input stream %d:%d\n",
1368  ifp->linklabel, ist->file->index, ist->index);
      /* Case 4: unlabeled input — take any unused stream of this type. */
1369  } else {
1370  ist = ist_find_unused(type);
1371  if (!ist) {
1372  av_log(fg, AV_LOG_FATAL,
1373  "Cannot find an unused %s input stream to feed the "
1374  "unlabeled input pad %s.\n",
1375  av_get_media_type_string(type), ifilter->name);
1376  return AVERROR(EINVAL);
1377  }
1378 
1379  av_log(fg, AV_LOG_VERBOSE,
1380  "Binding unlabeled input %d to input stream %d:%d\n",
1381  ifp->index, ist->file->index, ist->index);
1382  }
1383  av_assert0(ist);
1384 
1385  ret = ifilter_bind_ist(ifilter, ist, &vs);
1386  if (ret < 0) {
1387  av_log(fg, AV_LOG_ERROR,
1388  "Error binding an input stream to complex filtergraph input %s.\n",
1389  ifilter->name);
1390  return ret;
1391  }
1392 
1393  return 0;
1394 }
1395 
/* Bind every not-yet-bound input of the given filtergraph to a data source
 * (input stream or another filtergraph) via fg_complex_bind_input().
 * Returns 0 on success, negative AVERROR on the first failure.
 * NOTE(review): orig. line 1400 (presumably the InputFilterPriv *ifp
 * declaration initialized from fg->inputs[i]) was dropped by this export. */
1396 static int bind_inputs(FilterGraph *fg)
1397 {
1398  // bind filtergraph inputs to input streams or other filtergraphs
1399  for (int i = 0; i < fg->nb_inputs; i++) {
1401  int ret;
1402 
      /* skip inputs already bound earlier (e.g. simple graphs) */
1403  if (ifp->bound)
1404  continue;
1405 
1406  ret = fg_complex_bind_input(fg, &ifp->ifilter);
1407  if (ret < 0)
1408  return ret;
1409  }
1410 
1411  return 0;
1412 }
1413 
/* NOTE(review): doxygen export — the signature (orig. line 1414, presumably
 * int fg_finalise_bindings(void)) and the loop body line 1419 (presumably
 * ret = bind_inputs(filtergraphs[i])) were dropped; confirm upstream.
 * Binds the inputs of all filtergraphs, then verifies that every filtergraph
 * output was connected to something; an unconnected output is a fatal
 * configuration error. */
1415 {
1416  int ret;
1417 
1418  for (int i = 0; i < nb_filtergraphs; i++) {
1420  if (ret < 0)
1421  return ret;
1422  }
1423 
1424  // check that all outputs were bound
1425  for (int i = 0; i < nb_filtergraphs; i++) {
1426  FilterGraph *fg = filtergraphs[i];
1427 
1428  for (int j = 0; j < fg->nb_outputs; j++) {
1429  OutputFilter *output = fg->outputs[j];
1430  if (!output->bound) {
1431  av_log(fg, AV_LOG_FATAL,
1432  "Filter '%s' has output %d (%s) unconnected\n",
1433  output->name, j,
1434  output->linklabel ? (const char *)output->linklabel : "unlabeled");
1435  return AVERROR(EINVAL);
1436  }
1437  }
1438  }
1439 
1440  return 0;
1441 }
1442 
/* Append a trim/atrim filter after *last_filter (selected by the pad's media
 * type) so that output start time and duration limits (-ss/-t) are enforced
 * inside the graph.  No-op when neither limit is set.  On success updates
 * *last_filter/*pad_idx to point at the trim filter's output.
 * NOTE(review): this export dropped orig. lines 1448 (presumably the
 * AVFilterContext *ctx declaration), 1470/1474 (presumably the
 * AV_OPT_SEARCH_CHILDREN argument of av_opt_set_int) and 1481 (presumably
 * avfilter_init_str(ctx, NULL)) — confirm against upstream. */
1443 static int insert_trim(void *logctx, int64_t start_time, int64_t duration,
1444  AVFilterContext **last_filter, int *pad_idx,
1445  const char *filter_name)
1446 {
1447  AVFilterGraph *graph = (*last_filter)->graph;
1449  const AVFilter *trim;
1450  enum AVMediaType type = avfilter_pad_get_type((*last_filter)->output_pads, *pad_idx);
1451  const char *name = (type == AVMEDIA_TYPE_VIDEO) ? "trim" : "atrim";
1452  int ret = 0;
1453 
      /* nothing to limit — leave the graph untouched */
1454  if (duration == INT64_MAX && start_time == AV_NOPTS_VALUE)
1455  return 0;
1456 
1457  trim = avfilter_get_by_name(name);
1458  if (!trim) {
1459  av_log(logctx, AV_LOG_ERROR, "%s filter not present, cannot limit "
1460  "recording time.\n", name);
1461  return AVERROR_FILTER_NOT_FOUND;
1462  }
1463 
1464  ctx = avfilter_graph_alloc_filter(graph, trim, filter_name);
1465  if (!ctx)
1466  return AVERROR(ENOMEM);
1467 
      /* "durationi"/"starti" are the integer (microsecond) option variants */
1468  if (duration != INT64_MAX) {
1469  ret = av_opt_set_int(ctx, "durationi", duration,
1471  }
1472  if (ret >= 0 && start_time != AV_NOPTS_VALUE) {
1473  ret = av_opt_set_int(ctx, "starti", start_time,
1475  }
1476  if (ret < 0) {
1477  av_log(ctx, AV_LOG_ERROR, "Error configuring the %s filter", name);
1478  return ret;
1479  }
1480 
1482  if (ret < 0)
1483  return ret;
1484 
1485  ret = avfilter_link(*last_filter, *pad_idx, ctx, 0);
1486  if (ret < 0)
1487  return ret;
1488 
1489  *last_filter = ctx;
1490  *pad_idx = 0;
1491  return 0;
1492 }
1493 
/* Create filter_name with the given args, link it after *last_filter and
 * advance *last_filter/*pad_idx to it.  The filter is expected to exist
 * (AVERROR_BUG otherwise — callers pass built-in names only).
 * NOTE(review): orig. lines 1499 (presumably AVFilterContext *ctx) and 1505
 * (presumably ret = avfilter_graph_create_filter(&ctx, ...)) were dropped by
 * this export. */
1494 static int insert_filter(AVFilterContext **last_filter, int *pad_idx,
1495  const char *filter_name, const char *args)
1496 {
1497  AVFilterGraph *graph = (*last_filter)->graph;
1498  const AVFilter *filter = avfilter_get_by_name(filter_name);
1500  int ret;
1501 
1502  if (!filter)
1503  return AVERROR_BUG;
1504 
1506  filter,
1507  filter_name, args, NULL, graph);
1508  if (ret < 0)
1509  return ret;
1510 
1511  ret = avfilter_link(*last_filter, *pad_idx, ctx, 0);
1512  if (ret < 0)
1513  return ret;
1514 
1515  *last_filter = ctx;
1516  *pad_idx = 0;
1517  return 0;
1518 }
1519 
/* NOTE(review): doxygen export — the first signature line (orig. 1520,
 * presumably static int configure_output_video_filter(FilterGraphPriv *fgp,
 * AVFilterGraph *graph, ...)) and several interior lines (1531, 1540, 1551,
 * 1561, 1563, 1571, 1573) were dropped; confirm upstream.
 * Terminates one video output of the graph: creates the buffersink, then
 * optionally inserts a scaler (when a fixed output size with autoscale is
 * requested), a format filter constraining pixel format / color space /
 * color range, and a trim filter, before linking to ofp->filter. */
1521  OutputFilter *ofilter, AVFilterInOut *out)
1522 {
1523  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
1524  AVFilterContext *last_filter = out->filter_ctx;
1525  AVBPrint bprint;
1526  int pad_idx = out->pad_idx;
1527  int ret;
1528  char name[255];
1529 
1530  snprintf(name, sizeof(name), "out_%s", ofp->name);
1532  avfilter_get_by_name("buffersink"),
1533  name, NULL, NULL, graph);
1534 
1535  if (ret < 0)
1536  return ret;
1537 
      /* optional auto-scaler to the requested fixed output size */
1538  if ((ofp->width || ofp->height) && (ofp->flags & OFILTER_FLAG_AUTOSCALE)) {
1539  char args[255];
1541  const AVDictionaryEntry *e = NULL;
1542 
1543  snprintf(args, sizeof(args), "%d:%d",
1544  ofp->width, ofp->height);
1545 
      /* forward user-supplied sws options to the scaler */
1546  while ((e = av_dict_iterate(ofp->sws_opts, e))) {
1547  av_strlcatf(args, sizeof(args), ":%s=%s", e->key, e->value);
1548  }
1549 
1550  snprintf(name, sizeof(name), "scaler_out_%s", ofp->name);
1552  name, args, NULL, graph)) < 0)
1553  return ret;
1554  if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
1555  return ret;
1556 
1557  last_filter = filter;
1558  pad_idx = 0;
1559  }
1560 
      /* constrain pixel formats / color properties with a "format" filter */
1562  ofp->format != AV_PIX_FMT_NONE || !ofp->formats);
1564  choose_pix_fmts(ofp, &bprint);
1565  choose_color_spaces(ofp, &bprint);
1566  choose_color_ranges(ofp, &bprint);
1567  if (!av_bprint_is_complete(&bprint))
1568  return AVERROR(ENOMEM);
1569 
1570  if (bprint.len) {
1572 
1574  avfilter_get_by_name("format"),
1575  "format", bprint.str, NULL, graph);
1576  av_bprint_finalize(&bprint, NULL);
1577  if (ret < 0)
1578  return ret;
1579  if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
1580  return ret;
1581 
1582  last_filter = filter;
1583  pad_idx = 0;
1584  }
1585 
1586  snprintf(name, sizeof(name), "trim_out_%s", ofp->name);
1587  ret = insert_trim(fgp, ofp->trim_start_us, ofp->trim_duration_us,
1588  &last_filter, &pad_idx, name);
1589  if (ret < 0)
1590  return ret;
1591 
1592 
1593  if ((ret = avfilter_link(last_filter, pad_idx, ofp->filter, 0)) < 0)
1594  return ret;
1595 
1596  return 0;
1597 }
1598 
/* NOTE(review): doxygen export — the first signature line (orig. 1599,
 * presumably static int configure_output_audio_filter(FilterGraphPriv *fgp,
 * AVFilterGraph *graph, ...)) and lines 1610, 1635, 1645, 1648 were dropped;
 * confirm upstream.
 * Terminates one audio output: creates the abuffersink, optionally inserts
 * an aformat filter (sample format / rate / channel layout constraints), an
 * apad filter when -apad was given, and a trim filter, then links to
 * ofp->filter.  Note: on success control falls through into the fail: label
 * with ret == 0, which only finalizes the bprint — not a bug. */
1600  OutputFilter *ofilter, AVFilterInOut *out)
1601 {
1602  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
1603  AVFilterContext *last_filter = out->filter_ctx;
1604  int pad_idx = out->pad_idx;
1605  AVBPrint args;
1606  char name[255];
1607  int ret;
1608 
1609  snprintf(name, sizeof(name), "out_%s", ofp->name);
1611  avfilter_get_by_name("abuffersink"),
1612  name, NULL, NULL, graph);
1613  if (ret < 0)
1614  return ret;
1615 
/* Helper: create filter_name=arg, link it after last_filter and advance.
 * Mirrors what -af filter_name=arg would do. */
1616 #define AUTO_INSERT_FILTER(opt_name, filter_name, arg) do { \
1617  AVFilterContext *filt_ctx; \
1618  \
1619  av_log(ofilter, AV_LOG_INFO, opt_name " is forwarded to lavfi " \
1620  "similarly to -af " filter_name "=%s.\n", arg); \
1621  \
1622  ret = avfilter_graph_create_filter(&filt_ctx, \
1623  avfilter_get_by_name(filter_name), \
1624  filter_name, arg, NULL, graph); \
1625  if (ret < 0) \
1626  goto fail; \
1627  \
1628  ret = avfilter_link(last_filter, pad_idx, filt_ctx, 0); \
1629  if (ret < 0) \
1630  goto fail; \
1631  \
1632  last_filter = filt_ctx; \
1633  pad_idx = 0; \
1634 } while (0)
1636 
1637  choose_sample_fmts(ofp, &args);
1638  choose_sample_rates(ofp, &args);
1639  choose_channel_layouts(ofp, &args);
1640  if (!av_bprint_is_complete(&args)) {
1641  ret = AVERROR(ENOMEM);
1642  goto fail;
1643  }
1644  if (args.len) {
1646 
1647  snprintf(name, sizeof(name), "format_out_%s", ofp->name);
1649  avfilter_get_by_name("aformat"),
1650  name, args.str, NULL, graph);
1651  if (ret < 0)
1652  goto fail;
1653 
1654  ret = avfilter_link(last_filter, pad_idx, format, 0);
1655  if (ret < 0)
1656  goto fail;
1657 
1658  last_filter = format;
1659  pad_idx = 0;
1660  }
1661 
1662  if (ofilter->apad) {
1663  AUTO_INSERT_FILTER("-apad", "apad", ofilter->apad);
      /* apad generates data on its own, so the graph now has a source */
1664  fgp->have_sources = 1;
1665  }
1666 
1667  snprintf(name, sizeof(name), "trim for output %s", ofp->name);
1668  ret = insert_trim(fgp, ofp->trim_start_us, ofp->trim_duration_us,
1669  &last_filter, &pad_idx, name);
1670  if (ret < 0)
1671  goto fail;
1672 
1673  if ((ret = avfilter_link(last_filter, pad_idx, ofp->filter, 0)) < 0)
1674  goto fail;
1675 fail:
1676  av_bprint_finalize(&args, NULL);
1677 
1678  return ret;
1679 }
1680 
/* Dispatch output configuration by media type (video/audio only).
 * NOTE(review): first signature line (orig. 1681) dropped by this export —
 * presumably static int configure_output_filter(FilterGraphPriv *fgp,
 * AVFilterGraph *graph, ...). */
1682  OutputFilter *ofilter, AVFilterInOut *out)
1683 {
1684  switch (ofilter->type) {
1685  case AVMEDIA_TYPE_VIDEO: return configure_output_video_filter(fgp, graph, ofilter, out);
1686  case AVMEDIA_TYPE_AUDIO: return configure_output_audio_filter(fgp, graph, ofilter, out);
1687  default: av_assert0(0); return 0;
1688  }
1689 }
1690 
/* Reset the sub2video state so the next heartbeat re-initializes it.
 * NOTE(review): signature (orig. 1691) dropped — presumably
 * static void sub2video_prepare(InputFilterPriv *ifp). */
1692 {
1693  ifp->sub2video.last_pts = INT64_MIN;
1694  ifp->sub2video.end_pts = INT64_MIN;
1695 
1696  /* sub2video structure has been (re-)initialized.
1697  Mark it as such so that the system will be
1698  initialized with the first received heartbeat. */
1699  ifp->sub2video.initialize = 1;
1700 }
1701 
/* NOTE(review): doxygen export — first signature line (orig. 1702, presumably
 * static int configure_input_video_filter(FilterGraph *fg, AVFilterGraph
 * *graph, ...)) and lines 1712 (AVBufferSrcParameters *par allocation —
 * TODO confirm) and 1752 (desc = av_pix_fmt_desc_get(ifp->format) —
 * TODO confirm) were dropped.
 * Creates the "buffer" source for one video input, pushes the stream
 * parameters into it, then optionally inserts crop, autorotation
 * (transpose/hflip/vflip/rotate based on the display matrix) and trim
 * filters before linking to the graph's input pad. */
1703  InputFilter *ifilter, AVFilterInOut *in)
1704 {
1705  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1706 
1707  AVFilterContext *last_filter;
1708  const AVFilter *buffer_filt = avfilter_get_by_name("buffer");
1709  const AVPixFmtDescriptor *desc;
1710  char name[255];
1711  int ret, pad_idx = 0;
1713  if (!par)
1714  return AVERROR(ENOMEM);
1715 
      /* subtitle streams feeding a video input need sub2video state */
1716  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE)
1717  sub2video_prepare(ifp);
1718 
1719  snprintf(name, sizeof(name), "graph %d input from stream %s", fg->index,
1720  ifp->opts.name);
1721 
1722  ifp->filter = avfilter_graph_alloc_filter(graph, buffer_filt, name);
1723  if (!ifp->filter) {
1724  ret = AVERROR(ENOMEM);
1725  goto fail;
1726  }
1727 
      /* describe the incoming frames to the buffer source */
1728  par->format = ifp->format;
1729  par->time_base = ifp->time_base;
1730  par->frame_rate = ifp->opts.framerate;
1731  par->width = ifp->width;
1732  par->height = ifp->height;
1733  par->sample_aspect_ratio = ifp->sample_aspect_ratio.den > 0 ?
1734  ifp->sample_aspect_ratio : (AVRational){ 0, 1 };
1735  par->color_space = ifp->color_space;
1736  par->color_range = ifp->color_range;
1737  par->hw_frames_ctx = ifp->hw_frames_ctx;
1738  par->side_data = ifp->side_data;
1739  par->nb_side_data = ifp->nb_side_data;
1740 
1741  ret = av_buffersrc_parameters_set(ifp->filter, par);
1742  if (ret < 0)
1743  goto fail;
1744  av_freep(&par);
1745 
1746  ret = avfilter_init_dict(ifp->filter, NULL);
1747  if (ret < 0)
1748  goto fail;
1749 
1750  last_filter = ifp->filter;
1751 
1753  av_assert0(desc);
1754 
1755  if ((ifp->opts.flags & IFILTER_FLAG_CROP)) {
1756  char crop_buf[64];
1757  snprintf(crop_buf, sizeof(crop_buf), "w=iw-%u-%u:h=ih-%u-%u:x=%u:y=%u",
1758  ifp->opts.crop_left, ifp->opts.crop_right,
1759  ifp->opts.crop_top, ifp->opts.crop_bottom,
1760  ifp->opts.crop_left, ifp->opts.crop_top);
1761  ret = insert_filter(&last_filter, &pad_idx, "crop", crop_buf);
1762  if (ret < 0)
1763  return ret;
1764  }
1765 
1766  // TODO: insert hwaccel enabled filters like transpose_vaapi into the graph
1767  ifp->displaymatrix_applied = 0;
1768  if ((ifp->opts.flags & IFILTER_FLAG_AUTOROTATE) &&
1769  !(desc->flags & AV_PIX_FMT_FLAG_HWACCEL)) {
1770  int32_t *displaymatrix = ifp->displaymatrix;
1771  double theta;
1772 
1773  theta = get_rotation(displaymatrix);
1774 
      /* pick the cheapest filter chain for ~90/180/270/arbitrary rotation */
1775  if (fabs(theta - 90) < 1.0) {
1776  ret = insert_filter(&last_filter, &pad_idx, "transpose",
1777  displaymatrix[3] > 0 ? "cclock_flip" : "clock");
1778  } else if (fabs(theta - 180) < 1.0) {
1779  if (displaymatrix[0] < 0) {
1780  ret = insert_filter(&last_filter, &pad_idx, "hflip", NULL);
1781  if (ret < 0)
1782  return ret;
1783  }
1784  if (displaymatrix[4] < 0) {
1785  ret = insert_filter(&last_filter, &pad_idx, "vflip", NULL);
1786  }
1787  } else if (fabs(theta - 270) < 1.0) {
1788  ret = insert_filter(&last_filter, &pad_idx, "transpose",
1789  displaymatrix[3] < 0 ? "clock_flip" : "cclock");
1790  } else if (fabs(theta) > 1.0) {
1791  char rotate_buf[64];
1792  snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
1793  ret = insert_filter(&last_filter, &pad_idx, "rotate", rotate_buf);
1794  } else if (fabs(theta) < 1.0) {
1795  if (displaymatrix && displaymatrix[4] < 0) {
1796  ret = insert_filter(&last_filter, &pad_idx, "vflip", NULL);
1797  }
1798  }
1799  if (ret < 0)
1800  return ret;
1801 
1802  ifp->displaymatrix_applied = 1;
1803  }
1804 
1805  snprintf(name, sizeof(name), "trim_in_%s", ifp->opts.name);
1806  ret = insert_trim(fg, ifp->opts.trim_start_us, ifp->opts.trim_end_us,
1807  &last_filter, &pad_idx, name);
1808  if (ret < 0)
1809  return ret;
1810 
1811  if ((ret = avfilter_link(last_filter, 0, in->filter_ctx, in->pad_idx)) < 0)
1812  return ret;
1813  return 0;
1814 fail:
1815  av_freep(&par);
1816 
1817  return ret;
1818 }
1819 
/* NOTE(review): doxygen export — first signature line (orig. 1820, presumably
 * static int configure_input_audio_filter(FilterGraph *fg, AVFilterGraph
 * *graph, ...)) and lines 1831 (av_bprint_init — TODO confirm), 1835
 * (sample-format name argument), 1837/1839 (channel-layout describe) and
 * 1848 (par = av_buffersrc_parameters_alloc() — TODO confirm) were dropped.
 * Creates the "abuffer" source for one audio input with the stream's
 * time base / rate / format / layout, attaches global side data, inserts a
 * trim filter, and links to the graph's input pad. */
1821  InputFilter *ifilter, AVFilterInOut *in)
1822 {
1823  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1824  AVFilterContext *last_filter;
1825  AVBufferSrcParameters *par;
1826  const AVFilter *abuffer_filt = avfilter_get_by_name("abuffer");
1827  AVBPrint args;
1828  char name[255];
1829  int ret, pad_idx = 0;
1830 
1832  av_bprintf(&args, "time_base=%d/%d:sample_rate=%d:sample_fmt=%s",
1833  ifp->time_base.num, ifp->time_base.den,
1834  ifp->sample_rate,
1836  if (av_channel_layout_check(&ifp->ch_layout) &&
1838  av_bprintf(&args, ":channel_layout=");
1840  } else
1841  av_bprintf(&args, ":channels=%d", ifp->ch_layout.nb_channels);
1842  snprintf(name, sizeof(name), "graph_%d_in_%s", fg->index, ifp->opts.name);
1843 
1844  if ((ret = avfilter_graph_create_filter(&ifp->filter, abuffer_filt,
1845  name, args.str, NULL,
1846  graph)) < 0)
1847  return ret;
1849  if (!par)
1850  return AVERROR(ENOMEM);
      /* side data cannot be passed via the args string — use parameters */
1851  par->side_data = ifp->side_data;
1852  par->nb_side_data = ifp->nb_side_data;
1853  ret = av_buffersrc_parameters_set(ifp->filter, par);
1854  av_free(par);
1855  if (ret < 0)
1856  return ret;
1857  last_filter = ifp->filter;
1858 
1859  snprintf(name, sizeof(name), "trim for input stream %s", ifp->opts.name);
1860  ret = insert_trim(fg, ifp->opts.trim_start_us, ifp->opts.trim_end_us,
1861  &last_filter, &pad_idx, name);
1862  if (ret < 0)
1863  return ret;
1864 
1865  if ((ret = avfilter_link(last_filter, 0, in->filter_ctx, in->pad_idx)) < 0)
1866  return ret;
1867 
1868  return 0;
1869 }
1870 
/* Dispatch input configuration by media type (video/audio only).
 * NOTE(review): first signature line (orig. 1871) dropped — presumably
 * static int configure_input_filter(FilterGraph *fg, AVFilterGraph *graph,
 * ...). */
1872  InputFilter *ifilter, AVFilterInOut *in)
1873 {
1874  switch (ifp_from_ifilter(ifilter)->type) {
1875  case AVMEDIA_TYPE_VIDEO: return configure_input_video_filter(fg, graph, ifilter, in);
1876  case AVMEDIA_TYPE_AUDIO: return configure_input_audio_filter(fg, graph, ifilter, in);
1877  default: av_assert0(0); return 0;
1878  }
1879 }
1880 
/* Drop all per-configuration state: clear the cached buffersrc/buffersink
 * pointers on every input/output and free the AVFilterGraph itself.
 * NOTE(review): signature (orig. 1881) dropped — presumably
 * static void cleanup_filtergraph(FilterGraph *fg, FilterGraphThread *fgt);
 * line 1884 (output filter = NULL assignment) also elided. */
1882 {
1883  for (int i = 0; i < fg->nb_outputs; i++)
1885  for (int i = 0; i < fg->nb_inputs; i++)
1886  ifp_from_ifilter(fg->inputs[i])->filter = NULL;
1887  avfilter_graph_free(&fgt->graph);
1888 }
1889 
/* Return 1 if the filter is a (a)buffer source: no inputs and one of the
 * two buffer-source filter names.
 * NOTE(review): signature (orig. 1890) dropped — presumably
 * static int filter_is_buffersrc(const AVFilterContext *f). */
1891 {
1892  return f->nb_inputs == 0 &&
1893  (!strcmp(f->filter->name, "buffer") ||
1894  !strcmp(f->filter->name, "abuffer"));
1895 }
1896 
/* Return 1 if every filter in the graph is metadata-only (or a sink /
 * buffer source), i.e. the graph never touches frame data.
 * NOTE(review): orig. line 1908 (presumably filter_is_buffersrc(f))) was
 * dropped by this export — it closes the condition below. */
1897 static int graph_is_meta(AVFilterGraph *graph)
1898 {
1899  for (unsigned i = 0; i < graph->nb_filters; i++) {
1900  const AVFilterContext *f = graph->filters[i];
1901 
1902  /* in addition to filters flagged as meta, also
1903  * disregard sinks and buffersources (but not other sources,
1904  * since they introduce data we are not aware of)
1905  */
1906  if (!((f->filter->flags & AVFILTER_FLAG_METADATA_ONLY) ||
1907  f->nb_outputs == 0 ||
1909  return 0;
1910  }
1911  return 1;
1912 }
1913 
1914 static int sub2video_frame(InputFilter *ifilter, AVFrame *frame, int buffer);
1915 
/* NOTE(review): doxygen export — the signature (orig. 1916, presumably
 * static int configure_filtergraph(FilterGraph *fg, FilterGraphThread *fgt))
 * and a number of interior lines (1944, 1960, 1971-1972, 1975, 1980, 1984,
 * 1987, 2006-2007, 2013, 2019, 2021-2022, 2026, 2030, 2033, 2040, 2048,
 * 2050, 2060, 2062, 2071) were dropped; confirm against upstream.
 * (Re)builds the libavfilter graph for this filtergraph: applies
 * simple-graph options (threads, sws/swr opts), parses the description,
 * configures all inputs and outputs, records the negotiated output
 * parameters, replays any queued input frames and propagates EOF for
 * already-finished inputs. */
1917 {
1918  FilterGraphPriv *fgp = fgp_from_fg(fg);
1919  AVBufferRef *hw_device;
1920  AVFilterInOut *inputs, *outputs, *cur;
1921  int ret = AVERROR_BUG, i, simple = filtergraph_is_simple(fg);
1922  int have_input_eof = 0;
1923  const char *graph_desc = fgp->graph_desc;
1924 
1925  cleanup_filtergraph(fg, fgt);
1926  fgt->graph = avfilter_graph_alloc();
1927  if (!fgt->graph)
1928  return AVERROR(ENOMEM);
1929 
      /* simple (1-in/1-out) graphs inherit per-output-stream options */
1930  if (simple) {
1931  OutputFilterPriv *ofp = ofp_from_ofilter(fg->outputs[0]);
1932 
1933  if (filter_nbthreads) {
1934  ret = av_opt_set(fgt->graph, "threads", filter_nbthreads, 0);
1935  if (ret < 0)
1936  goto fail;
1937  } else if (fgp->nb_threads >= 0) {
1938  ret = av_opt_set_int(fgt->graph, "threads", fgp->nb_threads, 0);
1939  if (ret < 0)
1940  return ret;
1941  }
1942 
1943  if (av_dict_count(ofp->sws_opts)) {
1945  &fgt->graph->scale_sws_opts,
1946  '=', ':');
1947  if (ret < 0)
1948  goto fail;
1949  }
1950 
1951  if (av_dict_count(ofp->swr_opts)) {
1952  char *args;
1953  ret = av_dict_get_string(ofp->swr_opts, &args, '=', ':');
1954  if (ret < 0)
1955  goto fail;
1956  av_opt_set(fgt->graph, "aresample_swr_opts", args, 0);
1957  av_free(args);
1958  }
1959  } else {
1961  }
1962 
1963  hw_device = hw_device_for_filter();
1964 
1965  ret = graph_parse(fg, fgt->graph, graph_desc, &inputs, &outputs, hw_device);
1966  if (ret < 0)
1967  goto fail;
1968 
1969  for (cur = inputs, i = 0; cur; cur = cur->next, i++)
1970  if ((ret = configure_input_filter(fg, fgt->graph, fg->inputs[i], cur)) < 0) {
1973  goto fail;
1974  }
1976 
1977  for (cur = outputs, i = 0; cur; cur = cur->next, i++) {
1978  ret = configure_output_filter(fgp, fgt->graph, fg->outputs[i], cur);
1979  if (ret < 0) {
1981  goto fail;
1982  }
1983  }
1985 
1986  if (fgp->disable_conversions)
1988  if ((ret = avfilter_graph_config(fgt->graph, NULL)) < 0)
1989  goto fail;
1990 
1991  fgp->is_meta = graph_is_meta(fgt->graph);
1992 
1993  /* limit the lists of allowed formats to the ones selected, to
1994  * make sure they stay the same if the filtergraph is reconfigured later */
1995  for (int i = 0; i < fg->nb_outputs; i++) {
1996  const AVFrameSideData *const *sd;
1997  int nb_sd;
1998  OutputFilter *ofilter = fg->outputs[i];
1999  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
2000  AVFilterContext *sink = ofp->filter;
2001 
2002  ofp->format = av_buffersink_get_format(sink);
2003 
2004  ofp->width = av_buffersink_get_w(sink);
2005  ofp->height = av_buffersink_get_h(sink);
2008 
2009  // If the timing parameters are not locked yet, get the tentative values
2010  // here but don't lock them. They will only be used if no output frames
2011  // are ever produced.
2012  if (!ofp->tb_out_locked) {
2014  if (ofp->fps.framerate.num <= 0 && ofp->fps.framerate.den <= 0 &&
2015  fr.num > 0 && fr.den > 0)
2016  ofp->fps.framerate = fr;
2017  ofp->tb_out = av_buffersink_get_time_base(sink);
2018  }
2020 
2023  ret = av_buffersink_get_ch_layout(sink, &ofp->ch_layout);
2024  if (ret < 0)
2025  goto fail;
2027  sd = av_buffersink_get_side_data(sink, &nb_sd);
2028  if (nb_sd)
2029  for (int j = 0; j < nb_sd; j++) {
2031  sd[j], 0);
2032  if (ret < 0) {
2034  goto fail;
2035  }
2036  }
2037  }
2038 
      /* replay frames queued while the graph was not configured */
2039  for (int i = 0; i < fg->nb_inputs; i++) {
2041  AVFrame *tmp;
2042  while (av_fifo_read(ifp->frame_queue, &tmp, 1) >= 0) {
2043  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE) {
2044  sub2video_frame(&ifp->ifilter, tmp, !fgt->graph);
2045  } else {
2046  if (ifp->type_src == AVMEDIA_TYPE_VIDEO) {
2047  if (ifp->displaymatrix_applied)
2049  }
2051  }
2052  av_frame_free(&tmp);
2053  if (ret < 0)
2054  goto fail;
2055  }
2056  }
2057 
2058  /* send the EOFs for the finished inputs */
2059  for (int i = 0; i < fg->nb_inputs; i++) {
2061  if (fgt->eof_in[i]) {
2063  if (ret < 0)
2064  goto fail;
2065  have_input_eof = 1;
2066  }
2067  }
2068 
2069  if (have_input_eof) {
2070  // make sure the EOF propagates to the end of the graph
2072  if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
2073  goto fail;
2074  }
2075 
2076  return 0;
2077 fail:
2078  cleanup_filtergraph(fg, fgt);
2079  return ret;
2080 }
2081 
/* NOTE(review): doxygen export — the signature (orig. 2082, presumably
 * static int ifilter_parameters_from_frame(InputFilter *ifilter, const
 * AVFrame *frame)) and lines 2109, 2116, 2123, 2131, 2133 were dropped.
 * Copies the format/geometry/timing/layout parameters and global side data
 * of a decoded frame into the InputFilterPriv, so the buffer source can be
 * (re)configured to match.  Also captures the display matrix and downmix
 * info side data separately. */
2083 {
2084  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2085  AVFrameSideData *sd;
2086  int ret;
2087 
2088  ret = av_buffer_replace(&ifp->hw_frames_ctx, frame->hw_frames_ctx);
2089  if (ret < 0)
2090  return ret;
2091 
      /* audio uses 1/sample_rate; CFR video the inverse framerate;
       * otherwise the frame's own time base */
2092  ifp->time_base = (ifp->type == AVMEDIA_TYPE_AUDIO) ? (AVRational){ 1, frame->sample_rate } :
2093  (ifp->opts.flags & IFILTER_FLAG_CFR) ? av_inv_q(ifp->opts.framerate) :
2094  frame->time_base;
2095 
2096  ifp->format = frame->format;
2097 
2098  ifp->width = frame->width;
2099  ifp->height = frame->height;
2100  ifp->sample_aspect_ratio = frame->sample_aspect_ratio;
2101  ifp->color_space = frame->colorspace;
2102  ifp->color_range = frame->color_range;
2103 
2104  ifp->sample_rate = frame->sample_rate;
2105  ret = av_channel_layout_copy(&ifp->ch_layout, &frame->ch_layout);
2106  if (ret < 0)
2107  return ret;
2108 
      /* keep only side data marked global (stream-level, not per-frame) */
2110  for (int i = 0; i < frame->nb_side_data; i++) {
2111  const AVSideDataDescriptor *desc = av_frame_side_data_desc(frame->side_data[i]->type);
2112 
2113  if (!(desc->props & AV_SIDE_DATA_PROP_GLOBAL))
2114  continue;
2115 
2117  &ifp->nb_side_data,
2118  frame->side_data[i], 0);
2119  if (ret < 0)
2120  return ret;
2121  }
2122 
2124  if (sd)
2125  memcpy(ifp->displaymatrix, sd->data, sizeof(ifp->displaymatrix));
2126  ifp->displaymatrix_present = !!sd;
2127 
2128  /* Copy downmix related side data to InputFilterPriv so it may be propagated
2129  * to the filter chain even though it's not "global", as filters like aresample
2130  * require this information during init and not when remixing a frame */
2132  if (sd) {
2134  &ifp->nb_side_data, sd, 0);
2135  if (ret < 0)
2136  return ret;
2137  memcpy(&ifp->downmixinfo, sd->data, sizeof(ifp->downmixinfo));
2138  }
2139  ifp->downmixinfo_present = !!sd;
2140 
2141  return 0;
2142 }
2143 
/* Return 1 when this is a simple (implicitly created, 1-in/1-out) graph.
 * NOTE(review): signature (orig. 2144) dropped — presumably
 * int filtergraph_is_simple(const FilterGraph *fg). */
2145 {
2146  const FilterGraphPriv *fgp = cfgp_from_cfg(fg);
2147  return fgp->is_simple;
2148 }
2149 
2150 static void send_command(FilterGraph *fg, AVFilterGraph *graph,
2151  double time, const char *target,
2152  const char *command, const char *arg, int all_filters)
2153 {
2154  int ret;
2155 
2156  if (!graph)
2157  return;
2158 
2159  if (time < 0) {
2160  char response[4096];
2161  ret = avfilter_graph_send_command(graph, target, command, arg,
2162  response, sizeof(response),
2163  all_filters ? 0 : AVFILTER_CMD_FLAG_ONE);
2164  fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s",
2165  fg->index, ret, response);
2166  } else if (!all_filters) {
2167  fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
2168  } else {
2169  ret = avfilter_graph_queue_command(graph, target, command, arg, 0, time);
2170  if (ret < 0)
2171  fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
2172  }
2173 }
2174 
2175 static int choose_input(const FilterGraph *fg, const FilterGraphThread *fgt)
2176 {
2177  int nb_requests, nb_requests_max = -1;
2178  int best_input = -1;
2179 
2180  for (int i = 0; i < fg->nb_inputs; i++) {
2181  InputFilter *ifilter = fg->inputs[i];
2182  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2183 
2184  if (fgt->eof_in[i])
2185  continue;
2186 
2187  nb_requests = av_buffersrc_get_nb_failed_requests(ifp->filter);
2188  if (nb_requests > nb_requests_max) {
2189  nb_requests_max = nb_requests;
2190  best_input = i;
2191  }
2192  }
2193 
2194  av_assert0(best_input >= 0);
2195 
2196  return best_input;
2197 }
2198 
/* NOTE(review): doxygen export — the signature (orig. 2199, presumably
 * static int choose_out_timebase(OutputFilterPriv *ofp, AVFrame *frame))
 * and line 2231 (presumably AVRational fr_sink =
 * av_buffersink_get_frame_rate(...)) were dropped; confirm upstream.
 * Decides the output time base (and, for video, the output framerate) from
 * -enc_time_base, the filter output, CFR constraints and the supported /
 * clipped framerate lists, then locks them in ofp->tb_out. */
2200 {
2201  OutputFilter *ofilter = &ofp->ofilter;
2202  FPSConvContext *fps = &ofp->fps;
2203  AVRational tb = (AVRational){ 0, 0 };
2204  AVRational fr;
2205  const FrameData *fd;
2206 
2207  fd = frame_data_c(frame);
2208 
2209  // apply -enc_time_base
2210  if (ofp->enc_timebase.num == ENC_TIME_BASE_DEMUX &&
2211  (fd->dec.tb.num <= 0 || fd->dec.tb.den <= 0)) {
2212  av_log(ofp, AV_LOG_ERROR,
2213  "Demuxing timebase not available - cannot use it for encoding\n");
2214  return AVERROR(EINVAL);
2215  }
2216 
2217  switch (ofp->enc_timebase.num) {
2218  case 0: break;
2219  case ENC_TIME_BASE_DEMUX: tb = fd->dec.tb; break;
2220  case ENC_TIME_BASE_FILTER: tb = frame->time_base; break;
2221  default: tb = ofp->enc_timebase; break;
2222  }
2223 
      /* audio: default to 1/sample_rate, no framerate handling needed */
2224  if (ofilter->type == AVMEDIA_TYPE_AUDIO) {
2225  tb = tb.num ? tb : (AVRational){ 1, frame->sample_rate };
2226  goto finish;
2227  }
2228 
2229  fr = fps->framerate;
2230  if (!fr.num) {
2232  if (fr_sink.num > 0 && fr_sink.den > 0)
2233  fr = fr_sink;
2234  }
2235 
2236  if (fps->vsync_method == VSYNC_CFR || fps->vsync_method == VSYNC_VSCFR) {
2237  if (!fr.num && !fps->framerate_max.num) {
2238  fr = (AVRational){25, 1};
2239  av_log(ofp, AV_LOG_WARNING,
2240  "No information "
2241  "about the input framerate is available. Falling "
2242  "back to a default value of 25fps. Use the -r option "
2243  "if you want a different framerate.\n");
2244  }
2245 
2246  if (fps->framerate_max.num &&
2247  (av_q2d(fr) > av_q2d(fps->framerate_max) ||
2248  !fr.den))
2249  fr = fps->framerate_max;
2250  }
2251 
2252  if (fr.num > 0) {
      /* snap to the encoder's supported framerates, then clip precision */
2253  if (fps->framerate_supported) {
2254  int idx = av_find_nearest_q_idx(fr, fps->framerate_supported);
2255  fr = fps->framerate_supported[idx];
2256  }
2257  if (fps->framerate_clip) {
2258  av_reduce(&fr.num, &fr.den,
2259  fr.num, fr.den, fps->framerate_clip);
2260  }
2261  }
2262 
      /* fall back: inverse framerate, then the frame's own time base */
2263  if (!(tb.num > 0 && tb.den > 0))
2264  tb = av_inv_q(fr);
2265  if (!(tb.num > 0 && tb.den > 0))
2266  tb = frame->time_base;
2267 
2268  fps->framerate = fr;
2269 finish:
2270  ofp->tb_out = tb;
2271  ofp->tb_out_locked = 1;
2272 
2273  return 0;
2274 }
2275 
/* Rescale frame->pts from the filter time base to tb_dst and return the same
 * timestamp as a double with extra fractional precision (used by the fps
 * conversion code).  Returns AV_NOPTS_VALUE when the frame has no pts.
 * NOTE(review): orig. lines 2290 and 2299 (presumably the
 * "- av_rescale_q(start_time, AV_TIME_BASE_Q, tb...)" continuations of the
 * two subtractions below) were dropped by this export — confirm upstream. */
2276 static double adjust_frame_pts_to_encoder_tb(void *logctx, AVFrame *frame,
2277  AVRational tb_dst, int64_t start_time)
2278 {
2279  double float_pts = AV_NOPTS_VALUE; // this is identical to frame.pts but with higher precision
2280 
2281  AVRational tb = tb_dst;
2282  AVRational filter_tb = frame->time_base;
      /* widen the denominator so the double keeps sub-tick precision */
2283  const int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
2284 
2285  if (frame->pts == AV_NOPTS_VALUE)
2286  goto early_exit;
2287 
2288  tb.den <<= extra_bits;
2289  float_pts = av_rescale_q(frame->pts, filter_tb, tb) -
2291  float_pts /= 1 << extra_bits;
2292  // when float_pts is not exactly an integer,
2293  // avoid exact midpoints to reduce the chance of rounding differences, this
2294  // can be removed in case the fps code is changed to work with integers
2295  if (float_pts != llrint(float_pts))
2296  float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
2297 
2298  frame->pts = av_rescale_q(frame->pts, filter_tb, tb_dst) -
2300  frame->time_base = tb_dst;
2301 
2302 early_exit:
2303 
2304  if (debug_ts) {
2305  av_log(logctx, AV_LOG_INFO,
2306  "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
2307  frame ? av_ts2str(frame->pts) : "NULL",
2308  av_ts2timestr(frame->pts, &tb_dst),
2309  float_pts, tb_dst.num, tb_dst.den);
2310  }
2311 
2312  return float_pts;
2313 }
2314 
2315 /* Convert frame timestamps to the encoder timebase and decide how many times
2316  * should this (and possibly previous) frame be repeated in order to conform to
2317  * desired target framerate (if any).
2318  */
/* NOTE(review): doxygen export — the signature's first line (orig. 2319,
 * presumably static void video_sync_process(OutputFilterPriv *ofp, AVFrame
 * *frame, ...)) and lines 2356-2357 (the opening of the
 * "#if FFMPEG_OPT_VSYNC_DROP" span whose #endif is visible below) were
 * dropped; confirm upstream. */
2320  int64_t *nb_frames, int64_t *nb_frames_prev)
2321 {
2322  OutputFilter *ofilter = &ofp->ofilter;
2323  FPSConvContext *fps = &ofp->fps;
2324  double delta0, delta, sync_ipts, duration;
2325 
      /* flush call: repeat the previous frame per recent history */
2326  if (!frame) {
2327  *nb_frames_prev = *nb_frames = mid_pred(fps->frames_prev_hist[0],
2328  fps->frames_prev_hist[1],
2329  fps->frames_prev_hist[2]);
2330 
2331  if (!*nb_frames && fps->last_dropped) {
2332  atomic_fetch_add(&ofilter->nb_frames_drop, 1);
2333  fps->last_dropped++;
2334  }
2335 
2336  goto finish;
2337  }
2338 
2339  duration = frame->duration * av_q2d(frame->time_base) / av_q2d(ofp->tb_out);
2340 
2341  sync_ipts = adjust_frame_pts_to_encoder_tb(ofilter->graph, frame,
2342  ofp->tb_out, ofp->ts_offset);
2343  /* delta0 is the "drift" between the input frame and
2344  * where it would fall in the output. */
2345  delta0 = sync_ipts - ofp->next_pts;
2346  delta = delta0 + duration;
2347 
2348  // tracks the number of times the PREVIOUS frame should be duplicated,
2349  // mostly for variable framerate (VFR)
2350  *nb_frames_prev = 0;
2351  /* by default, we output a single frame */
2352  *nb_frames = 1;
2353 
      /* frame is slightly early but still overlaps its slot: clip it */
2354  if (delta0 < 0 &&
2355  delta > 0 &&
2358  && fps->vsync_method != VSYNC_DROP
2359 #endif
2360  ) {
2361  if (delta0 < -0.6) {
2362  av_log(ofp, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
2363  } else
2364  av_log(ofp, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
2365  sync_ipts = ofp->next_pts;
2366  duration += delta0;
2367  delta0 = 0;
2368  }
2369 
2370  switch (fps->vsync_method) {
2371  case VSYNC_VSCFR:
      /* first frame: start the clock at the frame's own timestamp
       * instead of duplicating it up from zero; then fall through to CFR */
2372  if (fps->frame_number == 0 && delta0 >= 0.5) {
2373  av_log(ofp, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
2374  delta = duration;
2375  delta0 = 0;
2376  ofp->next_pts = llrint(sync_ipts);
2377  }
2378  case VSYNC_CFR:
2379  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
2380  if (frame_drop_threshold && delta < frame_drop_threshold && fps->frame_number) {
2381  *nb_frames = 0;
2382  } else if (delta < -1.1)
2383  *nb_frames = 0;
2384  else if (delta > 1.1) {
2385  *nb_frames = llrintf(delta);
2386  if (delta0 > 1.1)
2387  *nb_frames_prev = llrintf(delta0 - 0.6);
2388  }
2389  frame->duration = 1;
2390  break;
2391  case VSYNC_VFR:
2392  if (delta <= -0.6)
2393  *nb_frames = 0;
2394  else if (delta > 0.6)
2395  ofp->next_pts = llrint(sync_ipts);
2396  frame->duration = llrint(duration);
2397  break;
2398 #if FFMPEG_OPT_VSYNC_DROP
2399  case VSYNC_DROP:
2400 #endif
2401  case VSYNC_PASSTHROUGH:
2402  ofp->next_pts = llrint(sync_ipts);
2403  frame->duration = llrint(duration);
2404  break;
2405  default:
2406  av_assert0(0);
2407  }
2408 
2409 finish:
      /* shift the duplicate-count history and record this decision */
2410  memmove(fps->frames_prev_hist + 1,
2411  fps->frames_prev_hist,
2412  sizeof(fps->frames_prev_hist[0]) * (FF_ARRAY_ELEMS(fps->frames_prev_hist) - 1));
2413  fps->frames_prev_hist[0] = *nb_frames_prev;
2414 
2415  if (*nb_frames_prev == 0 && fps->last_dropped) {
2416  atomic_fetch_add(&ofilter->nb_frames_drop, 1);
2417  av_log(ofp, AV_LOG_VERBOSE,
2418  "*** dropping frame %"PRId64" at ts %"PRId64"\n",
2419  fps->frame_number, fps->last_frame->pts);
2420  }
2421  if (*nb_frames > (*nb_frames_prev && fps->last_dropped) + (*nb_frames > *nb_frames_prev)) {
2422  uint64_t nb_frames_dup;
      /* refuse absurd duplication counts caused by broken timestamps */
2423  if (*nb_frames > dts_error_threshold * 30) {
2424  av_log(ofp, AV_LOG_ERROR, "%"PRId64" frame duplication too large, skipping\n", *nb_frames - 1);
2425  atomic_fetch_add(&ofilter->nb_frames_drop, 1);
2426  *nb_frames = 0;
2427  return;
2428  }
2429  nb_frames_dup = atomic_fetch_add(&ofilter->nb_frames_dup,
2430  *nb_frames - (*nb_frames_prev && fps->last_dropped) - (*nb_frames > *nb_frames_prev));
2431  av_log(ofp, AV_LOG_VERBOSE, "*** %"PRId64" dup!\n", *nb_frames - 1);
2432  if (nb_frames_dup > fps->dup_warning) {
2433  av_log(ofp, AV_LOG_WARNING, "More than %"PRIu64" frames duplicated\n", fps->dup_warning);
2434  fps->dup_warning *= 10;
2435  }
2436  }
2437 
2438  fps->last_dropped = *nb_frames == *nb_frames_prev && frame;
2439  fps->dropped_keyframe |= fps->last_dropped && (frame->flags & AV_FRAME_FLAG_KEY);
2440 }
2441 
2443 {
 /* Finalize one filtergraph output: if no frame was ever produced on it,
  * first send a parameter-only dummy frame (no data buffers) so the
  * downstream encoder can still be initialized, then signal EOF to the
  * scheduler.  Returns 0 on success or a negative AVERROR.
  * NOTE(review): the function signature and the fgp/ofp declarations were
  * dropped by the extraction (original lines 2442/2444 missing). */
 2445  int ret;
 2446 
 2447  // we are finished and no frames were ever seen at this output,
 2448  // at least initialize the encoder with a dummy frame
 2449  if (!fgt->got_frame) {
 2450  AVFrame *frame = fgt->frame;
 2451  FrameData *fd;
 2452 
 /* Copy the negotiated output parameters onto the dummy frame. */
 2453  frame->time_base = ofp->tb_out;
 2454  frame->format = ofp->format;
 2455 
 2456  frame->width = ofp->width;
 2457  frame->height = ofp->height;
 2458  frame->sample_aspect_ratio = ofp->sample_aspect_ratio;
 2459 
 2460  frame->sample_rate = ofp->sample_rate;
 2461  if (ofp->ch_layout.nb_channels) {
 2462  ret = av_channel_layout_copy(&frame->ch_layout, &ofp->ch_layout);
 2463  if (ret < 0)
 2464  return ret;
 2465  }
 /* Replace any stale side data with the output's negotiated side data. */
 2466  av_frame_side_data_free(&frame->side_data, &frame->nb_side_data);
 2467  ret = clone_side_data(&frame->side_data, &frame->nb_side_data,
 2468  ofp->side_data, ofp->nb_side_data, 0);
 2469  if (ret < 0)
 2470  return ret;
 2471 
 2472  fd = frame_data(frame);
 2473  if (!fd)
 2474  return AVERROR(ENOMEM);
 2475 
 2476  fd->frame_rate_filter = ofp->fps.framerate;
 2477 
 /* The dummy frame must carry parameters only, never pixel/sample data. */
 2478  av_assert0(!frame->buf[0]);
 2479 
 2480  av_log(ofp, AV_LOG_WARNING,
 2481  "No filtered frames for output stream, trying to "
 2482  "initialize anyway.\n");
 2483 
 2484  ret = sch_filter_send(fgp->sch, fgp->sch_idx, ofp->index, frame);
 2485  if (ret < 0) {
 /* NOTE(review): original line 2486 (presumably av_frame_unref(frame))
  * was dropped by the extraction. */
 2487  return ret;
 2488  }
 2489  }
 2490 
 2491  fgt->eof_out[ofp->index] = 1;
 2492 
 /* NULL frame signals EOF to the consumers; EOF from the scheduler here
  * is the expected outcome and is mapped to success. */
 2493  ret = sch_filter_send(fgp->sch, fgp->sch_idx, ofp->index, NULL);
 2494  return (ret == AVERROR_EOF) ? 0 : ret;
 2495 }
2496 
2498  AVFrame *frame)
2499 {
 /* Emit one filtered frame (or EOF when frame == NULL) on an output:
  * runs video sync (possibly duplicating/dropping frames), stamps
  * pts/duration in the output timebase, and forwards the result to the
  * scheduler.  NOTE(review): the first signature line and the fgp
  * declaration (original lines 2497/2500) were dropped by the extraction. */
 2501  AVFrame *frame_prev = ofp->fps.last_frame;
 2502  enum AVMediaType type = ofp->ofilter.type;
 2503 
 /* With no sync, one input frame yields exactly one output frame. */
 2504  int64_t nb_frames = !!frame, nb_frames_prev = 0;
 2505 
 2506  if (type == AVMEDIA_TYPE_VIDEO && (frame || fgt->got_frame))
 2507  video_sync_process(ofp, frame, &nb_frames, &nb_frames_prev);
 2508 
 2509  for (int64_t i = 0; i < nb_frames; i++) {
 2510  AVFrame *frame_out;
 2511  int ret;
 2512 
 2513  if (type == AVMEDIA_TYPE_VIDEO) {
 /* The first nb_frames_prev iterations re-send the previous frame
  * (duplication decided by video sync); the rest send the new one. */
 2514  AVFrame *frame_in = (i < nb_frames_prev && frame_prev->buf[0]) ?
 2515  frame_prev : frame;
 2516  if (!frame_in)
 2517  break;
 2518 
 2519  frame_out = fgp->frame_enc;
 2520  ret = av_frame_ref(frame_out, frame_in);
 2521  if (ret < 0)
 2522  return ret;
 2523 
 2524  frame_out->pts = ofp->next_pts;
 2525 
 /* If a keyframe was dropped earlier, promote this frame to keyframe. */
 2526  if (ofp->fps.dropped_keyframe) {
 2527  frame_out->flags |= AV_FRAME_FLAG_KEY;
 2528  ofp->fps.dropped_keyframe = 0;
 2529  }
 2530  } else {
 /* Audio: rescale pts into the output timebase.
  * NOTE(review): original line 2533 (the ts_offset term of this
  * expression) was dropped by the extraction. */
 2531  frame->pts = (frame->pts == AV_NOPTS_VALUE) ? ofp->next_pts :
 2532  av_rescale_q(frame->pts, frame->time_base, ofp->tb_out) -
 2534 
 2535  frame->time_base = ofp->tb_out;
 2536  frame->duration = av_rescale_q(frame->nb_samples,
 2537  (AVRational){ 1, frame->sample_rate },
 2538  ofp->tb_out);
 2539 
 2540  ofp->next_pts = frame->pts + frame->duration;
 2541 
 2542  frame_out = frame;
 2543  }
 2544 
 2545  // send the frame to consumers
 2546  ret = sch_filter_send(fgp->sch, fgp->sch_idx, ofp->index, frame_out);
 2547  if (ret < 0) {
 2548  av_frame_unref(frame_out);
 2549 
 /* First failure on this output: count it as done so read_frames()
  * can terminate once all outputs are finished. */
 2550  if (!fgt->eof_out[ofp->index]) {
 2551  fgt->eof_out[ofp->index] = 1;
 2552  fgp->nb_outputs_done++;
 2553  }
 2554 
 2555  return ret == AVERROR_EOF ? 0 : ret;
 2556  }
 2557 
 2558  if (type == AVMEDIA_TYPE_VIDEO) {
 2559  ofp->fps.frame_number++;
 2560  ofp->next_pts++;
 2561 
 /* Only the first send of the new frame keeps the keyframe flag;
  * duplicates of it must not claim to be keyframes. */
 2562  if (i == nb_frames_prev && frame)
 2563  frame->flags &= ~AV_FRAME_FLAG_KEY;
 2564  }
 2565 
 2566  fgt->got_frame = 1;
 2567  }
 2568 
 /* Remember the current frame as "previous" for future duplication. */
 2569  if (frame && frame_prev) {
 2570  av_frame_unref(frame_prev);
 2571  av_frame_move_ref(frame_prev, frame);
 2572  }
 2573 
 2574  if (!frame)
 2575  return close_output(ofp, fgt);
 2576 
 2577  return 0;
 2578 }
2579 
2581  AVFrame *frame)
2582 {
 /* Pull a single frame from one buffersink and hand it to
  * fg_output_frame().  Returns 0 when a frame was processed, 1 when this
  * output has no more frames right now (EAGAIN/EOF), negative on error.
  * NOTE(review): the signature line and several statements (original
  * lines 2580, 2583, 2588-2589, 2603, 2607, 2619, 2626, 2630, 2639,
  * 2648) were dropped by the extraction — in particular the
  * av_buffersink_get_frame_flags() call that sets ret is not visible. */
 2584  AVFilterContext *filter = ofp->filter;
 2585  FrameData *fd;
 2586  int ret;
 2587 
 2590  if (ret == AVERROR_EOF && !fgt->eof_out[ofp->index]) {
 /* Sink reached EOF: flush the output once (NULL frame), then stop. */
 2591  ret = fg_output_frame(ofp, fgt, NULL);
 2592  return (ret < 0) ? ret : 1;
 2593  } else if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
 2594  return 1;
 2595  } else if (ret < 0) {
 2596  av_log(ofp, AV_LOG_WARNING,
 2597  "Error in retrieving a frame from the filtergraph: %s\n",
 2598  av_err2str(ret));
 2599  return ret;
 2600  }
 2601 
 /* Output already closed: discard the frame. */
 2602  if (fgt->eof_out[ofp->index]) {
 2604  return 0;
 2605  }
 2606 
 2608 
 2609  if (debug_ts)
 2610  av_log(ofp, AV_LOG_INFO, "filter_raw -> pts:%s pts_time:%s time_base:%d/%d\n",
 2611  av_ts2str(frame->pts), av_ts2timestr(frame->pts, &frame->time_base),
 2612  frame->time_base.num, frame->time_base.den);
 2613 
 2614  // Choose the output timebase the first time we get a frame.
 2615  if (!ofp->tb_out_locked) {
 2616  ret = choose_out_timebase(ofp, frame);
 2617  if (ret < 0) {
 2618  av_log(ofp, AV_LOG_ERROR, "Could not choose an output time base\n");
 2620  return ret;
 2621  }
 2622  }
 2623 
 2624  fd = frame_data(frame);
 2625  if (!fd) {
 2627  return AVERROR(ENOMEM);
 2628  }
 2629 
 2631 
 2632  // only use bits_per_raw_sample passed through from the decoder
 2633  // if the filtergraph did not touch the frame data
 2634  if (!fgp->is_meta)
 2635  fd->bits_per_raw_sample = 0;
 2636 
 2637  if (ofp->ofilter.type == AVMEDIA_TYPE_VIDEO) {
 /* Derive a duration from the frame rate when the filter gave none.
  * NOTE(review): the declaration of fr (original line 2639, presumably
  * from av_buffersink_get_frame_rate) was dropped by the extraction. */
 2638  if (!frame->duration) {
 2640  if (fr.num > 0 && fr.den > 0)
 2641  frame->duration = av_rescale_q(1, av_inv_q(fr), frame->time_base);
 2642  }
 2643 
 2644  fd->frame_rate_filter = ofp->fps.framerate;
 2645  }
 2646 
 2647  ret = fg_output_frame(ofp, fgt, frame);
 2649  if (ret < 0)
 2650  return ret;
 2651 
 2652  return 0;
 2653 }
2654 
2655 /* retrieve all frames available at filtergraph outputs
2656  * and send them to consumers */
2658  AVFrame *frame)
2659 {
 /* Returns AVERROR_EOF once every output is finished, 0 otherwise.
  * Also selects fgt->next_in — the input the scheduler should feed next.
  * NOTE(review): the first signature line and the per-loop declarations
  * (original lines 2657, 2666, 2683, 2704 — ifp/ofp lookups and the
  * avfilter request call) were dropped by the extraction. */
 2660  FilterGraphPriv *fgp = fgp_from_fg(fg);
 2661  int did_step = 0;
 2662 
 2663  // graph not configured, just select the input to request
 2664  if (!fgt->graph) {
 /* Pick the first input whose format is still unknown and not at EOF. */
 2665  for (int i = 0; i < fg->nb_inputs; i++) {
 2667  if (ifp->format < 0 && !fgt->eof_in[i]) {
 2668  fgt->next_in = i;
 2669  return 0;
 2670  }
 2671  }
 2672 
 2673  // This state - graph is not configured, but all inputs are either
 2674  // initialized or EOF - should be unreachable because sending EOF to a
 2675  // filter without even a fallback format should fail
 2676  av_assert0(0);
 2677  return AVERROR_BUG;
 2678  }
 2679 
 2680  while (fgp->nb_outputs_done < fg->nb_outputs) {
 2681  int ret;
 2682 
 /* EAGAIN means the graph wants more input before it can produce. */
 2684  if (ret == AVERROR(EAGAIN)) {
 2685  fgt->next_in = choose_input(fg, fgt);
 2686  break;
 2687  } else if (ret < 0) {
 2688  if (ret == AVERROR_EOF)
 2689  av_log(fg, AV_LOG_VERBOSE, "Filtergraph returned EOF, finishing\n");
 2690  else
 2691  av_log(fg, AV_LOG_ERROR,
 2692  "Error requesting a frame from the filtergraph: %s\n",
 2693  av_err2str(ret));
 2694  return ret;
 2695  }
 /* nb_inputs as next_in means "no specific input requested". */
 2696  fgt->next_in = fg->nb_inputs;
 2697 
 2698  // return after one iteration, so that scheduler can rate-control us
 2699  if (did_step && fgp->have_sources)
 2700  return 0;
 2701 
 2702  /* Reap all buffers present in the buffer sinks */
 2703  for (int i = 0; i < fg->nb_outputs; i++) {
 2705 
 /* fg_output_step() returns 0 while frames keep coming, 1 when this
  * sink is drained for now, negative on error. */
 2706  ret = 0;
 2707  while (!ret) {
 2708  ret = fg_output_step(ofp, fgt, frame);
 2709  if (ret < 0)
 2710  return ret;
 2711  }
 2712  }
 2713  did_step = 1;
 2714  }
 2715 
 2716  return (fgp->nb_outputs_done == fg->nb_outputs) ? AVERROR_EOF : 0;
 2717 }
2718 
2720 {
 /* Advance the sub2video clock to pts (given in timebase tb): either
  * refresh the overlaid subpicture or re-push the current one, so the
  * video stream keeps flowing even when no new subtitle arrived.
  * NOTE(review): the function signature line (original 2719) was dropped
  * by the extraction. */
 2721  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
 2722  int64_t pts2;
 2723 
 2724  /* subtitles seem to be usually muxed ahead of other streams;
 2725  if not, subtracting a larger time here is necessary */
 2726  pts2 = av_rescale_q(pts, tb, ifp->time_base) - 1;
 2727 
 2728  /* do not send the heartbeat frame if the subtitle is already ahead */
 2729  if (pts2 <= ifp->sub2video.last_pts)
 2730  return;
 2731 
 2732  if (pts2 >= ifp->sub2video.end_pts || ifp->sub2video.initialize)
 2733  /* if we have hit the end of the current displayed subpicture,
 2734  or if we need to initialize the system, update the
 2735  overlayed subpicture and its start/end times */
 2736  sub2video_update(ifp, pts2 + 1, NULL);
 2737  else
 /* Still inside the current subpicture: just re-send a reference. */
 2738  sub2video_push_ref(ifp, pts2);
 2739 }
2740 
2741 static int sub2video_frame(InputFilter *ifilter, AVFrame *frame, int buffer)
2742 {
 /* Feed one subtitle event into the sub2video machinery.
  * buffer != 0: the graph is not configured yet — queue the frame.
  * frame with no data buffer: a heartbeat (clock tick only).
  * frame == NULL: end of stream — clear the overlay and send EOF.
  * Otherwise: render the AVSubtitle carried in buf[0] onto the overlay. */
 2743  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
 2744  int ret;
 2745 
 2746  if (buffer) {
 2747  AVFrame *tmp;
 2748 
 2749  if (!frame)
 2750  return 0;
 2751 
 2752  tmp = av_frame_alloc();
 2753  if (!tmp)
 2754  return AVERROR(ENOMEM);
 2755 
 /* NOTE(review): original line 2756 (presumably the move of *frame into
  * tmp) was dropped by the extraction. */
 2757 
 2758  ret = av_fifo_write(ifp->frame_queue, &tmp, 1);
 2759  if (ret < 0) {
 2760  av_frame_free(&tmp);
 2761  return ret;
 2762  }
 2763 
 2764  return 0;
 2765  }
 2766 
 2767  // heartbeat frame
 2768  if (frame && !frame->buf[0]) {
 2769  sub2video_heartbeat(ifilter, frame->pts, frame->time_base);
 2770  return 0;
 2771  }
 2772 
 2773  if (!frame) {
 /* End of stream: blank the overlay if something is still displayed,
  * then propagate EOF into the buffer source. */
 2774  if (ifp->sub2video.end_pts < INT64_MAX)
 2775  sub2video_update(ifp, INT64_MAX, NULL);
 2776 
 2777  return av_buffersrc_add_frame(ifp->filter, NULL);
 2778  }
 2779 
 /* Subtitle frames may omit dimensions; keep the last known ones. */
 2780  ifp->width = frame->width ? frame->width : ifp->width;
 2781  ifp->height = frame->height ? frame->height : ifp->height;
 2782 
 /* buf[0]->data carries a serialized AVSubtitle, not raw video. */
 2783  sub2video_update(ifp, INT64_MIN, (const AVSubtitle*)frame->buf[0]->data);
 2784 
 2785  return 0;
 2786 }
2787 
2788 static int send_eof(FilterGraphThread *fgt, InputFilter *ifilter,
2789  int64_t pts, AVRational tb)
2790 {
 /* Propagate EOF (with timestamp pts in timebase tb) into one input.
  * If the graph was never configured, fall back to the decoder-provided
  * parameters so the graph can still be built and flushed.
  * NOTE(review): several statements (original lines 2801, 2803, 2813,
  * 2818, 2823 — the buffersrc close call and some fallback copies) were
  * dropped by the extraction. */
 2791  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
 2792  int ret;
 2793 
 /* Sending EOF twice is a no-op. */
 2794  if (fgt->eof_in[ifp->index])
 2795  return 0;
 2796 
 2797  fgt->eof_in[ifp->index] = 1;
 2798 
 2799  if (ifp->filter) {
 2800  pts = av_rescale_q_rnd(pts, tb, ifp->time_base,
 2802 
 2804  if (ret < 0)
 2805  return ret;
 2806  } else {
 2807  if (ifp->format < 0) {
 2808  // the filtergraph was never configured, use the fallback parameters
 2809  ifp->format = ifp->opts.fallback->format;
 2810  ifp->sample_rate = ifp->opts.fallback->sample_rate;
 2811  ifp->width = ifp->opts.fallback->width;
 2812  ifp->height = ifp->opts.fallback->height;
 2814  ifp->color_space = ifp->opts.fallback->colorspace;
 2815  ifp->color_range = ifp->opts.fallback->color_range;
 2816  ifp->time_base = ifp->opts.fallback->time_base;
 2817 
 2819  &ifp->opts.fallback->ch_layout);
 2820  if (ret < 0)
 2821  return ret;
 2822 
 2824  ret = clone_side_data(&ifp->side_data, &ifp->nb_side_data,
 2825  ifp->opts.fallback->side_data,
 2826  ifp->opts.fallback->nb_side_data, 0);
 2827  if (ret < 0)
 2828  return ret;
 2829 
 /* With fallback parameters in place, the graph may now be buildable. */
 2830  if (ifilter_has_all_input_formats(ifilter->graph)) {
 2831  ret = configure_filtergraph(ifilter->graph, fgt);
 2832  if (ret < 0) {
 2833  av_log(ifilter->graph, AV_LOG_ERROR, "Error initializing filters!\n");
 2834  return ret;
 2835  }
 2836  }
 2837  }
 2838 
 /* No fallback available either: the input format is undeterminable. */
 2839  if (ifp->format < 0) {
 2840  av_log(ifilter->graph, AV_LOG_ERROR,
 2841  "Cannot determine format of input %s after EOF\n",
 2842  ifp->opts.name);
 2843  return AVERROR_INVALIDDATA;
 2844  }
 2845  }
 2846 
 2847  return 0;
 2848 }
2849 
 /* Bit flags recording which input parameters changed between consecutive
  * frames; used by send_frame() to decide whether the filtergraph needs
  * to be reconfigured.  NOTE(review): the enum header line (original
  * 2850) was dropped by the extraction. */
 2851  VIDEO_CHANGED = (1 << 0),
 2852  AUDIO_CHANGED = (1 << 1),
 2853  MATRIX_CHANGED = (1 << 2),
 2854  DOWNMIX_CHANGED = (1 << 3),
 2855  HWACCEL_CHANGED = (1 << 4)
 2856 };
2857 
/* Map a NULL name/description string to the literal "unknown", so log
 * format strings can pass the result to %s unconditionally. */
static const char *unknown_if_null(const char *str)
{
    if (str)
        return str;
    return "unknown";
}
2862 
2864  InputFilter *ifilter, AVFrame *frame)
2865 {
 /* Push one decoded frame into an input: detect parameter changes,
  * reconfigure (or drop the frame / buffer it) as requested by the input
  * flags, rescale timestamps into the input timebase and submit the
  * frame to the buffer source.
  * NOTE(review): the first signature line and several statements
  * (original lines 2863, 2889, 2896, 2906, 2918, 2931, 2947, 2985,
  * 2990, 2992-2993, 2995 — side-data lookups, hwaccel update, bprint
  * init, fd assignments and the buffersrc add call) were dropped by the
  * extraction. */
 2866  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
 2867  FrameData *fd;
 2868  AVFrameSideData *sd;
 2869  int need_reinit = 0, ret;
 2870 
 2871  /* determine if the parameters for this input changed */
 2872  switch (ifp->type) {
 2873  case AVMEDIA_TYPE_AUDIO:
 2874  if (ifp->format != frame->format ||
 2875  ifp->sample_rate != frame->sample_rate ||
 2876  av_channel_layout_compare(&ifp->ch_layout, &frame->ch_layout))
 2877  need_reinit |= AUDIO_CHANGED;
 2878  break;
 2879  case AVMEDIA_TYPE_VIDEO:
 2880  if (ifp->format != frame->format ||
 2881  ifp->width != frame->width ||
 2882  ifp->height != frame->height ||
 2883  ifp->color_space != frame->colorspace ||
 2884  ifp->color_range != frame->color_range)
 2885  need_reinit |= VIDEO_CHANGED;
 2886  break;
 2887  }
 2888 
 /* Display-matrix side data appeared, disappeared or changed. */
 2890  if (!ifp->displaymatrix_present ||
 2891  memcmp(sd->data, ifp->displaymatrix, sizeof(ifp->displaymatrix)))
 2892  need_reinit |= MATRIX_CHANGED;
 2893  } else if (ifp->displaymatrix_present)
 2894  need_reinit |= MATRIX_CHANGED;
 2895 
 /* Same for downmix-info side data. */
 2897  if (!ifp->downmixinfo_present ||
 2898  memcmp(sd->data, &ifp->downmixinfo, sizeof(ifp->downmixinfo)))
 2899  need_reinit |= DOWNMIX_CHANGED;
 2900  } else if (ifp->downmixinfo_present)
 2901  need_reinit |= DOWNMIX_CHANGED;
 2902 
 /* DROPCHANGED mode: never reconfigure, just drop mismatching frames. */
 2903  if (need_reinit && fgt->graph && (ifp->opts.flags & IFILTER_FLAG_DROPCHANGED)) {
 2904  ifp->nb_dropped++;
 2905  av_log_once(fg, AV_LOG_WARNING, AV_LOG_DEBUG, &ifp->drop_warned, "Avoiding reinit; dropping frame pts: %s bound for %s\n", av_ts2str(frame->pts), ifilter->name);
 2907  return 0;
 2908  }
 2909 
 /* Without the REINIT flag an already-configured graph is kept as is. */
 2910  if (!(ifp->opts.flags & IFILTER_FLAG_REINIT) && fgt->graph)
 2911  need_reinit = 0;
 2912 
 2913  if (!!ifp->hw_frames_ctx != !!frame->hw_frames_ctx ||
 2914  (ifp->hw_frames_ctx && ifp->hw_frames_ctx->data != frame->hw_frames_ctx->data))
 2915  need_reinit |= HWACCEL_CHANGED;
 2916 
 2917  if (need_reinit) {
 2919  if (ret < 0)
 2920  return ret;
 2921  }
 2922 
 2923  /* (re)init the graph if possible, otherwise buffer the frame and return */
 2924  if (need_reinit || !fgt->graph) {
 2925  AVFrame *tmp = av_frame_alloc();
 2926 
 2927  if (!tmp)
 2928  return AVERROR(ENOMEM);
 2929 
 /* Not all inputs are known yet: queue the frame for later. */
 2930  if (!ifilter_has_all_input_formats(fg)) {
 2932 
 2933  ret = av_fifo_write(ifp->frame_queue, &tmp, 1);
 2934  if (ret < 0)
 2935  av_frame_free(&tmp);
 2936 
 2937  return ret;
 2938  }
 2939 
 /* Drain the old graph before tearing it down for reconfiguration. */
 2940  ret = fgt->graph ? read_frames(fg, fgt, tmp) : 0;
 2941  av_frame_free(&tmp);
 2942  if (ret < 0)
 2943  return ret;
 2944 
 2945  if (fgt->graph) {
 /* Build a human-readable reason string for the reconfigure log. */
 2946  AVBPrint reason;
 2948  if (need_reinit & AUDIO_CHANGED) {
 2949  const char *sample_format_name = av_get_sample_fmt_name(frame->format);
 2950  av_bprintf(&reason, "audio parameters changed to %d Hz, ", frame->sample_rate);
 2951  av_channel_layout_describe_bprint(&frame->ch_layout, &reason);
 2952  av_bprintf(&reason, ", %s, ", unknown_if_null(sample_format_name));
 2953  }
 2954  if (need_reinit & VIDEO_CHANGED) {
 2955  const char *pixel_format_name = av_get_pix_fmt_name(frame->format);
 2956  const char *color_space_name = av_color_space_name(frame->colorspace);
 2957  const char *color_range_name = av_color_range_name(frame->color_range);
 2958  av_bprintf(&reason, "video parameters changed to %s(%s, %s), %dx%d, ",
 2959  unknown_if_null(pixel_format_name), unknown_if_null(color_range_name),
 2960  unknown_if_null(color_space_name), frame->width, frame->height);
 2961  }
 2962  if (need_reinit & MATRIX_CHANGED)
 2963  av_bprintf(&reason, "display matrix changed, ");
 2964  if (need_reinit & DOWNMIX_CHANGED)
 /* NOTE(review): "medatata" is a typo for "metadata" in this log
  * message (string literal left unchanged here). */
 2965  av_bprintf(&reason, "downmix medatata changed, ");
 2966  if (need_reinit & HWACCEL_CHANGED)
 2967  av_bprintf(&reason, "hwaccel changed, ");
 2968  if (reason.len > 1)
 2969  reason.str[reason.len - 2] = '\0'; // remove last comma
 2970  av_log(fg, AV_LOG_INFO, "Reconfiguring filter graph%s%s\n", reason.len ? " because " : "", reason.str);
 2971  }
 2972 
 2973  ret = configure_filtergraph(fg, fgt);
 2974  if (ret < 0) {
 2975  av_log(fg, AV_LOG_ERROR, "Error reinitializing filters!\n");
 2976  return ret;
 2977  }
 2978  }
 2979 
 /* Rescale timestamps into the input's timebase before submission. */
 2980  frame->pts = av_rescale_q(frame->pts, frame->time_base, ifp->time_base);
 2981  frame->duration = av_rescale_q(frame->duration, frame->time_base, ifp->time_base);
 2982  frame->time_base = ifp->time_base;
 2983 
 2984  if (ifp->displaymatrix_applied)
 2986 
 2987  fd = frame_data(frame);
 2988  if (!fd)
 2989  return AVERROR(ENOMEM);
 2991 
 2994  if (ret < 0) {
 2996  if (ret != AVERROR_EOF)
 2997  av_log(fg, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
 2998  return ret;
 2999  }
 3000 
 3001  return 0;
 3002 }
3003 
3004 static void fg_thread_set_name(const FilterGraph *fg)
3005 {
 /* Build a short thread name for this filtergraph: "<media-char>f<name>"
  * for a simple graph, "fc<index>" for a complex one.
  * NOTE(review): the media-type character argument (original line 3010)
  * and the actual set-thread-name call (original line 3016) were dropped
  * by the extraction. */
 3006  char name[16];
 3007  if (filtergraph_is_simple(fg)) {
 3008  OutputFilterPriv *ofp = ofp_from_ofilter(fg->outputs[0]);
 3009  snprintf(name, sizeof(name), "%cf%s",
 3011  ofp->name);
 3012  } else {
 3013  snprintf(name, sizeof(name), "fc%d", fg->index);
 3014  }
 3015 
 3017 }
3018 
3020 {
 /* Release all per-thread filtergraph state: drain and free the output
  * frame queue, the scratch frame, the EOF flag arrays and the graph
  * itself, then zero the struct so it can be re-initialized.
  * NOTE(review): the function signature and the queue-free call
  * (original lines 3019/3025) were dropped by the extraction. */
 3021  if (fgt->frame_queue_out) {
 3022  AVFrame *frame;
 3023  while (av_fifo_read(fgt->frame_queue_out, &frame, 1) >= 0)
 3024  av_frame_free(&frame);
 3026  }
 3027 
 3028  av_frame_free(&fgt->frame);
 3029  av_freep(&fgt->eof_in);
 3030  av_freep(&fgt->eof_out);
 3031 
 3032  avfilter_graph_free(&fgt->graph);
 3033 
 3034  memset(fgt, 0, sizeof(*fgt));
 3035 }
3036 
3037 static int fg_thread_init(FilterGraphThread *fgt, const FilterGraph *fg)
3038 {
 /* Allocate per-thread filtergraph state: a scratch frame, per-input and
  * per-output EOF flag arrays and the output frame queue.  On any
  * allocation failure everything is torn down and AVERROR(ENOMEM) is
  * returned; fgt is left zeroed.
  * NOTE(review): the frame_queue_out allocation statement (original line
  * 3053) was dropped by the extraction. */
 3039  memset(fgt, 0, sizeof(*fgt));
 3040 
 3041  fgt->frame = av_frame_alloc();
 3042  if (!fgt->frame)
 3043  goto fail;
 3044 
 3045  fgt->eof_in = av_calloc(fg->nb_inputs, sizeof(*fgt->eof_in));
 3046  if (!fgt->eof_in)
 3047  goto fail;
 3048 
 3049  fgt->eof_out = av_calloc(fg->nb_outputs, sizeof(*fgt->eof_out));
 3050  if (!fgt->eof_out)
 3051  goto fail;
 3052 
 3054  if (!fgt->frame_queue_out)
 3055  goto fail;
 3056 
 3057  return 0;
 3058 
 3059 fail:
 /* Single cleanup path frees whatever was allocated so far. */
 3060  fg_thread_uninit(fgt);
 3061  return AVERROR(ENOMEM);
 3062 }
3063 
3064 static int filter_thread(void *arg)
3065 {
 /* Main loop of a filtergraph worker thread: receive frames/EOF/commands
  * from the scheduler, route them into the graph, then drain all outputs
  * after each step.  Returns 0 on normal (EOF) termination.
  * NOTE(review): a few statements (original lines 3079, 3134, 3163 —
  * the all-inputs-known condition, a timestamp fixup and the per-output
  * ofp lookup) were dropped by the extraction. */
 3066  FilterGraphPriv *fgp = arg;
 3067  FilterGraph *fg = &fgp->fg;
 3068 
 3069  FilterGraphThread fgt;
 3070  int ret = 0, input_status = 0;
 3071 
 3072  ret = fg_thread_init(&fgt, fg);
 3073  if (ret < 0)
 3074  goto finish;
 3075 
 3076  fg_thread_set_name(fg);
 3077 
 3078  // if we have all input parameters the graph can now be configured
 3080  ret = configure_filtergraph(fg, &fgt);
 3081  if (ret < 0) {
 3082  av_log(fg, AV_LOG_ERROR, "Error configuring filter graph: %s\n",
 3083  av_err2str(ret));
 3084  goto finish;
 3085  }
 3086  }
 3087 
 3088  while (1) {
 3089  InputFilter *ifilter;
 3090  InputFilterPriv *ifp = NULL;
 3091  enum FrameOpaque o;
 3092  unsigned input_idx = fgt.next_in;
 3093 
 3094  input_status = sch_filter_receive(fgp->sch, fgp->sch_idx,
 3095  &input_idx, fgt.frame);
 3096  if (input_status == AVERROR_EOF) {
 3097  av_log(fg, AV_LOG_VERBOSE, "Filtering thread received EOF\n");
 3098  break;
 3099  } else if (input_status == AVERROR(EAGAIN)) {
 3100  // should only happen when we didn't request any input
 3101  av_assert0(input_idx == fg->nb_inputs);
 3102  goto read_frames;
 3103  }
 3104  av_assert0(input_status >= 0);
 3105 
 /* NOTE(review): this assignment appears twice in this extract; the
  * second occurrence is redundant (likely an extraction artifact). */
 3106  o = (intptr_t)fgt.frame->opaque;
 3107 
 3108  o = (intptr_t)fgt.frame->opaque;
 3109 
 3110  // message on the control stream
 3111  if (input_idx == fg->nb_inputs) {
 3112  FilterCommand *fc;
 3113 
 3114  av_assert0(o == FRAME_OPAQUE_SEND_COMMAND && fgt.frame->buf[0]);
 3115 
 3116  fc = (FilterCommand*)fgt.frame->buf[0]->data;
 3117  send_command(fg, fgt.graph, fc->time, fc->target, fc->command, fc->arg,
 3118  fc->all_filters);
 3119  av_frame_unref(fgt.frame);
 3120  continue;
 3121  }
 3122 
 3123  // we received an input frame or EOF
 3124  ifilter = fg->inputs[input_idx];
 3125  ifp = ifp_from_ifilter(ifilter);
 3126 
 /* Dispatch by input kind: subtitle (sub2video), data frame, or EOF. */
 3127  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE) {
 3128  int hb_frame = input_status >= 0 && o == FRAME_OPAQUE_SUB_HEARTBEAT;
 3129  ret = sub2video_frame(ifilter, (fgt.frame->buf[0] || hb_frame) ? fgt.frame : NULL,
 3130  !fgt.graph);
 3131  } else if (fgt.frame->buf[0]) {
 3132  ret = send_frame(fg, &fgt, ifilter, fgt.frame);
 3133  } else {
 3135  ret = send_eof(&fgt, ifilter, fgt.frame->pts, fgt.frame->time_base);
 3136  }
 3137  av_frame_unref(fgt.frame);
 3138  if (ret == AVERROR_EOF) {
 /* Input closed: tell the scheduler to stop feeding it. */
 3139  av_log(fg, AV_LOG_VERBOSE, "Input %u no longer accepts new data\n",
 3140  input_idx);
 3141  sch_filter_receive_finish(fgp->sch, fgp->sch_idx, input_idx);
 3142  continue;
 3143  }
 3144  if (ret < 0)
 3145  goto finish;
 3146 
 3147 read_frames:
 3148  // retrieve all newly available frames
 3149  ret = read_frames(fg, &fgt, fgt.frame);
 3150  if (ret == AVERROR_EOF) {
 3151  av_log(fg, AV_LOG_VERBOSE, "All consumers returned EOF\n");
 3152  if (ifp && ifp->opts.flags & IFILTER_FLAG_DROPCHANGED)
 3153  av_log(fg, AV_LOG_INFO, "Total changed input frames dropped : %"PRId64"\n", ifp->nb_dropped);
 3154  break;
 3155  } else if (ret < 0) {
 3156  av_log(fg, AV_LOG_ERROR, "Error sending frames to consumers: %s\n",
 3157  av_err2str(ret));
 3158  goto finish;
 3159  }
 3160  }
 3161 
 /* Flush any outputs that never saw EOF (graph configured, not drained). */
 3162  for (unsigned i = 0; i < fg->nb_outputs; i++) {
 3164 
 3165  if (fgt.eof_out[i] || !fgt.graph)
 3166  continue;
 3167 
 3168  ret = fg_output_frame(ofp, &fgt, NULL);
 3169  if (ret < 0)
 3170  goto finish;
 3171  }
 3172 
 3173 finish:
 3174  // EOF is normal termination
 3175  if (ret == AVERROR_EOF)
 3176  ret = 0;
 3177 
 3178  fg_thread_uninit(&fgt);
 3179 
 3180  return ret;
 3181 }
3182 
3183 void fg_send_command(FilterGraph *fg, double time, const char *target,
3184  const char *command, const char *arg, int all_filters)
3185 {
3186  FilterGraphPriv *fgp = fgp_from_fg(fg);
3187  AVBufferRef *buf;
3188  FilterCommand *fc;
3189 
3190  fc = av_mallocz(sizeof(*fc));
3191  if (!fc)
3192  return;
3193 
3194  buf = av_buffer_create((uint8_t*)fc, sizeof(*fc), filter_command_free, NULL, 0);
3195  if (!buf) {
3196  av_freep(&fc);
3197  return;
3198  }
3199 
3200  fc->target = av_strdup(target);
3201  fc->command = av_strdup(command);
3202  fc->arg = av_strdup(arg);
3203  if (!fc->target || !fc->command || !fc->arg) {
3204  av_buffer_unref(&buf);
3205  return;
3206  }
3207 
3208  fc->time = time;
3209  fc->all_filters = all_filters;
3210 
3211  fgp->frame->buf[0] = buf;
3212  fgp->frame->opaque = (void*)(intptr_t)FRAME_OPAQUE_SEND_COMMAND;
3213 
3214  sch_filter_command(fgp->sch, fgp->sch_idx, fgp->frame);
3215 }
AV_OPT_SEARCH_CHILDREN
#define AV_OPT_SEARCH_CHILDREN
Search in possible children of the given object first.
Definition: opt.h:605
InputFilterPriv::nb_dropped
uint64_t nb_dropped
Definition: ffmpeg_filter.c:130
SCH_FILTER_OUT
#define SCH_FILTER_OUT(filter, output)
Definition: ffmpeg_sched.h:129
AVSubtitle
Definition: avcodec.h:2081
formats
formats
Definition: signature.h:47
AVBufferSrcParameters::side_data
AVFrameSideData ** side_data
Definition: buffersrc.h:124
AVBufferSrcParameters::color_space
enum AVColorSpace color_space
Video only, the YUV colorspace and range.
Definition: buffersrc.h:121
configure_input_filter
static int configure_input_filter(FilterGraph *fg, AVFilterGraph *graph, InputFilter *ifilter, AVFilterInOut *in)
Definition: ffmpeg_filter.c:1871
FilterGraphThread::next_in
unsigned next_in
Definition: ffmpeg_filter.c:96
AVMEDIA_TYPE_SUBTITLE
@ AVMEDIA_TYPE_SUBTITLE
Definition: avutil.h:204
av_gettime_relative
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
Definition: time.c:56
AVFILTER_CMD_FLAG_ONE
#define AVFILTER_CMD_FLAG_ONE
Stop once a filter understood the command (for target=all for example), fast filters are favored automatically.
Definition: avfilter.h:450
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:215
AVFrame::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: frame.h:661
AV_BPRINT_SIZE_UNLIMITED
#define AV_BPRINT_SIZE_UNLIMITED
av_buffersink_get_ch_layout
int av_buffersink_get_ch_layout(const AVFilterContext *ctx, AVChannelLayout *out)
Definition: buffersink.c:354
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
av_buffersink_get_sample_aspect_ratio
AVRational av_buffersink_get_sample_aspect_ratio(const AVFilterContext *ctx)
name
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option name
Definition: writing_filters.txt:88
avfilter_filter_pad_count
unsigned avfilter_filter_pad_count(const AVFilter *filter, int is_output)
Get the number of elements in an AVFilter's inputs or outputs array.
Definition: avfilter.c:628
extra_bits
#define extra_bits(eb)
Definition: intrax8.c:120
OutputFilter::graph
struct FilterGraph * graph
Definition: ffmpeg.h:360
av_clip
#define av_clip
Definition: common.h:100
InputFilterPriv::type
enum AVMediaType type
Definition: ffmpeg_filter.c:122
sch_filter_send
int sch_filter_send(Scheduler *sch, unsigned fg_idx, unsigned out_idx, AVFrame *frame)
Called by filtergraph tasks to send a filtered frame or EOF to consumers.
Definition: ffmpeg_sched.c:2460
OutputFilter::class
const AVClass * class
Definition: ffmpeg.h:358
view_specifier_parse
int view_specifier_parse(const char **pspec, ViewSpecifier *vs)
Definition: ffmpeg_opt.c:240
VSYNC_VFR
@ VSYNC_VFR
Definition: ffmpeg.h:69
OutputFilterPriv::filter
AVFilterContext * filter
Definition: ffmpeg_filter.c:207
av_bprint_is_complete
static int av_bprint_is_complete(const AVBPrint *buf)
Test if the print buffer is complete (not truncated).
Definition: bprint.h:218
r
const char * r
Definition: vf_curves.c:127
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
nb_input_files
int nb_input_files
Definition: ffmpeg.c:105
AVSubtitle::rects
AVSubtitleRect ** rects
Definition: avcodec.h:2086
opt.h
choose_input
static int choose_input(const FilterGraph *fg, const FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:2175
get_rotation
double get_rotation(const int32_t *displaymatrix)
Definition: cmdutils.c:1477
FilterGraphPriv::frame
AVFrame * frame
Definition: ffmpeg_filter.c:66
read_binary
static int read_binary(void *logctx, const char *path, uint8_t **data, int *len)
Definition: ffmpeg_filter.c:441
FilterGraphPriv::sch
Scheduler * sch
Definition: ffmpeg_filter.c:70
AVCodecParameters::codec_type
enum AVMediaType codec_type
General type of the encoded data.
Definition: codec_par.h:51
FilterGraphThread::got_frame
int got_frame
Definition: ffmpeg_filter.c:98
AVFilterGraph::nb_threads
int nb_threads
Maximum number of threads used by filters in this graph.
Definition: avfilter.h:596
InputFilterPriv::ch_layout
AVChannelLayout ch_layout
Definition: ffmpeg_filter.c:141
avfilter_pad_get_name
const char * avfilter_pad_get_name(const AVFilterPad *pads, int pad_idx)
Get the name of an AVFilterPad.
Definition: avfilter.c:982
FrameData
Definition: ffmpeg.h:658
send_command
static void send_command(FilterGraph *fg, AVFilterGraph *graph, double time, const char *target, const char *command, const char *arg, int all_filters)
Definition: ffmpeg_filter.c:2150
InputFilterPriv::last_pts
int64_t last_pts
Definition: ffmpeg_filter.c:162
avfilter_graph_segment_create_filters
int avfilter_graph_segment_create_filters(AVFilterGraphSegment *seg, int flags)
Create filters specified in a graph segment.
Definition: graphparser.c:516
InputFilterOptions::crop_right
unsigned crop_right
Definition: ffmpeg.h:285
OutputFilter::apad
char * apad
Definition: ffmpeg.h:368
out
FILE * out
Definition: movenc.c:55
av_frame_get_buffer
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:205
av_bprint_init
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:69
av_frame_get_side_data
AVFrameSideData * av_frame_get_side_data(const AVFrame *frame, enum AVFrameSideDataType type)
Definition: frame.c:661
InputFilterPriv::filter
AVFilterContext * filter
Definition: ffmpeg_filter.c:112
clone_side_data
static int clone_side_data(AVFrameSideData ***dst, int *nb_dst, AVFrameSideData *const *src, int nb_src, unsigned int flags)
Wrapper calling av_frame_side_data_clone() in a loop for all source entries.
Definition: ffmpeg_utils.h:50
atomic_fetch_add
#define atomic_fetch_add(object, operand)
Definition: stdatomic.h:137
sample_fmts
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:948
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:3341
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:90
FilterGraph::inputs
InputFilter ** inputs
Definition: ffmpeg.h:380
av_buffersink_get_frame_flags
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
Definition: buffersink.c:142
AVBufferSrcParameters::nb_side_data
int nb_side_data
Definition: buffersrc.h:125
InputFilterOptions::crop_bottom
unsigned crop_bottom
Definition: ffmpeg.h:283
av_dict_count
int av_dict_count(const AVDictionary *m)
Get number of entries in dictionary.
Definition: dict.c:37
AVFrame::nb_side_data
int nb_side_data
Definition: frame.h:608
ifilter_parameters_from_frame
static int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame)
Definition: ffmpeg_filter.c:2082
stream_specifier_parse
int stream_specifier_parse(StreamSpecifier *ss, const char *spec, int allow_remainder, void *logctx)
Parse a stream specifier string into a form suitable for matching.
Definition: cmdutils.c:1009
ofilter_class
static const AVClass ofilter_class
Definition: ffmpeg_filter.c:645
HWACCEL_CHANGED
@ HWACCEL_CHANGED
Definition: ffmpeg_filter.c:2855
frame_drop_threshold
float frame_drop_threshold
Definition: ffmpeg_opt.c:61
AV_TIME_BASE_Q
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:264
ist_filter_add
int ist_filter_add(InputStream *ist, InputFilter *ifilter, int is_simple, const ViewSpecifier *vs, InputFilterOptions *opts, SchedulerNode *src)
Definition: ffmpeg_demux.c:1011
InputFilterPriv::time_base
AVRational time_base
Definition: ffmpeg_filter.c:143
int64_t
long long int64_t
Definition: coverity.c:34
output
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce output
Definition: filter_design.txt:225
configure_output_filter
static int configure_output_filter(FilterGraphPriv *fgp, AVFilterGraph *graph, OutputFilter *ofilter, AVFilterInOut *out)
Definition: ffmpeg_filter.c:1681
FilterCommand::arg
char * arg
Definition: ffmpeg_filter.c:259
AVSubtitleRect
Definition: avcodec.h:2054
av_asprintf
char * av_asprintf(const char *fmt,...)
Definition: avstring.c:115
AVSubtitle::num_rects
unsigned num_rects
Definition: avcodec.h:2085
dec_filter_add
int dec_filter_add(Decoder *dec, InputFilter *ifilter, InputFilterOptions *opts, const ViewSpecifier *vs, SchedulerNode *src)
Definition: ffmpeg_dec.c:1753
fg_free
void fg_free(FilterGraph **pfg)
Definition: ffmpeg_filter.c:997
FPSConvContext::frames_prev_hist
int64_t frames_prev_hist[3]
Definition: ffmpeg_filter.c:182
AV_CODEC_ID_MPEG4
@ AV_CODEC_ID_MPEG4
Definition: codec_id.h:64
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:63
AVFrame::opaque
void * opaque
Frame owner's private data.
Definition: frame.h:548
AVFrame::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: frame.h:672
InputFile::index
int index
Definition: ffmpeg.h:474
sample_rates
static const int sample_rates[]
Definition: dcaenc.h:34
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:410
tmp
static uint8_t tmp[11]
Definition: aes_ctr.c:28
AVFilterInOut::next
struct AVFilterInOut * next
next input/input in the list, NULL if this is the last
Definition: avfilter.h:730
pixdesc.h
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:512
AVFrame::width
int width
Definition: frame.h:482
FilterGraphPriv::log_name
char log_name[32]
Definition: ffmpeg_filter.c:49
w
uint8_t w
Definition: llviddspenc.c:38
FilterGraphPriv::have_sources
int have_sources
Definition: ffmpeg_filter.c:56
StreamSpecifier
Definition: cmdutils.h:113
ofilter_bind_enc
int ofilter_bind_enc(OutputFilter *ofilter, unsigned sched_idx_enc, const OutputFilterOptions *opts)
Definition: ffmpeg_filter.c:802
AVOption
AVOption.
Definition: opt.h:429
fg_output_frame
static int fg_output_frame(OutputFilterPriv *ofp, FilterGraphThread *fgt, AVFrame *frame)
Definition: ffmpeg_filter.c:2497
av_buffersrc_add_frame
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
Definition: buffersrc.c:185
FilterGraph::index
int index
Definition: ffmpeg.h:378
InputFilterPriv::sample_rate
int sample_rate
Definition: ffmpeg_filter.c:140
data
const char data[16]
Definition: mxf.c:149
FPSConvContext::last_dropped
int last_dropped
Definition: ffmpeg_filter.c:186
OutputFilterPriv::ts_offset
int64_t ts_offset
Definition: ffmpeg_filter.c:244
cleanup_filtergraph
static void cleanup_filtergraph(FilterGraph *fg, FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:1881
ffmpeg.h
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:225
filter
void(* filter)(uint8_t *src, int stride, int qscale)
Definition: h263dsp.c:29
FilterGraph::nb_inputs
int nb_inputs
Definition: ffmpeg.h:381
VIDEO_CHANGED
@ VIDEO_CHANGED
Definition: ffmpeg_filter.c:2851
AV_FRAME_DATA_DISPLAYMATRIX
@ AV_FRAME_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: frame.h:85
ViewSpecifier
Definition: ffmpeg.h:128
AVDictionary
Definition: dict.c:32
AVFrame::flags
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:654
ofp_from_ofilter
static OutputFilterPriv * ofp_from_ofilter(OutputFilter *ofilter)
Definition: ffmpeg_filter.c:251
AVChannelLayout::order
enum AVChannelOrder order
Channel order used in this layout.
Definition: channel_layout.h:324
av_buffer_ref
AVBufferRef * av_buffer_ref(const AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:103
av_frame_side_data_clone
int av_frame_side_data_clone(AVFrameSideData ***sd, int *nb_sd, const AVFrameSideData *src, unsigned int flags)
Add a new side data entry to an array based on existing side data, taking a reference towards the con...
Definition: side_data.c:246
IFILTER_FLAG_AUTOROTATE
@ IFILTER_FLAG_AUTOROTATE
Definition: ffmpeg.h:262
AVChannelLayout::nb_channels
int nb_channels
Number of channels in this layout.
Definition: channel_layout.h:329
configure_output_audio_filter
static int configure_output_audio_filter(FilterGraphPriv *fgp, AVFilterGraph *graph, OutputFilter *ofilter, AVFilterInOut *out)
Definition: ffmpeg_filter.c:1599
AVFrame::buf
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:587
AVBufferSrcParameters::height
int height
Definition: buffersrc.h:87
avio_size
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:323
av_strlcatf
size_t av_strlcatf(char *dst, size_t size, const char *fmt,...)
Definition: avstring.c:103
fg_output_step
static int fg_output_step(OutputFilterPriv *ofp, FilterGraphThread *fgt, AVFrame *frame)
Definition: ffmpeg_filter.c:2580
FilterGraphPriv
Definition: ffmpeg_filter.c:45
av_channel_layout_describe_bprint
int av_channel_layout_describe_bprint(const AVChannelLayout *channel_layout, AVBPrint *bp)
bprint variant of av_channel_layout_describe().
Definition: channel_layout.c:599
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:31
FilterGraphThread::eof_in
uint8_t * eof_in
Definition: ffmpeg_filter.c:101
avfilter_graph_free
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
Definition: avfiltergraph.c:117
configure_filtergraph
static int configure_filtergraph(FilterGraph *fg, FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:1916
OutputFilterPriv::log_name
char log_name[32]
Definition: ffmpeg_filter.c:203
AUTO_INSERT_FILTER
#define AUTO_INSERT_FILTER(opt_name, filter_name, arg)
stream_specifier_uninit
void stream_specifier_uninit(StreamSpecifier *ss)
Definition: cmdutils.c:1000
InputStream
Definition: ffmpeg.h:437
filter_nbthreads
char * filter_nbthreads
Definition: ffmpeg_opt.c:75
debug_ts
int debug_ts
Definition: ffmpeg_opt.c:69
OutputFilterOptions
Definition: ffmpeg.h:303
InputFilterOptions::trim_start_us
int64_t trim_start_us
Definition: ffmpeg.h:270
InputFilterOptions::flags
unsigned flags
Definition: ffmpeg.h:291
avfilter_graph_create_filter
int avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt, const char *name, const char *args, void *opaque, AVFilterGraph *graph_ctx)
A convenience wrapper that allocates and initializes a filter in a single step.
Definition: avfiltergraph.c:138
avfilter_graph_alloc_filter
AVFilterContext * avfilter_graph_alloc_filter(AVFilterGraph *graph, const AVFilter *filter, const char *name)
Create a new filter instance in a filter graph.
Definition: avfiltergraph.c:165
finish
static void finish(void)
Definition: movenc.c:374
AV_OPT_TYPE_BINARY
@ AV_OPT_TYPE_BINARY
Underlying C type is a uint8_t* that is either NULL or points to an array allocated with the av_mallo...
Definition: opt.h:286
av_color_space_name
const char * av_color_space_name(enum AVColorSpace space)
Definition: pixdesc.c:3717
FRAME_OPAQUE_SUB_HEARTBEAT
@ FRAME_OPAQUE_SUB_HEARTBEAT
Definition: ffmpeg.h:88
OutputFilterPriv
Definition: ffmpeg_filter.c:197
fg_thread_uninit
static void fg_thread_uninit(FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:3019
filter_opt_apply
static int filter_opt_apply(void *logctx, AVFilterContext *f, const char *key, const char *val)
Definition: ffmpeg_filter.c:490
fail
#define fail()
Definition: checkasm.h:193
AVBufferSrcParameters::sample_aspect_ratio
AVRational sample_aspect_ratio
Video only, the sample (pixel) aspect ratio.
Definition: buffersrc.h:92
av_fifo_write
int av_fifo_write(AVFifo *f, const void *buf, size_t nb_elems)
Write data into a FIFO.
Definition: fifo.c:188
OutputFilterPriv::name
char * name
Definition: ffmpeg_filter.c:205
sub2video_push_ref
static void sub2video_push_ref(InputFilterPriv *ifp, int64_t pts)
Definition: ffmpeg_filter.c:329
avfilter_graph_alloc
AVFilterGraph * avfilter_graph_alloc(void)
Allocate a filter graph.
Definition: avfiltergraph.c:83
AV_PIX_FMT_FLAG_HWACCEL
#define AV_PIX_FMT_FLAG_HWACCEL
Pixel format is an HW accelerated format.
Definition: pixdesc.h:128
FFSIGN
#define FFSIGN(a)
Definition: common.h:75
samplefmt.h
OutputFilterPriv::side_data
AVFrameSideData ** side_data
Definition: ffmpeg_filter.c:217
AVERROR_OPTION_NOT_FOUND
#define AVERROR_OPTION_NOT_FOUND
Option not found.
Definition: error.h:63
avfilter_graph_segment_free
void avfilter_graph_segment_free(AVFilterGraphSegment **seg)
Free the provided AVFilterGraphSegment and everything associated with it.
Definition: graphparser.c:276
sub2video_get_blank_frame
static int sub2video_get_blank_frame(InputFilterPriv *ifp)
Definition: ffmpeg_filter.c:276
AV_BPRINT_SIZE_AUTOMATIC
#define AV_BPRINT_SIZE_AUTOMATIC
ifilter_has_all_input_formats
static int ifilter_has_all_input_formats(FilterGraph *fg)
Definition: ffmpeg_filter.c:615
val
static double val(void *priv, double ch)
Definition: aeval.c:77
OutputFilterPriv::index
int index
Definition: ffmpeg_filter.c:200
AVFrame::ch_layout
AVChannelLayout ch_layout
Channel layout of the audio data.
Definition: frame.h:753
SCH_ENC
#define SCH_ENC(encoder)
Definition: ffmpeg_sched.h:123
configure_input_video_filter
static int configure_input_video_filter(FilterGraph *fg, AVFilterGraph *graph, InputFilter *ifilter, AVFilterInOut *in)
Definition: ffmpeg_filter.c:1702
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
avfilter_graph_segment_parse
int avfilter_graph_segment_parse(AVFilterGraph *graph, const char *graph_str, int flags, AVFilterGraphSegment **seg)
Parse a textual filtergraph description into an intermediate form.
Definition: graphparser.c:460
AVDownmixInfo
This structure describes optional metadata relevant to a downmix procedure.
Definition: downmix_info.h:58
pts
static int64_t pts
Definition: transcode_aac.c:644
av_opt_set
int av_opt_set(void *obj, const char *name, const char *val, int search_flags)
Definition: opt.c:835
graph_is_meta
static int graph_is_meta(AVFilterGraph *graph)
Definition: ffmpeg_filter.c:1897
FilterGraphThread::frame
AVFrame * frame
Definition: ffmpeg_filter.c:88
ss
#define ss(width, name, subs,...)
Definition: cbs_vp9.c:202
AVFILTER_FLAG_DYNAMIC_INPUTS
#define AVFILTER_FLAG_DYNAMIC_INPUTS
The number of the filter inputs is not determined just by AVFilter.inputs.
Definition: avfilter.h:139
FrameData::tb
AVRational tb
Definition: ffmpeg.h:668
OutputFilterPriv::sws_opts
AVDictionary * sws_opts
Definition: ffmpeg_filter.c:229
fgp_from_fg
static FilterGraphPriv * fgp_from_fg(FilterGraph *fg)
Definition: ffmpeg_filter.c:74
OutputFilterPriv::sample_rate
int sample_rate
Definition: ffmpeg_filter.c:212
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
InputFilterPriv::sub2video
struct InputFilterPriv::@8 sub2video
FPSConvContext::dropped_keyframe
int dropped_keyframe
Definition: ffmpeg_filter.c:187
AVRational::num
int num
Numerator.
Definition: rational.h:59
OutputFilter::bound
int bound
Definition: ffmpeg.h:365
LATENCY_PROBE_FILTER_PRE
@ LATENCY_PROBE_FILTER_PRE
Definition: ffmpeg.h:102
InputFilterOptions::trim_end_us
int64_t trim_end_us
Definition: ffmpeg.h:271
AVFilterPad
A filter pad used for either input or output.
Definition: filters.h:38
sch_add_filtergraph
int sch_add_filtergraph(Scheduler *sch, unsigned nb_inputs, unsigned nb_outputs, SchThreadFunc func, void *ctx)
Add a filtergraph to the scheduler.
Definition: ffmpeg_sched.c:821
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:51
sub2video_heartbeat
static void sub2video_heartbeat(InputFilter *ifilter, int64_t pts, AVRational tb)
Definition: ffmpeg_filter.c:2719
avfilter_inout_free
void avfilter_inout_free(AVFilterInOut **inout)
Free the supplied list of AVFilterInOut and set *inout to NULL.
Definition: graphparser.c:76
OutputFilterPriv::nb_side_data
int nb_side_data
Definition: ffmpeg_filter.c:218
avassert.h
OutputFilterPriv::trim_start_us
int64_t trim_start_us
Definition: ffmpeg_filter.c:241
FrameData::frame_rate_filter
AVRational frame_rate_filter
Definition: ffmpeg.h:671
InputFilterPriv::nb_side_data
int nb_side_data
Definition: ffmpeg_filter.c:146
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:209
send_eof
static int send_eof(FilterGraphThread *fgt, InputFilter *ifilter, int64_t pts, AVRational tb)
Definition: ffmpeg_filter.c:2788
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
InputFilterPriv
Definition: ffmpeg_filter.c:105
av_fifo_read
int av_fifo_read(AVFifo *f, void *buf, size_t nb_elems)
Read data from a FIFO.
Definition: fifo.c:240
fg_complex_bind_input
static int fg_complex_bind_input(FilterGraph *fg, InputFilter *ifilter)
Definition: ffmpeg_filter.c:1258
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:625
duration
int64_t duration
Definition: movenc.c:65
av_buffersink_get_frame_rate
AVRational av_buffersink_get_frame_rate(const AVFilterContext *ctx)
Definition: buffersink.c:334
ifilter_alloc
static InputFilter * ifilter_alloc(FilterGraph *fg)
Definition: ffmpeg_filter.c:969
AVFilterChain::filters
AVFilterParams ** filters
Definition: avfilter.h:906
filter_command_free
static void filter_command_free(void *opaque, uint8_t *data)
Definition: ffmpeg_filter.c:265
VSYNC_VSCFR
@ VSYNC_VSCFR
Definition: ffmpeg.h:70
llrintf
#define llrintf(x)
Definition: libm.h:401
s
#define s(width, name)
Definition: cbs_vp9.c:198
ifilter_bind_ist
static int ifilter_bind_ist(InputFilter *ifilter, InputStream *ist, const ViewSpecifier *vs)
Definition: ffmpeg_filter.c:678
FilterGraphPriv::frame_enc
AVFrame * frame_enc
Definition: ffmpeg_filter.c:68
DOWNMIX_CHANGED
@ DOWNMIX_CHANGED
Definition: ffmpeg_filter.c:2854
InputFilterPriv::frame
AVFrame * frame
Definition: ffmpeg_filter.c:115
FilterGraph::outputs
OutputFilter ** outputs
Definition: ffmpeg.h:382
ofilter_item_name
static const char * ofilter_item_name(void *obj)
Definition: ffmpeg_filter.c:639
format
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample format(the sample packing is implied by the sample format) and sample rate. The lists are not just lists
AVDictionaryEntry::key
char * key
Definition: dict.h:90
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
VIEW_SPECIFIER_TYPE_NONE
@ VIEW_SPECIFIER_TYPE_NONE
Definition: ffmpeg.h:117
AV_CHANNEL_ORDER_UNSPEC
@ AV_CHANNEL_ORDER_UNSPEC
Only the channel count is specified, without any further information about the channel order.
Definition: channel_layout.h:119
ifilter_bind_dec
static int ifilter_bind_dec(InputFilterPriv *ifp, Decoder *dec, const ViewSpecifier *vs)
Definition: ffmpeg_filter.c:733
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
OutputFilter::linklabel
uint8_t * linklabel
Definition: ffmpeg.h:366
InputFilter
Definition: ffmpeg.h:352
FilterGraphPriv::nb_outputs_done
unsigned nb_outputs_done
Definition: ffmpeg_filter.c:59
fc
#define fc(width, name, range_min, range_max)
Definition: cbs_av1.c:493
av_buffersink_get_format
int av_buffersink_get_format(const AVFilterContext *ctx)
av_buffersink_get_time_base
AVRational av_buffersink_get_time_base(const AVFilterContext *ctx)
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:298
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:230
FrameData::dec
struct FrameData::@4 dec
ctx
AVFormatContext * ctx
Definition: movenc.c:49
OFILTER_FLAG_AUTOSCALE
@ OFILTER_FLAG_AUTOSCALE
Definition: ffmpeg.h:300
av_rescale_q
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
AVSubtitle::pts
int64_t pts
Same as packet pts, in AV_TIME_BASE.
Definition: avcodec.h:2087
fg_thread_init
static int fg_thread_init(FilterGraphThread *fgt, const FilterGraph *fg)
Definition: ffmpeg_filter.c:3037
InputFilterOptions::name
uint8_t * name
Definition: ffmpeg.h:273
InputFilterOptions::crop_top
unsigned crop_top
Definition: ffmpeg.h:282
InputFilter::graph
struct FilterGraph * graph
Definition: ffmpeg.h:353
AV_SIDE_DATA_PROP_GLOBAL
@ AV_SIDE_DATA_PROP_GLOBAL
The side data type can be used in stream-global structures.
Definition: frame.h:279
av_get_sample_fmt_name
const char * av_get_sample_fmt_name(enum AVSampleFormat sample_fmt)
Return the name of sample_fmt, or NULL if sample_fmt is not recognized.
Definition: samplefmt.c:51
key
const char * key
Definition: hwcontext_opencl.c:189
color_range
color_range
Definition: vf_selectivecolor.c:43
AV_ROUND_NEAR_INF
@ AV_ROUND_NEAR_INF
Round to nearest and halfway cases away from zero.
Definition: mathematics.h:135
fsize
static int64_t fsize(FILE *f)
Definition: audiomatch.c:29
OutputFilterPriv::fps
FPSConvContext fps
Definition: ffmpeg_filter.c:246
fg_item_name
static const char * fg_item_name(void *obj)
Definition: ffmpeg_filter.c:1054
AV_ROUND_PASS_MINMAX
@ AV_ROUND_PASS_MINMAX
Flag telling rescaling functions to pass INT64_MIN/MAX through unchanged, avoiding special cases for ...
Definition: mathematics.h:159
command
static int command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
Definition: vf_drawtext.c:1187
arg
const char * arg
Definition: jacosubdec.c:67
OutputFilterPriv::ch_layouts
const AVChannelLayout * ch_layouts
Definition: ffmpeg_filter.c:235
OutputFilterPriv::width
int width
Definition: ffmpeg_filter.c:211
InputFilterOptions::crop_left
unsigned crop_left
Definition: ffmpeg.h:284
av_color_range_name
const char * av_color_range_name(enum AVColorRange range)
Definition: pixdesc.c:3657
AVFormatContext
Format I/O context.
Definition: avformat.h:1265
avfilter_get_by_name
const AVFilter * avfilter_get_by_name(const char *name)
Get a filter definition matching the given name.
Definition: allfilters.c:640
opts
AVDictionary * opts
Definition: movenc.c:51
AVStream::codecpar
AVCodecParameters * codecpar
Codec parameters associated with this stream.
Definition: avformat.h:768
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:75
OutputFilter::name
uint8_t * name
Definition: ffmpeg.h:361
fabs
static __device__ float fabs(float a)
Definition: cuda_runtime.h:182
avfilter_graph_config
int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx)
Check validity and configure all the links and formats in the graph.
Definition: avfiltergraph.c:1295
OutputFilterPriv::enc_timebase
AVRational enc_timebase
Definition: ffmpeg_filter.c:240
avfilter_graph_segment_apply
int avfilter_graph_segment_apply(AVFilterGraphSegment *seg, int flags, AVFilterInOut **inputs, AVFilterInOut **outputs)
Apply all filter/link descriptions from a graph segment to the associated filtergraph.
Definition: graphparser.c:882
InputFilterPriv::color_space
enum AVColorSpace color_space
Definition: ffmpeg_filter.c:137
NULL
#define NULL
Definition: coverity.c:32
av_opt_set_bin
int av_opt_set_bin(void *obj, const char *name, const uint8_t *val, int len, int search_flags)
Definition: opt.c:895
set_channel_layout
static int set_channel_layout(OutputFilterPriv *f, const AVChannelLayout *layouts_allowed, const AVChannelLayout *layout_requested)
Definition: ffmpeg_filter.c:762
OutputFilterPriv::ch_layout
AVChannelLayout ch_layout
Definition: ffmpeg_filter.c:213
AVFilterParams
Parameters describing a filter to be created in a filtergraph.
Definition: avfilter.h:838
FPSConvContext::dup_warning
uint64_t dup_warning
Definition: ffmpeg_filter.c:184
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:139
InputStream::st
AVStream * st
Definition: ffmpeg.h:445
avfilter_graph_set_auto_convert
void avfilter_graph_set_auto_convert(AVFilterGraph *graph, unsigned flags)
Enable or disable automatic format conversion inside the graph.
Definition: avfiltergraph.c:160
InputFilterPriv::displaymatrix_present
int displaymatrix_present
Definition: ffmpeg_filter.c:152
Decoder
Definition: ffmpeg.h:423
AVFilterParams::filter
AVFilterContext * filter
The filter context.
Definition: avfilter.h:849
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
OFILTER_FLAG_AUDIO_24BIT
@ OFILTER_FLAG_AUDIO_24BIT
Definition: ffmpeg.h:299
AVFilterChain::nb_filters
size_t nb_filters
Definition: avfilter.h:907
fg_create_simple
int fg_create_simple(FilterGraph **pfg, InputStream *ist, char *graph_desc, Scheduler *sch, unsigned sched_idx_enc, const OutputFilterOptions *opts)
Definition: ffmpeg_filter.c:1206
AVFilterGraph::filters
AVFilterContext ** filters
Definition: avfilter.h:572
InputFilterPriv::linklabel
uint8_t * linklabel
Definition: ffmpeg_filter.c:119
ofilter_bind_ifilter
static int ofilter_bind_ifilter(OutputFilter *ofilter, InputFilterPriv *ifp, const OutputFilterOptions *opts)
Definition: ffmpeg_filter.c:912
OutputFilterPriv::sample_aspect_ratio
AVRational sample_aspect_ratio
Definition: ffmpeg_filter.c:227
ofilter_alloc
static OutputFilter * ofilter_alloc(FilterGraph *fg, enum AVMediaType type)
Definition: ffmpeg_filter.c:653
close_output
static int close_output(OutputFilterPriv *ofp, FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:2442
FilterGraphThread::frame_queue_out
AVFifo * frame_queue_out
Definition: ffmpeg_filter.c:93
mathops.h
FilterGraphPriv::sch_idx
unsigned sch_idx
Definition: ffmpeg_filter.c:71
FrameData::wallclock
int64_t wallclock[LATENCY_PROBE_NB]
Definition: ffmpeg.h:675
avfilter_graph_request_oldest
int avfilter_graph_request_oldest(AVFilterGraph *graph)
Request a frame on the oldest sink link.
Definition: avfiltergraph.c:1426
time.h
AVFilterGraphSegment::chains
AVFilterChain ** chains
A list of filter chain contained in this segment.
Definition: avfilter.h:930
stream_specifier_match
unsigned stream_specifier_match(const StreamSpecifier *ss, const AVFormatContext *s, const AVStream *st, void *logctx)
Definition: cmdutils.c:1224
AVFilterGraph
Definition: avfilter.h:570
InputFilterPriv::downmixinfo_present
int downmixinfo_present
Definition: ffmpeg_filter.c:156
inputs
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
Definition: filter_design.txt:243
InputFilterOptions
Definition: ffmpeg.h:269
AVFILTER_AUTO_CONVERT_NONE
@ AVFILTER_AUTO_CONVERT_NONE
all automatic conversions disabled
Definition: avfilter.h:692
InputFilterPriv::sample_aspect_ratio
AVRational sample_aspect_ratio
Definition: ffmpeg_filter.c:136
AVCOL_RANGE_UNSPECIFIED
@ AVCOL_RANGE_UNSPECIFIED
Definition: pixfmt.h:716
FilterGraph::nb_outputs
int nb_outputs
Definition: ffmpeg.h:383
OutputFilterPriv::formats
const int * formats
Definition: ffmpeg_filter.c:234
av_opt_set_int
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
Definition: opt.c:880
InputStream::par
AVCodecParameters * par
Codec parameters - to be used by the decoding/streamcopy code.
Definition: ffmpeg.h:453
av_buffer_create
AVBufferRef * av_buffer_create(uint8_t *data, size_t size, void(*free)(void *opaque, uint8_t *data), void *opaque, int flags)
Create an AVBuffer from an existing array.
Definition: buffer.c:55
input_files
InputFile ** input_files
Definition: ffmpeg.c:104
AV_CLASS_CATEGORY_FILTER
@ AV_CLASS_CATEGORY_FILTER
Definition: log.h:36
Scheduler
Definition: ffmpeg_sched.c:275
FilterGraphPriv::fg
FilterGraph fg
Definition: ffmpeg_filter.c:46
FilterGraphPriv::nb_threads
int nb_threads
Definition: ffmpeg_filter.c:63
OutputFilterPriv::ofilter
OutputFilter ofilter
Definition: ffmpeg_filter.c:198
FilterGraph
Definition: ffmpeg.h:376
AVFilterGraphSegment
A parsed representation of a filtergraph segment.
Definition: avfilter.h:919
file_read
char * file_read(const char *filename)
Definition: cmdutils.c:1495
ENC_TIME_BASE_DEMUX
@ ENC_TIME_BASE_DEMUX
Definition: ffmpeg.h:77
InputFilterOptions::sub2video_width
int sub2video_width
Definition: ffmpeg.h:287
AVBufferSrcParameters::frame_rate
AVRational frame_rate
Video only, the frame rate of the input video.
Definition: buffersrc.h:100
AVFilterInOut::pad_idx
int pad_idx
index of the filt_ctx pad to use for linking
Definition: avfilter.h:727
av_buffersrc_close
int av_buffersrc_close(AVFilterContext *ctx, int64_t pts, unsigned flags)
Close the buffer source after EOF.
Definition: buffersrc.c:273
AVFilterGraph::scale_sws_opts
char * scale_sws_opts
sws options to use for the auto-inserted scale filters
Definition: avfilter.h:575
filtergraph_is_simple
int filtergraph_is_simple(const FilterGraph *fg)
Definition: ffmpeg_filter.c:2144
VideoSyncMethod
VideoSyncMethod
Definition: ffmpeg.h:65
av_opt_find
const AVOption * av_opt_find(void *obj, const char *name, const char *unit, int opt_flags, int search_flags)
Look for an option in an object.
Definition: opt.c:1991
IFILTER_FLAG_REINIT
@ IFILTER_FLAG_REINIT
Definition: ffmpeg.h:263
f
f
Definition: af_crystalizer.c:122
AVIOContext
Bytestream IO Context.
Definition: avio.h:160
av_ts2timestr
#define av_ts2timestr(ts, tb)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:83
filter_thread
static int filter_thread(void *arg)
Definition: ffmpeg_filter.c:3064
AVMediaType
AVMediaType
Definition: avutil.h:199
InputFilterPriv::hw_frames_ctx
AVBufferRef * hw_frames_ctx
Definition: ffmpeg_filter.c:150
AVFifo
Definition: fifo.c:35
FRAME_OPAQUE_SEND_COMMAND
@ FRAME_OPAQUE_SEND_COMMAND
Definition: ffmpeg.h:90
FilterGraphThread
Definition: ffmpeg_filter.c:85
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:276
av_bprint_finalize
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:240
InputFilterPriv::displaymatrix
int32_t displaymatrix[9]
Definition: ffmpeg_filter.c:154
AVChannelLayout
An AVChannelLayout holds information about the channel layout of audio data.
Definition: channel_layout.h:319
OutputFilterPriv::color_ranges
enum AVColorRange * color_ranges
Definition: ffmpeg_filter.c:238
FilterGraphThread::graph
AVFilterGraph * graph
Definition: ffmpeg_filter.c:86
av_buffersrc_parameters_alloc
AVBufferSrcParameters * av_buffersrc_parameters_alloc(void)
Allocate a new AVBufferSrcParameters instance.
Definition: buffersrc.c:105
AVFilterInOut::filter_ctx
AVFilterContext * filter_ctx
filter context associated to this input/output
Definition: avfilter.h:724
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:83
av_err2str
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:122
AVFrame::sample_rate
int sample_rate
Sample rate of the audio data.
Definition: frame.h:573
OutputFilterPriv::tb_out_locked
int tb_out_locked
Definition: ffmpeg_filter.c:225
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:424
avfilter_link
int avfilter_link(AVFilterContext *src, unsigned srcpad, AVFilterContext *dst, unsigned dstpad)
Link two filters together.
Definition: avfilter.c:149
AVBufferSrcParameters::hw_frames_ctx
AVBufferRef * hw_frames_ctx
Video with a hwaccel pixel format only.
Definition: buffersrc.h:106
start_time
static int64_t start_time
Definition: ffplay.c:326
AVFILTER_FLAG_HWDEVICE
#define AVFILTER_FLAG_HWDEVICE
The filter can create hardware frames using AVFilterContext.hw_device_ctx.
Definition: avfilter.h:171
InputFilterPriv::color_range
enum AVColorRange color_range
Definition: ffmpeg_filter.c:138
AV_SAMPLE_FMT_NONE
@ AV_SAMPLE_FMT_NONE
Definition: samplefmt.h:56
MATRIX_CHANGED
@ MATRIX_CHANGED
Definition: ffmpeg_filter.c:2853
FilterCommand::time
double time
Definition: ffmpeg_filter.c:261
InputFilterPriv::initialize
unsigned int initialize
marks if sub2video_update should force an initialization
Definition: ffmpeg_filter.c:166
InputFilterPriv::displaymatrix_applied
int displaymatrix_applied
Definition: ffmpeg_filter.c:153
avfilter_graph_queue_command
int avfilter_graph_queue_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, int flags, double ts)
Queue a command for one or more filter instances.
Definition: avfiltergraph.c:1343
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
AVFrame::time_base
AVRational time_base
Time base for the timestamps in this frame.
Definition: frame.h:527
AVFrameSideData::data
uint8_t * data
Definition: frame.h:267
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:497
FilterGraphPriv::disable_conversions
int disable_conversions
Definition: ffmpeg_filter.c:57
frame_data
FrameData * frame_data(AVFrame *frame)
Get our axiliary frame data attached to the frame, allocating it if needed.
Definition: ffmpeg.c:453
AVSubtitle::end_display_time
uint32_t end_display_time
Definition: avcodec.h:2084
FilterGraphThread::eof_out
uint8_t * eof_out
Definition: ffmpeg_filter.c:102
FilterGraphPriv::graph_desc
const char * graph_desc
Definition: ffmpeg_filter.c:61
allocate_array_elem
void * allocate_array_elem(void *ptr, size_t elem_size, int *nb_elems)
Atomically add a new element to an array of pointers, i.e.
Definition: cmdutils.c:1467
FPSConvContext::vsync_method
enum VideoSyncMethod vsync_method
Definition: ffmpeg_filter.c:189
av_frame_remove_side_data
void av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type)
Remove and free all side data instances of the given type.
Definition: frame.c:727
InputFilterPriv::width
int width
Definition: ffmpeg_filter.c:135
AVBufferSrcParameters::time_base
AVRational time_base
The timebase to be used for the timestamps on the input frames.
Definition: buffersrc.h:82
AV_PIX_FMT_RGB32
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:500
filter_is_buffersrc
static int filter_is_buffersrc(const AVFilterContext *f)
Definition: ffmpeg_filter.c:1890
fg_finalise_bindings
int fg_finalise_bindings(void)
Definition: ffmpeg_filter.c:1414
AUDIO_CHANGED
@ AUDIO_CHANGED
Definition: ffmpeg_filter.c:2852
sch_filter_receive
int sch_filter_receive(Scheduler *sch, unsigned fg_idx, unsigned *in_idx, AVFrame *frame)
Called by filtergraph tasks to obtain frames for filtering.
Definition: ffmpeg_sched.c:2395
av_dict_free
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
Definition: dict.c:221
unknown_if_null
static const char * unknown_if_null(const char *str)
Definition: ffmpeg_filter.c:2858
InputFilterOptions::sub2video_height
int sub2video_height
Definition: ffmpeg.h:288
decoders
Decoder ** decoders
Definition: ffmpeg.c:113
OutputFilterPriv::log_parent
void * log_parent
Definition: ffmpeg_filter.c:202
nb_decoders
int nb_decoders
Definition: ffmpeg.c:114
OutputFilter::type
enum AVMediaType type
Definition: ffmpeg.h:370
read_frames
static int read_frames(FilterGraph *fg, FilterGraphThread *fgt, AVFrame *frame)
Definition: ffmpeg_filter.c:2657
av_channel_layout_compare
int av_channel_layout_compare(const AVChannelLayout *chl, const AVChannelLayout *chl1)
Check whether two channel layouts are semantically the same, i.e.
Definition: channel_layout.c:809
SUBTITLE_BITMAP
@ SUBTITLE_BITMAP
A bitmap, pict will be set.
Definition: avcodec.h:2037
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:220
send_frame
static int send_frame(FilterGraph *fg, FilterGraphThread *fgt, InputFilter *ifilter, AVFrame *frame)
Definition: ffmpeg_filter.c:2863
avfilter_init_str
int avfilter_init_str(AVFilterContext *filter, const char *args)
Initialize a filter with the supplied parameters.
Definition: avfilter.c:954
buffersink.h
av_buffersink_get_side_data
const AVFrameSideData *const * av_buffersink_get_side_data(const AVFilterContext *ctx, int *nb_side_data)
Definition: buffersink.c:367
av_channel_layout_default
void av_channel_layout_default(AVChannelLayout *ch_layout, int nb_channels)
Get the default channel layout for a given number of channels.
Definition: channel_layout.c:839
av_find_nearest_q_idx
int av_find_nearest_q_idx(AVRational q, const AVRational *q_list)
Find the value in a list of rationals nearest a given reference rational.
Definition: rational.c:140
OutputFilterPriv::color_range
enum AVColorRange color_range
Definition: ffmpeg_filter.c:215
av_buffersink_get_w
int av_buffersink_get_w(const AVFilterContext *ctx)
AV_BUFFERSRC_FLAG_PUSH
@ AV_BUFFERSRC_FLAG_PUSH
Immediately push the frame to the output.
Definition: buffersrc.h:46
FilterCommand::all_filters
int all_filters
Definition: ffmpeg_filter.c:262
AV_BUFFERSRC_FLAG_KEEP_REF
@ AV_BUFFERSRC_FLAG_KEEP_REF
Keep a reference to the frame.
Definition: buffersrc.h:53
FPSConvContext::framerate_clip
int framerate_clip
Definition: ffmpeg_filter.c:194
bprint.h
FPSConvContext::frame_number
int64_t frame_number
Definition: ffmpeg_filter.c:178
av_buffersrc_parameters_set
int av_buffersrc_parameters_set(AVFilterContext *ctx, AVBufferSrcParameters *param)
Initialize the buffersrc or abuffersrc filter with the provided parameters.
Definition: buffersrc.c:118
graph_opts_apply
static int graph_opts_apply(void *logctx, AVFilterGraphSegment *seg)
Definition: ffmpeg_filter.c:546
FPSConvContext
Definition: ffmpeg_filter.c:175
lrintf
#define lrintf(x)
Definition: libm_mips.h:72
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
AVBufferSrcParameters::width
int width
Video only, the display dimensions of the input frames.
Definition: buffersrc.h:87
InputFilterPriv::index
int index
Definition: ffmpeg_filter.c:110
FrameData::bits_per_raw_sample
int bits_per_raw_sample
Definition: ffmpeg.h:673
av_frame_side_data_free
void av_frame_side_data_free(AVFrameSideData ***sd, int *nb_sd)
Free all side data entries and their contents, then zeroes out the values which the pointers are poin...
Definition: side_data.c:131
fg_send_command
void fg_send_command(FilterGraph *fg, double time, const char *target, const char *command, const char *arg, int all_filters)
Definition: ffmpeg_filter.c:3183
downmix_info.h
FilterGraphPriv::is_simple
int is_simple
Definition: ffmpeg_filter.c:51
InputFilterOptions::fallback
AVFrame * fallback
Definition: ffmpeg.h:293
av_buffersrc_add_frame_flags
int attribute_align_arg av_buffersrc_add_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Add a frame to the buffer source.
Definition: buffersrc.c:204
VSYNC_CFR
@ VSYNC_CFR
Definition: ffmpeg.h:68
src2
const pixel * src2
Definition: h264pred_template.c:421
configure_input_audio_filter
static int configure_input_audio_filter(FilterGraph *fg, AVFilterGraph *graph, InputFilter *ifilter, AVFilterInOut *in)
Definition: ffmpeg_filter.c:1820
AVColorSpace
AVColorSpace
YUV colorspace type.
Definition: pixfmt.h:673
FPSConvContext::framerate_max
AVRational framerate_max
Definition: ffmpeg_filter.c:192
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:56
AVSampleFormat
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:55
delta
float delta
Definition: vorbis_enc_data.h:430
FRAME_OPAQUE_EOF
@ FRAME_OPAQUE_EOF
Definition: ffmpeg.h:89
InputFile::ctx
AVFormatContext * ctx
Definition: ffmpeg.h:476
av_frame_move_ref
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:525
cfgp_from_cfg
static const FilterGraphPriv * cfgp_from_cfg(const FilterGraph *fg)
Definition: ffmpeg_filter.c:79
graph_parse
static int graph_parse(void *logctx, AVFilterGraph *graph, const char *desc, AVFilterInOut **inputs, AVFilterInOut **outputs, AVBufferRef *hw_device)
Definition: ffmpeg_filter.c:570
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:498
InputFilterPriv::eof
int eof
Definition: ffmpeg_filter.c:127
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:256
av_inv_q
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
av_buffer_replace
int av_buffer_replace(AVBufferRef **pdst, const AVBufferRef *src)
Ensure dst refers to the same data as src.
Definition: buffer.c:233
IFILTER_FLAG_DROPCHANGED
@ IFILTER_FLAG_DROPCHANGED
Definition: ffmpeg.h:266
AVFrame::side_data
AVFrameSideData ** side_data
Definition: frame.h:607
len
int len
Definition: vorbis_enc_data.h:426
SchedulerNode
Definition: ffmpeg_sched.h:103
AVCOL_SPC_UNSPECIFIED
@ AVCOL_SPC_UNSPECIFIED
Definition: pixfmt.h:676
filtergraphs
FilterGraph ** filtergraphs
Definition: ffmpeg.c:110
int_cb
const AVIOInterruptCB int_cb
Definition: ffmpeg.c:307
OutputFilterPriv::color_space
enum AVColorSpace color_space
Definition: ffmpeg_filter.c:214
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
outputs
static const AVFilterPad outputs[]
Definition: af_aap.c:310
sch_connect
int sch_connect(Scheduler *sch, SchedulerNode src, SchedulerNode dst)
Definition: ffmpeg_sched.c:919
FFMPEG_OPT_VSYNC_DROP
#define FFMPEG_OPT_VSYNC_DROP
Definition: ffmpeg.h:59
av_buffersink_get_h
int av_buffersink_get_h(const AVFilterContext *ctx)
sch_filter_command
int sch_filter_command(Scheduler *sch, unsigned fg_idx, AVFrame *frame)
Definition: ffmpeg_sched.c:2505
AVFilter
Filter definition.
Definition: avfilter.h:199
video_sync_process
static void video_sync_process(OutputFilterPriv *ofp, AVFrame *frame, int64_t *nb_frames, int64_t *nb_frames_prev)
Definition: ffmpeg_filter.c:2319
ifp_from_ifilter
static InputFilterPriv * ifp_from_ifilter(InputFilter *ifilter)
Definition: ffmpeg_filter.c:170
fg_create
int fg_create(FilterGraph **pfg, char *graph_desc, Scheduler *sch)
Create a new filtergraph in the global filtergraph list.
Definition: ffmpeg_filter.c:1068
mid_pred
#define mid_pred
Definition: mathops.h:96
AV_BUFFERSINK_FLAG_NO_REQUEST
#define AV_BUFFERSINK_FLAG_NO_REQUEST
Tell av_buffersink_get_buffer_ref() not to request a frame from its input.
Definition: buffersink.h:91
ret
ret
Definition: filter_design.txt:187
AVStream
Stream structure.
Definition: avformat.h:745
AV_LOG_FATAL
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:203
pixfmt.h
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:80
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
InputFilter::name
uint8_t * name
Definition: ffmpeg.h:354
VSYNC_DROP
@ VSYNC_DROP
Definition: ffmpeg.h:72
av_channel_layout_check
int av_channel_layout_check(const AVChannelLayout *channel_layout)
Check whether a channel layout is valid, i.e.
Definition: channel_layout.c:783
FPSConvContext::last_frame
AVFrame * last_frame
Definition: ffmpeg_filter.c:176
InputFile::streams
InputStream ** streams
Definition: ffmpeg.h:490
insert_filter
static int insert_filter(AVFilterContext **last_filter, int *pad_idx, const char *filter_name, const char *args)
Definition: ffmpeg_filter.c:1494
AVFilterParams::opts
AVDictionary * opts
Options to be applied to the filter.
Definition: avfilter.h:890
OutputFilterPriv::next_pts
int64_t next_pts
Definition: ffmpeg_filter.c:245
av_bprintf
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:99
ReinitReason
ReinitReason
Definition: ffmpeg_filter.c:2850
av_fifo_alloc2
AVFifo * av_fifo_alloc2(size_t nb_elems, size_t elem_size, unsigned int flags)
Allocate and initialize an AVFifo with a given element size.
Definition: fifo.c:47
AVOption::type
enum AVOptionType type
Definition: opt.h:445
AVFrame::sample_aspect_ratio
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:507
avfilter_pad_get_type
enum AVMediaType avfilter_pad_get_type(const AVFilterPad *pads, int pad_idx)
Get the type of an AVFilterPad.
Definition: avfilter.c:987
av_dynarray_add_nofree
int av_dynarray_add_nofree(void *tab_ptr, int *nb_ptr, void *elem)
Add an element to a dynamic array.
Definition: mem.c:315
AVBufferSrcParameters::color_range
enum AVColorRange color_range
Definition: buffersrc.h:122
FrameOpaque
FrameOpaque
Definition: ffmpeg.h:87
OutputFilterPriv::swr_opts
AVDictionary * swr_opts
Definition: ffmpeg_filter.c:230
av_get_media_type_string
const char * av_get_media_type_string(enum AVMediaType media_type)
Return a string describing the media_type enum, NULL if media_type is unknown.
Definition: utils.c:28
AVFrame::height
int height
Definition: frame.h:482
AVStream::index
int index
stream index in AVFormatContext
Definition: avformat.h:751
IFILTER_FLAG_CROP
@ IFILTER_FLAG_CROP
Definition: ffmpeg.h:265
DEF_CHOOSE_FORMAT
#define DEF_CHOOSE_FORMAT(name, type, var, supported_list, none, printf_format, get_name)
Definition: ffmpeg_filter.c:386
channel_layout.h
AVBufferSrcParameters
This structure contains the parameters describing the frames that will be passed to this filter.
Definition: buffersrc.h:73
av_buffersink_get_sample_rate
int av_buffersink_get_sample_rate(const AVFilterContext *ctx)
AVBufferSrcParameters::format
int format
video: the pixel format, value corresponds to enum AVPixelFormat audio: the sample format,...
Definition: buffersrc.h:78
describe_filter_link
static char * describe_filter_link(FilterGraph *fg, AVFilterInOut *inout, int in)
Definition: ffmpeg_filter.c:627
buffer
the frame and frame reference mechanism is intended to as much as expensive copies of that data while still allowing the filters to produce correct results The data is stored in buffers represented by AVFrame structures Several references can point to the same frame buffer
Definition: filter_design.txt:49
InputFilterPriv::bound
int bound
Definition: ffmpeg_filter.c:128
avfilter_init_dict
int avfilter_init_dict(AVFilterContext *ctx, AVDictionary **options)
Initialize a filter with the supplied dictionary of options.
Definition: avfilter.c:913
AVRational::den
int den
Denominator.
Definition: rational.h:60
InputStream::file
struct InputFile * file
Definition: ffmpeg.h:441
AVFilterChain
A filterchain is a list of filter specifications.
Definition: avfilter.h:905
InputFilterPriv::frame_queue
AVFifo * frame_queue
Definition: ffmpeg_filter.c:148
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
avfilter.h
InputFilterPriv::type_src
enum AVMediaType type_src
Definition: ffmpeg_filter.c:125
av_channel_layout_uninit
void av_channel_layout_uninit(AVChannelLayout *channel_layout)
Free any allocated data in the channel layout and reset the channel count to 0.
Definition: channel_layout.c:442
FilterGraphPriv::is_meta
int is_meta
Definition: ffmpeg_filter.c:54
insert_trim
static int insert_trim(void *logctx, int64_t start_time, int64_t duration, AVFilterContext **last_filter, int *pad_idx, const char *filter_name)
Definition: ffmpeg_filter.c:1443
IFILTER_FLAG_CFR
@ IFILTER_FLAG_CFR
Definition: ffmpeg.h:264
AVFILTER_FLAG_METADATA_ONLY
#define AVFILTER_FLAG_METADATA_ONLY
The filter is a "metadata" filter - it does not modify the frame data in any way.
Definition: avfilter.h:166
avio_read
int avio_read(AVIOContext *s, unsigned char *buf, int size)
Read size bytes from AVIOContext into buf.
Definition: aviobuf.c:612
ifilter_bind_fg
static int ifilter_bind_fg(InputFilterPriv *ifp, FilterGraph *fg_src, int out_idx)
Definition: ffmpeg_filter.c:932
choose_out_timebase
static int choose_out_timebase(OutputFilterPriv *ofp, AVFrame *frame)
Definition: ffmpeg_filter.c:2199
OutputFilterPriv::flags
unsigned flags
Definition: ffmpeg_filter.c:248
OutputFilterPriv::sample_rates
const int * sample_rates
Definition: ffmpeg_filter.c:236
AVSideDataDescriptor
This struct describes the properties of a side data type.
Definition: frame.h:313
AVERROR_FILTER_NOT_FOUND
#define AVERROR_FILTER_NOT_FOUND
Filter not found.
Definition: error.h:60
sub2video_copy_rect
static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h, AVSubtitleRect *r)
Definition: ffmpeg_filter.c:298
InputFilterPriv::side_data
AVFrameSideData ** side_data
Definition: ffmpeg_filter.c:145
AVFilterGraphSegment::nb_chains
size_t nb_chains
Definition: avfilter.h:931
AVFilterContext
An instance of a filter.
Definition: avfilter.h:257
FilterGraph::class
const AVClass * class
Definition: ffmpeg.h:377
av_channel_layout_copy
int av_channel_layout_copy(AVChannelLayout *dst, const AVChannelLayout *src)
Make a copy of a channel layout.
Definition: channel_layout.c:449
OutputFilter
Definition: ffmpeg.h:357
InputFilterPriv::drop_warned
int drop_warned
Definition: ffmpeg_filter.c:129
av_log_once
void av_log_once(void *avcl, int initial_level, int subsequent_level, int *state, const char *fmt,...)
Definition: log.c:448
sub2video_frame
static int sub2video_frame(InputFilter *ifilter, AVFrame *frame, int buffer)
Definition: ffmpeg_filter.c:2741
InputFilterPriv::ifilter
InputFilter ifilter
Definition: ffmpeg_filter.c:106
AVIO_FLAG_READ
#define AVIO_FLAG_READ
read-only
Definition: avio.h:617
av_strdup
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:272
desc
const char * desc
Definition: libsvtav1.c:79
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
configure_output_video_filter
static int configure_output_video_filter(FilterGraphPriv *fgp, AVFilterGraph *graph, OutputFilter *ofilter, AVFilterInOut *out)
Definition: ffmpeg_filter.c:1520
ViewSpecifier::type
enum ViewSpecifierType type
Definition: ffmpeg.h:129
av_buffersrc_get_nb_failed_requests
unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src)
Get the number of failed requests.
Definition: buffersrc.c:315
OutputFilterPriv::color_spaces
enum AVColorSpace * color_spaces
Definition: ffmpeg_filter.c:237
mem.h
AVBufferRef
A reference to a data buffer.
Definition: buffer.h:82
avio_open2
int avio_open2(AVIOContext **s, const char *filename, int flags, const AVIOInterruptCB *int_cb, AVDictionary **options)
Create and initialize a AVIOContext for accessing the resource indicated by url.
Definition: avio.c:491
av_buffersink_get_colorspace
enum AVColorSpace av_buffersink_get_colorspace(const AVFilterContext *ctx)
adjust_frame_pts_to_encoder_tb
static double adjust_frame_pts_to_encoder_tb(void *logctx, AVFrame *frame, AVRational tb_dst, int64_t start_time)
Definition: ffmpeg_filter.c:2276
OutputFilter::nb_frames_drop
atomic_uint_least64_t nb_frames_drop
Definition: ffmpeg.h:373
auto_conversion_filters
int auto_conversion_filters
Definition: ffmpeg_opt.c:78
llrint
#define llrint(x)
Definition: libm.h:396
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:265
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
InputStream::index
int index
Definition: ffmpeg.h:443
sch_filter_receive_finish
void sch_filter_receive_finish(Scheduler *sch, unsigned fg_idx, unsigned in_idx)
Called by filter tasks to signal that a filter input will no longer accept input.
Definition: ffmpeg_sched.c:2439
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
AVDictionaryEntry
Definition: dict.h:89
ENC_TIME_BASE_FILTER
@ ENC_TIME_BASE_FILTER
Definition: ffmpeg.h:78
FilterCommand::target
char * target
Definition: ffmpeg_filter.c:257
av_frame_side_data_desc
const AVSideDataDescriptor * av_frame_side_data_desc(enum AVFrameSideDataType type)
Definition: side_data.c:60
fg_class
static const AVClass fg_class
Definition: ffmpeg_filter.c:1061
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
av_dict_set
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:86
av_dict_get_string
int av_dict_get_string(const AVDictionary *m, char **buffer, const char key_val_sep, const char pairs_sep)
Get dictionary entries as a string.
Definition: dict.c:248
OFILTER_FLAG_DISABLE_CONVERT
@ OFILTER_FLAG_DISABLE_CONVERT
Definition: ffmpeg.h:297
av_dict_copy
int av_dict_copy(AVDictionary **dst, const AVDictionary *src, int flags)
Copy entries from one AVDictionary struct into another.
Definition: dict.c:235
Decoder::type
enum AVMediaType type
Definition: ffmpeg.h:426
InputFilterPriv::format
int format
Definition: ffmpeg_filter.c:133
InputFilterPriv::end_pts
int64_t end_pts
Definition: ffmpeg_filter.c:163
nb_filtergraphs
int nb_filtergraphs
Definition: ffmpeg.c:111
int32_t
int32_t
Definition: audioconvert.c:56
sub2video_update
static void sub2video_update(InputFilterPriv *ifp, int64_t heartbeat_pts, const AVSubtitle *sub)
Definition: ffmpeg_filter.c:345
timestamp.h
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
avio_close
int avio_close(AVIOContext *s)
Close the resource accessed by the AVIOContext s and free it.
Definition: avio.c:616
OutputFilterPriv::format
int format
Definition: ffmpeg_filter.c:210
av_strlcpy
size_t av_strlcpy(char *dst, const char *src, size_t size)
Copy the string src to dst, but no more than size - 1 bytes, and null-terminate dst.
Definition: avstring.c:85
avfilter_graph_send_command
int avfilter_graph_send_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, char *res, int res_len, int flags)
Send a command to one or more filter instances.
Definition: avfiltergraph.c:1313
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
InputFilterPriv::opts
InputFilterOptions opts
Definition: ffmpeg_filter.c:108
dts_error_threshold
float dts_error_threshold
Definition: ffmpeg_opt.c:56
OutputFilterPriv::trim_duration_us
int64_t trim_duration_us
Definition: ffmpeg_filter.c:242
av_fifo_freep2
void av_fifo_freep2(AVFifo **f)
Free an AVFifo and reset pointer to NULL.
Definition: fifo.c:286
InputFilterPriv::downmixinfo
AVDownmixInfo downmixinfo
Definition: ffmpeg_filter.c:157
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
av_ts2str
#define av_ts2str(ts)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:54
h
h
Definition: vp9dsp_template.c:2070
av_bprint_chars
void av_bprint_chars(AVBPrint *buf, char c, unsigned n)
Append char c n times to a print buffer.
Definition: bprint.c:145
hw_device_for_filter
AVBufferRef * hw_device_for_filter(void)
Get a hardware device to be used with this filtergraph.
Definition: ffmpeg_hw.c:298
AVDictionaryEntry::value
char * value
Definition: dict.h:91
bind_inputs
static int bind_inputs(FilterGraph *fg)
Definition: ffmpeg_filter.c:1396
AVFilterGraph::nb_filters
unsigned nb_filters
Definition: avfilter.h:573
avstring.h
AVFilterContext::filter
const AVFilter * filter
the AVFilter of which this is an instance
Definition: avfilter.h:260
AVColorRange
AVColorRange
Visual content value range.
Definition: pixfmt.h:715
frame_data_c
const FrameData * frame_data_c(AVFrame *frame)
Definition: ffmpeg.c:459
OutputFilterPriv::tb_out
AVRational tb_out
Definition: ffmpeg_filter.c:222
AVFilterInOut
A linked-list of the inputs/outputs of the filter chain.
Definition: avfilter.h:719
VSYNC_PASSTHROUGH
@ VSYNC_PASSTHROUGH
Definition: ffmpeg.h:67
OutputFilterPriv::height
int height
Definition: ffmpeg_filter.c:211
AV_FRAME_DATA_DOWNMIX_INFO
@ AV_FRAME_DATA_DOWNMIX_INFO
Metadata relevant to a downmix procedure.
Definition: frame.h:73
snprintf
#define snprintf
Definition: snprintf.h:34
SCH_FILTER_IN
#define SCH_FILTER_IN(filter, input)
Definition: ffmpeg_sched.h:126
FPSConvContext::framerate
AVRational framerate
Definition: ffmpeg_filter.c:191
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
buffersrc.h
fg_thread_set_name
static void fg_thread_set_name(const FilterGraph *fg)
Definition: ffmpeg_filter.c:3004
ist_find_unused
InputStream * ist_find_unused(enum AVMediaType type)
Find an unused input stream of given type.
Definition: ffmpeg_demux.c:165
sub2video_prepare
static void sub2video_prepare(InputFilterPriv *ifp)
Definition: ffmpeg_filter.c:1691
av_rescale_q_rnd
int64_t av_rescale_q_rnd(int64_t a, AVRational bq, AVRational cq, enum AVRounding rnd)
Rescale a 64-bit integer by 2 rational numbers with specified rounding.
Definition: mathematics.c:134
av_dict_iterate
const AVDictionaryEntry * av_dict_iterate(const AVDictionary *m, const AVDictionaryEntry *prev)
Iterate over a dictionary.
Definition: dict.c:42
AVSubtitle::start_display_time
uint32_t start_display_time
Definition: avcodec.h:2083
FilterCommand::command
char * command
Definition: ffmpeg_filter.c:258
src
#define src
Definition: vp8dsp.c:248
FilterCommand
Definition: ffmpeg_filter.c:256
AV_FIFO_FLAG_AUTO_GROW
#define AV_FIFO_FLAG_AUTO_GROW
Automatically resize the FIFO on writes, so that the data fits.
Definition: fifo.h:63
InputFilterPriv::height
int height
Definition: ffmpeg_filter.c:135
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:3261
OutputFilter::nb_frames_dup
atomic_uint_least64_t nb_frames_dup
Definition: ffmpeg.h:372
filter_complex_nbthreads
int filter_complex_nbthreads
Definition: ffmpeg_opt.c:76
InputFilterOptions::framerate
AVRational framerate
Definition: ffmpeg.h:280
av_buffersink_get_color_range
enum AVColorRange av_buffersink_get_color_range(const AVFilterContext *ctx)
ff_thread_setname
static int ff_thread_setname(const char *name)
Definition: thread.h:216
LATENCY_PROBE_FILTER_POST
@ LATENCY_PROBE_FILTER_POST
Definition: ffmpeg.h:103
FPSConvContext::framerate_supported
const AVRational * framerate_supported
Definition: ffmpeg_filter.c:193