FFmpeg
ffmpeg_filter.c
Go to the documentation of this file.
1 /*
2  * ffmpeg filter configuration
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <stdint.h>
22 
23 #include "ffmpeg.h"
24 #include "graph/graphprint.h"
25 
26 #include "libavfilter/avfilter.h"
27 #include "libavfilter/buffersink.h"
28 #include "libavfilter/buffersrc.h"
29 
30 #include "libavutil/attributes.h"
31 #include "libavutil/avassert.h"
32 #include "libavutil/avstring.h"
33 #include "libavutil/bprint.h"
35 #include "libavutil/downmix_info.h"
36 #include "libavutil/mem.h"
37 #include "libavutil/opt.h"
38 #include "libavutil/pixdesc.h"
39 #include "libavutil/pixfmt.h"
40 #include "libavutil/samplefmt.h"
41 #include "libavutil/time.h"
42 #include "libavutil/timestamp.h"
43 
44 typedef struct FilterGraphPriv {
46 
47  // name used for logging
48  char log_name[32];
49 
50  int is_simple;
51  // true when the filtergraph contains only meta filters
52  // that do not modify the frame data
53  int is_meta;
54  // source filters are present in the graph
57 
58  unsigned nb_outputs_done;
59 
61 
62  // frame for temporarily holding output from the filtergraph
64  // frame for sending output to the encoder
66 
68  unsigned sch_idx;
70 
72 {
73  return (FilterGraphPriv*)fg;
74 }
75 
76 static const FilterGraphPriv *cfgp_from_cfg(const FilterGraph *fg)
77 {
78  return (const FilterGraphPriv*)fg;
79 }
80 
81 // data that is local to the filter thread and not visible outside of it
82 typedef struct FilterGraphThread {
84 
86 
87  // Temporary buffer for output frames, since on filtergraph reset
88  // we cannot send them to encoders immediately.
89  // The output index is stored in frame opaque.
91 
92  // index of the next input to request from the scheduler
93  unsigned next_in;
94  // set to 1 after at least one frame passed through this output
95  int got_frame;
96 
97  // EOF status of each input/output, as received by the thread
98  uint8_t *eof_in;
99  uint8_t *eof_out;
101 
102 typedef struct InputFilterPriv {
104 
106 
107  // used to hold submitted input
109 
110  // For inputs bound to a filtergraph output
112 
113  // source data type: AVMEDIA_TYPE_SUBTITLE for sub2video,
114  // same as type otherwise
116 
117  int eof;
118  int bound;
120  uint64_t nb_dropped;
121 
122  // parameters configured for this input
123  int format;
124 
125  int width, height;
130 
133 
135 
138 
140 
142 
146 
149 
150  struct {
151  AVFrame *frame;
152 
155 
156  /// marks if sub2video_update should force an initialization
157  unsigned int initialize;
158  } sub2video;
160 
162 {
163  return (InputFilterPriv*)ifilter;
164 }
165 
166 typedef struct FPSConvContext {
168  /* number of frames emitted by the video-encoding sync code */
170  /* history of nb_frames_prev, i.e. the number of times the
171  * previous frame was duplicated by vsync code in recent
172  * do_video_out() calls */
174 
175  uint64_t dup_warning;
176 
179 
181 
187 
188 typedef struct OutputFilterPriv {
190 
191  void *log_parent;
192  char log_name[32];
193 
194  int needed;
195 
196  /* desired output stream properties */
197  int format;
198  int width, height;
204 
205  unsigned crop_top;
206  unsigned crop_bottom;
207  unsigned crop_left;
208  unsigned crop_right;
209 
212 
213  // time base in which the output is sent to our downstream
214  // does not need to match the filtersink's timebase
216  // at least one frame with the above timebase was sent
217  // to our downstream, so it cannot change anymore
219 
221 
224 
225  // those are only set if no format is specified and the encoder gives us multiple options
226  // They point directly to the relevant lists of the encoder.
227  union {
228  const enum AVPixelFormat *pix_fmts;
230  };
232  const int *sample_rates;
236 
238 
242  // offset for output timestamps, in AV_TIME_BASE_Q
246 
247  unsigned flags;
249 
251 {
252  return (OutputFilterPriv*)ofilter;
253 }
254 
255 typedef struct FilterCommand {
256  char *target;
257  char *command;
258  char *arg;
259 
260  double time;
262 } FilterCommand;
263 
264 static void filter_command_free(void *opaque, uint8_t *data)
265 {
267 
268  av_freep(&fc->target);
269  av_freep(&fc->command);
270  av_freep(&fc->arg);
271 
272  av_free(data);
273 }
274 
276 {
277  AVFrame *frame = ifp->sub2video.frame;
278  int ret;
279 
281 
282  frame->width = ifp->width;
283  frame->height = ifp->height;
284  frame->format = ifp->format;
285  frame->colorspace = ifp->color_space;
286  frame->color_range = ifp->color_range;
287  frame->alpha_mode = ifp->alpha_mode;
288 
290  if (ret < 0)
291  return ret;
292 
293  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
294 
295  return 0;
296 }
297 
298 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
299  AVSubtitleRect *r)
300 {
301  uint32_t *pal, *dst2;
302  uint8_t *src, *src2;
303  int x, y;
304 
305  if (r->type != SUBTITLE_BITMAP) {
306  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
307  return;
308  }
309  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
310  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
311  r->x, r->y, r->w, r->h, w, h
312  );
313  return;
314  }
315 
316  dst += r->y * dst_linesize + r->x * 4;
317  src = r->data[0];
318  pal = (uint32_t *)r->data[1];
319  for (y = 0; y < r->h; y++) {
320  dst2 = (uint32_t *)dst;
321  src2 = src;
322  for (x = 0; x < r->w; x++)
323  *(dst2++) = pal[*(src2++)];
324  dst += dst_linesize;
325  src += r->linesize[0];
326  }
327 }
328 
330 {
331  AVFrame *frame = ifp->sub2video.frame;
332  int ret;
333 
334  av_assert1(frame->data[0]);
335  ifp->sub2video.last_pts = frame->pts = pts;
339  if (ret != AVERROR_EOF && ret < 0)
341  "Error while add the frame to buffer source(%s).\n",
342  av_err2str(ret));
343 }
344 
345 static void sub2video_update(InputFilterPriv *ifp, int64_t heartbeat_pts,
346  const AVSubtitle *sub)
347 {
348  AVFrame *frame = ifp->sub2video.frame;
349  int8_t *dst;
350  int dst_linesize;
351  int num_rects;
352  int64_t pts, end_pts;
353 
354  if (sub) {
355  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
356  AV_TIME_BASE_Q, ifp->time_base);
357  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
358  AV_TIME_BASE_Q, ifp->time_base);
359  num_rects = sub->num_rects;
360  } else {
361  /* If we are initializing the system, utilize current heartbeat
362  PTS as the start time, and show until the following subpicture
363  is received. Otherwise, utilize the previous subpicture's end time
364  as the fall-back value. */
365  pts = ifp->sub2video.initialize ?
366  heartbeat_pts : ifp->sub2video.end_pts;
367  end_pts = INT64_MAX;
368  num_rects = 0;
369  }
370  if (sub2video_get_blank_frame(ifp) < 0) {
372  "Impossible to get a blank canvas.\n");
373  return;
374  }
375  dst = frame->data [0];
376  dst_linesize = frame->linesize[0];
377  for (int i = 0; i < num_rects; i++)
378  sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
379  sub2video_push_ref(ifp, pts);
380  ifp->sub2video.end_pts = end_pts;
381  ifp->sub2video.initialize = 0;
382 }
383 
384 /* Define a function for appending a list of allowed formats
385  * to an AVBPrint. If nonempty, the list will have a header. */
386 #define DEF_CHOOSE_FORMAT(name, type, var, supported_list, none, printf_format, get_name) \
387 static void choose_ ## name (OutputFilterPriv *ofp, AVBPrint *bprint) \
388 { \
389  if (ofp->var == none && !ofp->supported_list) \
390  return; \
391  av_bprintf(bprint, #name "="); \
392  if (ofp->var != none) { \
393  av_bprintf(bprint, printf_format, get_name(ofp->var)); \
394  } else { \
395  const type *p; \
396  \
397  for (p = ofp->supported_list; *p != none; p++) { \
398  av_bprintf(bprint, printf_format "|", get_name(*p)); \
399  } \
400  if (bprint->len > 0) \
401  bprint->str[--bprint->len] = '\0'; \
402  } \
403  av_bprint_chars(bprint, ':', 1); \
404 }
405 
408 
411 
413  "%d", )
414 
415 DEF_CHOOSE_FORMAT(color_spaces, enum AVColorSpace, color_space, color_spaces,
417 
418 DEF_CHOOSE_FORMAT(color_ranges, enum AVColorRange, color_range, color_ranges,
420 
421 DEF_CHOOSE_FORMAT(alpha_modes, enum AVAlphaMode, alpha_mode, alpha_modes,
423 
424 static void choose_channel_layouts(OutputFilterPriv *ofp, AVBPrint *bprint)
425 {
426  if (av_channel_layout_check(&ofp->ch_layout)) {
427  av_bprintf(bprint, "channel_layouts=");
428  av_channel_layout_describe_bprint(&ofp->ch_layout, bprint);
429  } else if (ofp->ch_layouts) {
430  const AVChannelLayout *p;
431 
432  av_bprintf(bprint, "channel_layouts=");
433  for (p = ofp->ch_layouts; p->nb_channels; p++) {
435  av_bprintf(bprint, "|");
436  }
437  if (bprint->len > 0)
438  bprint->str[--bprint->len] = '\0';
439  } else
440  return;
441  av_bprint_chars(bprint, ':', 1);
442 }
443 
444 static int read_binary(void *logctx, const char *path,
445  uint8_t **data, int *len)
446 {
447  AVIOContext *io = NULL;
448  int64_t fsize;
449  int ret;
450 
451  *data = NULL;
452  *len = 0;
453 
454  ret = avio_open2(&io, path, AVIO_FLAG_READ, &int_cb, NULL);
455  if (ret < 0) {
456  av_log(logctx, AV_LOG_ERROR, "Cannot open file '%s': %s\n",
457  path, av_err2str(ret));
458  return ret;
459  }
460 
461  fsize = avio_size(io);
462  if (fsize < 0 || fsize > INT_MAX) {
463  av_log(logctx, AV_LOG_ERROR, "Cannot obtain size of file %s\n", path);
464  ret = AVERROR(EIO);
465  goto fail;
466  }
467 
468  *data = av_malloc(fsize);
469  if (!*data) {
470  ret = AVERROR(ENOMEM);
471  goto fail;
472  }
473 
474  ret = avio_read(io, *data, fsize);
475  if (ret != fsize) {
476  av_log(logctx, AV_LOG_ERROR, "Error reading file %s\n", path);
477  ret = ret < 0 ? ret : AVERROR(EIO);
478  goto fail;
479  }
480 
481  *len = fsize;
482 
483  ret = 0;
484 fail:
485  avio_close(io);
486  if (ret < 0) {
487  av_freep(data);
488  *len = 0;
489  }
490  return ret;
491 }
492 
493 static int filter_opt_apply(void *logctx, AVFilterContext *f,
494  const char *key, const char *val)
495 {
496  const AVOption *o = NULL;
497  int ret;
498 
500  if (ret >= 0)
501  return 0;
502 
503  if (ret == AVERROR_OPTION_NOT_FOUND && key[0] == '/')
505  if (!o)
506  goto err_apply;
507 
508  // key is a valid option name prefixed with '/'
509  // interpret value as a path from which to load the actual option value
510  key++;
511 
512  if (o->type == AV_OPT_TYPE_BINARY) {
513  uint8_t *data;
514  int len;
515 
516  ret = read_binary(logctx, val, &data, &len);
517  if (ret < 0)
518  goto err_load;
519 
521  av_freep(&data);
522  } else {
523  char *data = read_file_to_string(val);
524  if (!data) {
525  ret = AVERROR(EIO);
526  goto err_load;
527  }
528 
530  av_freep(&data);
531  }
532  if (ret < 0)
533  goto err_apply;
534 
535  return 0;
536 
537 err_apply:
538  av_log(logctx, AV_LOG_ERROR,
539  "Error applying option '%s' to filter '%s': %s\n",
540  key, f->filter->name, av_err2str(ret));
541  return ret;
542 err_load:
543  av_log(logctx, AV_LOG_ERROR,
544  "Error loading value for option '%s' from file '%s'\n",
545  key, val);
546  return ret;
547 }
548 
549 static int graph_opts_apply(void *logctx, AVFilterGraphSegment *seg)
550 {
551  for (size_t i = 0; i < seg->nb_chains; i++) {
552  AVFilterChain *ch = seg->chains[i];
553 
554  for (size_t j = 0; j < ch->nb_filters; j++) {
555  AVFilterParams *p = ch->filters[j];
556  const AVDictionaryEntry *e = NULL;
557 
558  av_assert0(p->filter);
559 
560  while ((e = av_dict_iterate(p->opts, e))) {
561  int ret = filter_opt_apply(logctx, p->filter, e->key, e->value);
562  if (ret < 0)
563  return ret;
564  }
565 
566  av_dict_free(&p->opts);
567  }
568  }
569 
570  return 0;
571 }
572 
573 static int graph_parse(void *logctx,
574  AVFilterGraph *graph, const char *desc,
576  AVBufferRef *hw_device)
577 {
579  int ret;
580 
581  *inputs = NULL;
582  *outputs = NULL;
583 
584  ret = avfilter_graph_segment_parse(graph, desc, 0, &seg);
585  if (ret < 0)
586  return ret;
587 
589  if (ret < 0)
590  goto fail;
591 
592  if (hw_device) {
593  for (int i = 0; i < graph->nb_filters; i++) {
594  AVFilterContext *f = graph->filters[i];
595 
596  if (!(f->filter->flags & AVFILTER_FLAG_HWDEVICE))
597  continue;
598  f->hw_device_ctx = av_buffer_ref(hw_device);
599  if (!f->hw_device_ctx) {
600  ret = AVERROR(ENOMEM);
601  goto fail;
602  }
603  }
604  }
605 
606  ret = graph_opts_apply(logctx, seg);
607  if (ret < 0)
608  goto fail;
609 
611 
612 fail:
614  return ret;
615 }
616 
617 // Filters can be configured only if the formats of all inputs are known.
619 {
620  for (int i = 0; i < fg->nb_inputs; i++) {
622  if (ifp->format < 0)
623  return 0;
624  }
625  return 1;
626 }
627 
628 static int filter_thread(void *arg);
629 
630 static char *describe_filter_link(FilterGraph *fg, AVFilterInOut *inout, int in)
631 {
632  AVFilterContext *ctx = inout->filter_ctx;
633  AVFilterPad *pads = in ? ctx->input_pads : ctx->output_pads;
634  int nb_pads = in ? ctx->nb_inputs : ctx->nb_outputs;
635 
636  if (nb_pads > 1)
637  return av_strdup(ctx->filter->name);
638  return av_asprintf("%s:%s", ctx->filter->name,
639  avfilter_pad_get_name(pads, inout->pad_idx));
640 }
641 
642 static const char *ofilter_item_name(void *obj)
643 {
644  OutputFilterPriv *ofp = obj;
645  return ofp->log_name;
646 }
647 
648 static const AVClass ofilter_class = {
649  .class_name = "OutputFilter",
650  .version = LIBAVUTIL_VERSION_INT,
651  .item_name = ofilter_item_name,
652  .parent_log_context_offset = offsetof(OutputFilterPriv, log_parent),
653  .category = AV_CLASS_CATEGORY_FILTER,
654 };
655 
657 {
658  OutputFilterPriv *ofp;
659  OutputFilter *ofilter;
660 
661  ofp = allocate_array_elem(&fg->outputs, sizeof(*ofp), &fg->nb_outputs);
662  if (!ofp)
663  return NULL;
664 
665  ofilter = &ofp->ofilter;
666  ofilter->class = &ofilter_class;
667  ofp->log_parent = fg;
668  ofilter->graph = fg;
669  ofilter->type = type;
670  ofp->format = -1;
674  ofilter->index = fg->nb_outputs - 1;
675 
676  snprintf(ofp->log_name, sizeof(ofp->log_name), "%co%d",
677  av_get_media_type_string(type)[0], ofilter->index);
678 
679  return ofilter;
680 }
681 
682 static int ifilter_bind_ist(InputFilter *ifilter, InputStream *ist,
683  const ViewSpecifier *vs)
684 {
685  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
686  FilterGraphPriv *fgp = fgp_from_fg(ifilter->graph);
688  int ret;
689 
690  av_assert0(!ifp->bound);
691  ifp->bound = 1;
692 
693  if (ifilter->type != ist->par->codec_type &&
694  !(ifilter->type == AVMEDIA_TYPE_VIDEO && ist->par->codec_type == AVMEDIA_TYPE_SUBTITLE)) {
695  av_log(fgp, AV_LOG_ERROR, "Tried to connect %s stream to %s filtergraph input\n",
697  return AVERROR(EINVAL);
698  }
699 
700  ifp->type_src = ist->st->codecpar->codec_type;
701 
702  ifp->opts.fallback = av_frame_alloc();
703  if (!ifp->opts.fallback)
704  return AVERROR(ENOMEM);
705 
706  ret = ist_filter_add(ist, ifilter, filtergraph_is_simple(ifilter->graph),
707  vs, &ifp->opts, &src);
708  if (ret < 0)
709  return ret;
710 
711  ifilter->input_name = av_strdup(ifp->opts.name);
712  if (!ifilter->input_name)
713  return AVERROR(EINVAL);
714 
715  ret = sch_connect(fgp->sch,
716  src, SCH_FILTER_IN(fgp->sch_idx, ifilter->index));
717  if (ret < 0)
718  return ret;
719 
720  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE) {
721  ifp->sub2video.frame = av_frame_alloc();
722  if (!ifp->sub2video.frame)
723  return AVERROR(ENOMEM);
724 
725  ifp->width = ifp->opts.sub2video_width;
726  ifp->height = ifp->opts.sub2video_height;
727 
728  /* rectangles are AV_PIX_FMT_PAL8, but we have no guarantee that the
729  palettes for all rectangles are identical or compatible */
730  ifp->format = AV_PIX_FMT_RGB32;
731 
732  ifp->time_base = AV_TIME_BASE_Q;
733 
734  av_log(fgp, AV_LOG_VERBOSE, "sub2video: using %dx%d canvas\n",
735  ifp->width, ifp->height);
736  }
737 
738  return 0;
739 }
740 
742  const ViewSpecifier *vs)
743 {
746  int ret;
747 
748  av_assert0(!ifp->bound);
749  ifp->bound = 1;
750 
751  if (ifp->ifilter.type != dec->type) {
752  av_log(fgp, AV_LOG_ERROR, "Tried to connect %s decoder to %s filtergraph input\n",
754  return AVERROR(EINVAL);
755  }
756 
757  ifp->type_src = ifp->ifilter.type;
758 
759  ret = dec_filter_add(dec, &ifp->ifilter, &ifp->opts, vs, &src);
760  if (ret < 0)
761  return ret;
762 
763  ifp->ifilter.input_name = av_strdup(ifp->opts.name);
764  if (!ifp->ifilter.input_name)
765  return AVERROR(EINVAL);
766 
767  ret = sch_connect(fgp->sch, src, SCH_FILTER_IN(fgp->sch_idx, ifp->ifilter.index));
768  if (ret < 0)
769  return ret;
770 
771  return 0;
772 }
773 
774 static int set_channel_layout(OutputFilterPriv *f, const AVChannelLayout *layouts_allowed,
775  const AVChannelLayout *layout_requested)
776 {
777  int i, err;
778 
779  if (layout_requested->order != AV_CHANNEL_ORDER_UNSPEC) {
780  /* Pass the layout through for all orders but UNSPEC */
781  err = av_channel_layout_copy(&f->ch_layout, layout_requested);
782  if (err < 0)
783  return err;
784  return 0;
785  }
786 
787  /* Requested layout is of order UNSPEC */
788  if (!layouts_allowed) {
789  /* Use the default native layout for the requested amount of channels when the
790  encoder doesn't have a list of supported layouts */
791  av_channel_layout_default(&f->ch_layout, layout_requested->nb_channels);
792  return 0;
793  }
794  /* Encoder has a list of supported layouts. Pick the first layout in it with the
795  same amount of channels as the requested layout */
796  for (i = 0; layouts_allowed[i].nb_channels; i++) {
797  if (layouts_allowed[i].nb_channels == layout_requested->nb_channels)
798  break;
799  }
800  if (layouts_allowed[i].nb_channels) {
801  /* Use it if one is found */
802  err = av_channel_layout_copy(&f->ch_layout, &layouts_allowed[i]);
803  if (err < 0)
804  return err;
805  return 0;
806  }
807  /* If no layout for the amount of channels requested was found, use the default
808  native layout for it. */
809  av_channel_layout_default(&f->ch_layout, layout_requested->nb_channels);
810 
811  return 0;
812 }
813 
814 int ofilter_bind_enc(OutputFilter *ofilter, unsigned sched_idx_enc,
815  const OutputFilterOptions *opts)
816 {
817  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
818  FilterGraph *fg = ofilter->graph;
819  FilterGraphPriv *fgp = fgp_from_fg(fg);
820  int ret;
821 
822  av_assert0(!ofilter->bound);
823  av_assert0(!opts->enc ||
824  ofilter->type == opts->enc->type);
825 
826  ofp->needed = ofilter->bound = 1;
827  av_freep(&ofilter->linklabel);
828 
829  ofp->flags |= opts->flags;
830  ofp->ts_offset = opts->ts_offset;
831  ofp->enc_timebase = opts->output_tb;
832 
833  ofp->trim_start_us = opts->trim_start_us;
834  ofp->trim_duration_us = opts->trim_duration_us;
835 
836  ofilter->output_name = av_strdup(opts->name);
837  if (!ofilter->output_name)
838  return AVERROR(EINVAL);
839 
840  ret = av_dict_copy(&ofp->sws_opts, opts->sws_opts, 0);
841  if (ret < 0)
842  return ret;
843 
844  ret = av_dict_copy(&ofp->swr_opts, opts->swr_opts, 0);
845  if (ret < 0)
846  return ret;
847 
848  if (opts->flags & OFILTER_FLAG_AUDIO_24BIT)
849  av_dict_set(&ofp->swr_opts, "output_sample_bits", "24", 0);
850 
851  if (fgp->is_simple) {
852  // for simple filtergraph there is just one output,
853  // so use only graph-level information for logging
854  ofp->log_parent = NULL;
855  av_strlcpy(ofp->log_name, fgp->log_name, sizeof(ofp->log_name));
856  } else
857  av_strlcatf(ofp->log_name, sizeof(ofp->log_name), "->%s", ofilter->output_name);
858 
859  switch (ofilter->type) {
860  case AVMEDIA_TYPE_VIDEO:
861  ofp->width = opts->width;
862  ofp->height = opts->height;
863  if (opts->format != AV_PIX_FMT_NONE) {
864  ofp->format = opts->format;
865  } else
866  ofp->pix_fmts = opts->pix_fmts;
867 
868  if (opts->color_space != AVCOL_SPC_UNSPECIFIED)
869  ofp->color_space = opts->color_space;
870  else
871  ofp->color_spaces = opts->color_spaces;
872 
873  if (opts->color_range != AVCOL_RANGE_UNSPECIFIED)
874  ofp->color_range = opts->color_range;
875  else
876  ofp->color_ranges = opts->color_ranges;
877 
878  if (opts->alpha_mode != AVALPHA_MODE_UNSPECIFIED)
879  ofp->alpha_mode = opts->alpha_mode;
880  else
881  ofp->alpha_modes = opts->alpha_modes;
882 
884 
885  ofp->fps.last_frame = av_frame_alloc();
886  if (!ofp->fps.last_frame)
887  return AVERROR(ENOMEM);
888 
889  ofp->fps.vsync_method = opts->vsync_method;
890  ofp->fps.framerate = opts->frame_rate;
891  ofp->fps.framerate_max = opts->max_frame_rate;
892  ofp->fps.framerate_supported = opts->frame_rates;
893 
894  // reduce frame rate for mpeg4 to be within the spec limits
895  if (opts->enc && opts->enc->id == AV_CODEC_ID_MPEG4)
896  ofp->fps.framerate_clip = 65535;
897 
898  ofp->fps.dup_warning = 1000;
899 
900  break;
901  case AVMEDIA_TYPE_AUDIO:
902  if (opts->format != AV_SAMPLE_FMT_NONE) {
903  ofp->format = opts->format;
904  } else {
905  ofp->sample_fmts = opts->sample_fmts;
906  }
907  if (opts->sample_rate) {
908  ofp->sample_rate = opts->sample_rate;
909  } else
910  ofp->sample_rates = opts->sample_rates;
911  if (opts->ch_layout.nb_channels) {
912  int ret = set_channel_layout(ofp, opts->ch_layouts, &opts->ch_layout);
913  if (ret < 0)
914  return ret;
915  } else {
916  ofp->ch_layouts = opts->ch_layouts;
917  }
918  break;
919  }
920 
921  ret = sch_connect(fgp->sch, SCH_FILTER_OUT(fgp->sch_idx, ofilter->index),
922  SCH_ENC(sched_idx_enc));
923  if (ret < 0)
924  return ret;
925 
926  return 0;
927 }
928 
930  const OutputFilterOptions *opts)
931 {
932  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
933 
934  av_assert0(!ofilter->bound);
935  av_assert0(ofilter->type == ifp->ifilter.type);
936 
937  ofp->needed = ofilter->bound = 1;
938  av_freep(&ofilter->linklabel);
939 
940  ofilter->output_name = av_strdup(opts->name);
941  if (!ofilter->output_name)
942  return AVERROR(EINVAL);
943 
944  ifp->ofilter_src = ofilter;
945 
946  av_strlcatf(ofp->log_name, sizeof(ofp->log_name), "->%s", ofilter->output_name);
947 
948  return 0;
949 }
950 
951 static int ifilter_bind_fg(InputFilterPriv *ifp, FilterGraph *fg_src, int out_idx)
952 {
954  OutputFilter *ofilter_src = fg_src->outputs[out_idx];
956  char name[32];
957  int ret;
958 
959  av_assert0(!ifp->bound);
960  ifp->bound = 1;
961 
962  if (ifp->ifilter.type != ofilter_src->type) {
963  av_log(fgp, AV_LOG_ERROR, "Tried to connect %s output to %s input\n",
964  av_get_media_type_string(ofilter_src->type),
966  return AVERROR(EINVAL);
967  }
968 
969  ifp->type_src = ifp->ifilter.type;
970 
971  memset(&opts, 0, sizeof(opts));
972 
973  snprintf(name, sizeof(name), "fg:%d:%d", fgp->fg.index, ifp->ifilter.index);
974  opts.name = name;
975 
976  ret = ofilter_bind_ifilter(ofilter_src, ifp, &opts);
977  if (ret < 0)
978  return ret;
979 
980  ret = sch_connect(fgp->sch, SCH_FILTER_OUT(fg_src->index, out_idx),
981  SCH_FILTER_IN(fgp->sch_idx, ifp->ifilter.index));
982  if (ret < 0)
983  return ret;
984 
985  return 0;
986 }
987 
989 {
990  InputFilterPriv *ifp;
991  InputFilter *ifilter;
992 
993  ifp = allocate_array_elem(&fg->inputs, sizeof(*ifp), &fg->nb_inputs);
994  if (!ifp)
995  return NULL;
996 
997  ifilter = &ifp->ifilter;
998  ifilter->graph = fg;
999 
1000  ifp->frame = av_frame_alloc();
1001  if (!ifp->frame)
1002  return NULL;
1003 
1004  ifilter->index = fg->nb_inputs - 1;
1005  ifp->format = -1;
1009 
1011  if (!ifp->frame_queue)
1012  return NULL;
1013 
1014  return ifilter;
1015 }
1016 
1018 {
1019  FilterGraph *fg = *pfg;
1020  FilterGraphPriv *fgp;
1021 
1022  if (!fg)
1023  return;
1024  fgp = fgp_from_fg(fg);
1025 
1026  for (int j = 0; j < fg->nb_inputs; j++) {
1027  InputFilter *ifilter = fg->inputs[j];
1028  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1029 
1030  if (ifp->frame_queue) {
1031  AVFrame *frame;
1032  while (av_fifo_read(ifp->frame_queue, &frame, 1) >= 0)
1033  av_frame_free(&frame);
1034  av_fifo_freep2(&ifp->frame_queue);
1035  }
1036  av_frame_free(&ifp->sub2video.frame);
1037 
1038  av_frame_free(&ifp->frame);
1039  av_frame_free(&ifp->opts.fallback);
1040 
1042  av_freep(&ifilter->linklabel);
1043  av_freep(&ifp->opts.name);
1045  av_freep(&ifilter->name);
1046  av_freep(&ifilter->input_name);
1047  av_freep(&fg->inputs[j]);
1048  }
1049  av_freep(&fg->inputs);
1050  for (int j = 0; j < fg->nb_outputs; j++) {
1051  OutputFilter *ofilter = fg->outputs[j];
1052  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
1053 
1054  av_frame_free(&ofp->fps.last_frame);
1055  av_dict_free(&ofp->sws_opts);
1056  av_dict_free(&ofp->swr_opts);
1057 
1058  av_freep(&ofilter->linklabel);
1059  av_freep(&ofilter->name);
1060  av_freep(&ofilter->output_name);
1061  av_freep(&ofilter->apad);
1064  av_freep(&fg->outputs[j]);
1065  }
1066  av_freep(&fg->outputs);
1067  av_freep(&fg->graph_desc);
1068 
1069  av_frame_free(&fgp->frame);
1070  av_frame_free(&fgp->frame_enc);
1071 
1072  av_freep(pfg);
1073 }
1074 
1075 static const char *fg_item_name(void *obj)
1076 {
1077  const FilterGraphPriv *fgp = obj;
1078 
1079  return fgp->log_name;
1080 }
1081 
1082 static const AVClass fg_class = {
1083  .class_name = "FilterGraph",
1084  .version = LIBAVUTIL_VERSION_INT,
1085  .item_name = fg_item_name,
1086  .category = AV_CLASS_CATEGORY_FILTER,
1087 };
1088 
1089 int fg_create(FilterGraph **pfg, char **graph_desc, Scheduler *sch,
1090  const OutputFilterOptions *opts)
1091 {
1092  FilterGraphPriv *fgp;
1093  FilterGraph *fg;
1094 
1096  AVFilterGraph *graph;
1097  int ret = 0;
1098 
1099  fgp = av_mallocz(sizeof(*fgp));
1100  if (!fgp) {
1101  av_freep(graph_desc);
1102  return AVERROR(ENOMEM);
1103  }
1104  fg = &fgp->fg;
1105 
1106  if (pfg) {
1107  *pfg = fg;
1108  fg->index = -1;
1109  } else {
1111  if (ret < 0) {
1112  av_freep(graph_desc);
1113  av_freep(&fgp);
1114  return ret;
1115  }
1116 
1117  fg->index = nb_filtergraphs - 1;
1118  }
1119 
1120  fg->class = &fg_class;
1121  fg->graph_desc = *graph_desc;
1123  fgp->nb_threads = -1;
1124  fgp->sch = sch;
1125 
1126  *graph_desc = NULL;
1127 
1128  snprintf(fgp->log_name, sizeof(fgp->log_name), "fc#%d", fg->index);
1129 
1130  fgp->frame = av_frame_alloc();
1131  fgp->frame_enc = av_frame_alloc();
1132  if (!fgp->frame || !fgp->frame_enc)
1133  return AVERROR(ENOMEM);
1134 
1135  /* this graph is only used for determining the kinds of inputs
1136  * and outputs we have, and is discarded on exit from this function */
1137  graph = avfilter_graph_alloc();
1138  if (!graph)
1139  return AVERROR(ENOMEM);;
1140  graph->nb_threads = 1;
1141 
1142  ret = graph_parse(fg, graph, fg->graph_desc, &inputs, &outputs,
1144  if (ret < 0)
1145  goto fail;
1146 
1147  for (unsigned i = 0; i < graph->nb_filters; i++) {
1148  const AVFilter *f = graph->filters[i]->filter;
1149  if ((!avfilter_filter_pad_count(f, 0) &&
1150  !(f->flags & AVFILTER_FLAG_DYNAMIC_INPUTS)) ||
1151  !strcmp(f->name, "apad")) {
1152  fgp->have_sources = 1;
1153  break;
1154  }
1155  }
1156 
1157  for (AVFilterInOut *cur = inputs; cur; cur = cur->next) {
1158  InputFilter *const ifilter = ifilter_alloc(fg);
1159 
1160  if (!ifilter) {
1161  ret = AVERROR(ENOMEM);
1162  goto fail;
1163  }
1164 
1165  ifilter->linklabel = cur->name;
1166  cur->name = NULL;
1167 
1168  ifilter->type = avfilter_pad_get_type(cur->filter_ctx->input_pads,
1169  cur->pad_idx);
1170 
1171  if (ifilter->type != AVMEDIA_TYPE_VIDEO && ifilter->type != AVMEDIA_TYPE_AUDIO) {
1172  av_log(fg, AV_LOG_FATAL, "Only video and audio filters supported "
1173  "currently.\n");
1174  ret = AVERROR(ENOSYS);
1175  goto fail;
1176  }
1177 
1178  ifilter->name = describe_filter_link(fg, cur, 1);
1179  if (!ifilter->name) {
1180  ret = AVERROR(ENOMEM);
1181  goto fail;
1182  }
1183  }
1184 
1185  for (AVFilterInOut *cur = outputs; cur; cur = cur->next) {
1186  const enum AVMediaType type = avfilter_pad_get_type(cur->filter_ctx->output_pads,
1187  cur->pad_idx);
1188  OutputFilter *const ofilter = ofilter_alloc(fg, type);
1189  OutputFilterPriv *ofp;
1190 
1191  if (!ofilter) {
1192  ret = AVERROR(ENOMEM);
1193  goto fail;
1194  }
1195  ofp = ofp_from_ofilter(ofilter);
1196 
1197  ofilter->linklabel = cur->name;
1198  cur->name = NULL;
1199 
1200  ofilter->name = describe_filter_link(fg, cur, 0);
1201  if (!ofilter->name) {
1202  ret = AVERROR(ENOMEM);
1203  goto fail;
1204  }
1205 
1206  // opts should only be needed in this function to fill fields from filtergraphs
1207  // whose output is meant to be treated as if it was stream, e.g. merged HEIF
1208  // tile groups.
1209  if (opts) {
1210  ofp->flags = opts->flags;
1211  ofp->side_data = opts->side_data;
1212  ofp->nb_side_data = opts->nb_side_data;
1213 
1214  ofp->crop_top = opts->crop_top;
1215  ofp->crop_bottom = opts->crop_bottom;
1216  ofp->crop_left = opts->crop_left;
1217  ofp->crop_right = opts->crop_right;
1218 
1221  if (sd)
1222  memcpy(ofp->displaymatrix, sd->data, sizeof(ofp->displaymatrix));
1223  }
1224  }
1225 
1226  if (!fg->nb_outputs) {
1227  av_log(fg, AV_LOG_FATAL, "A filtergraph has zero outputs, this is not supported\n");
1228  ret = AVERROR(ENOSYS);
1229  goto fail;
1230  }
1231 
1232  ret = sch_add_filtergraph(sch, fg->nb_inputs, fg->nb_outputs,
1233  filter_thread, fgp);
1234  if (ret < 0)
1235  goto fail;
1236  fgp->sch_idx = ret;
1237 
1238 fail:
1241  avfilter_graph_free(&graph);
1242 
1243  if (ret < 0)
1244  return ret;
1245 
1246  return 0;
1247 }
1248 
1250  InputStream *ist,
1251  char **graph_desc,
1252  Scheduler *sch, unsigned sched_idx_enc,
1253  const OutputFilterOptions *opts)
1254 {
1255  const enum AVMediaType type = ist->par->codec_type;
1256  FilterGraph *fg;
1257  FilterGraphPriv *fgp;
1258  int ret;
1259 
1260  ret = fg_create(pfg, graph_desc, sch, NULL);
1261  if (ret < 0)
1262  return ret;
1263  fg = *pfg;
1264  fgp = fgp_from_fg(fg);
1265 
1266  fgp->is_simple = 1;
1267 
1268  snprintf(fgp->log_name, sizeof(fgp->log_name), "%cf%s",
1269  av_get_media_type_string(type)[0], opts->name);
1270 
1271  if (fg->nb_inputs != 1 || fg->nb_outputs != 1) {
1272  av_log(fg, AV_LOG_ERROR, "Simple filtergraph '%s' was expected "
1273  "to have exactly 1 input and 1 output. "
1274  "However, it had %d input(s) and %d output(s). Please adjust, "
1275  "or use a complex filtergraph (-filter_complex) instead.\n",
1276  *graph_desc, fg->nb_inputs, fg->nb_outputs);
1277  return AVERROR(EINVAL);
1278  }
1279  if (fg->outputs[0]->type != type) {
1280  av_log(fg, AV_LOG_ERROR, "Filtergraph has a %s output, cannot connect "
1281  "it to %s output stream\n",
1284  return AVERROR(EINVAL);
1285  }
1286 
1287  ret = ifilter_bind_ist(fg->inputs[0], ist, opts->vs);
1288  if (ret < 0)
1289  return ret;
1290 
1291  ret = ofilter_bind_enc(fg->outputs[0], sched_idx_enc, opts);
1292  if (ret < 0)
1293  return ret;
1294 
1295  if (opts->nb_threads >= 0)
1296  fgp->nb_threads = opts->nb_threads;
1297 
1298  return 0;
1299 }
1300 
/* Bind one input pad of a complex filtergraph to its data source, chosen by
 * the pad's link label:
 *   - "dec:N"        -> standalone decoder N;
 *   - other label    -> a matching unbound output of another filtergraph,
 *                       else an explicitly addressed demuxer stream "F:spec";
 *   - no label       -> any unbound filtergraph output, else the first unused
 *                       input stream of the right media type.
 * When 'commit' is 0 only marks the chosen source as needed; when 1 performs
 * the actual binding. Returns 0 on success, negative AVERROR on failure.
 * NOTE(review): Doxygen extract; original lines 1306, 1335, 1389 and 1405
 * (local declarations such as the view/stream specifier variables and the
 * specifier cleanup calls, presumably) are missing below. */
1301 static int fg_complex_bind_input(FilterGraph *fg, InputFilter *ifilter, int commit)
1302 {
1303  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1304  InputStream *ist = NULL;
1305  enum AVMediaType type = ifilter->type;
1307  const char *spec;
1308  char *p;
1309  int i, ret;
1310 
1311  if (ifilter->linklabel && !strncmp(ifilter->linklabel, "dec:", 4)) {
1312  // bind to a standalone decoder
1313  int dec_idx;
1314 
1315  dec_idx = strtol(ifilter->linklabel + 4, &p, 0);
1316  if (dec_idx < 0 || dec_idx >= nb_decoders) {
1317  av_log(fg, AV_LOG_ERROR, "Invalid decoder index %d in filtergraph description %s\n",
1318  dec_idx, fg->graph_desc);
1319  return AVERROR(EINVAL);
1320  }
1321 
      /* For video a view specifier may follow the decoder index after ':'. */
1322  if (type == AVMEDIA_TYPE_VIDEO) {
1323  spec = *p == ':' ? p + 1 : p;
1324  ret = view_specifier_parse(&spec, &vs);
1325  if (ret < 0)
1326  return ret;
1327  }
1328 
1329  ret = ifilter_bind_dec(ifp, decoders[dec_idx], &vs);
1330  if (ret < 0)
1331  av_log(fg, AV_LOG_ERROR, "Error binding a decoder to filtergraph input %s\n",
1332  ifilter->name);
1333  return ret;
1334  } else if (ifilter->linklabel) {
1336  AVFormatContext *s;
1337  AVStream *st = NULL;
1338  int file_idx;
1339 
1340  // try finding an unbound filtergraph output with this label
1341  for (int i = 0; i < nb_filtergraphs; i++) {
1342  FilterGraph *fg_src = filtergraphs[i];
1343 
      /* A graph cannot feed itself. */
1344  if (fg == fg_src)
1345  continue;
1346 
1347  for (int j = 0; j < fg_src->nb_outputs; j++) {
1348  OutputFilter *ofilter = fg_src->outputs[j];
1349 
1350  if (!ofilter->bound && ofilter->linklabel &&
1351  !strcmp(ofilter->linklabel, ifilter->linklabel)) {
1352  if (commit) {
1353  av_log(fg, AV_LOG_VERBOSE,
1354  "Binding input with label '%s' to filtergraph output %d:%d\n",
1355  ifilter->linklabel, i, j);
1356 
1357  ret = ifilter_bind_fg(ifp, fg_src, j);
1358  if (ret < 0) {
1359  av_log(fg, AV_LOG_ERROR, "Error binding filtergraph input %s\n",
1360  ifilter->linklabel);
1361  return ret;
1362  }
1363  } else
      /* Dry run: only mark the output as needed so it is not pruned. */
1364  ofp_from_ofilter(ofilter)->needed = 1;
1365  return 0;
1366  }
1367  }
1368  }
1369 
1370  // bind to an explicitly specified demuxer stream
1371  file_idx = strtol(ifilter->linklabel, &p, 0);
1372  if (file_idx < 0 || file_idx >= nb_input_files) {
1373  av_log(fg, AV_LOG_FATAL, "Invalid file index %d in filtergraph description %s.\n",
1374  file_idx, fg->graph_desc);
1375  return AVERROR(EINVAL);
1376  }
1377  s = input_files[file_idx]->ctx;
1378 
1379  ret = stream_specifier_parse(&ss, *p == ':' ? p + 1 : p, 1, fg);
1380  if (ret < 0) {
1381  av_log(fg, AV_LOG_ERROR, "Invalid stream specifier: %s\n", p);
1382  return ret;
1383  }
1384 
1385  if (type == AVMEDIA_TYPE_VIDEO) {
1386  spec = ss.remainder ? ss.remainder : "";
1387  ret = view_specifier_parse(&spec, &vs);
1388  if (ret < 0) {
1390  return ret;
1391  }
1392  }
1393 
      /* Find the first stream matching both media type and specifier;
       * subtitle streams are accepted for video pads (sub2video). */
1394  for (i = 0; i < s->nb_streams; i++) {
1395  enum AVMediaType stream_type = s->streams[i]->codecpar->codec_type;
1396  if (stream_type != type &&
1397  !(stream_type == AVMEDIA_TYPE_SUBTITLE &&
1398  type == AVMEDIA_TYPE_VIDEO /* sub2video hack */))
1399  continue;
1400  if (stream_specifier_match(&ss, s, s->streams[i], fg)) {
1401  st = s->streams[i];
1402  break;
1403  }
1404  }
1406  if (!st) {
1407  av_log(fg, AV_LOG_FATAL, "Stream specifier '%s' in filtergraph description %s "
1408  "matches no streams.\n", p, fg->graph_desc);
1409  return AVERROR(EINVAL);
1410  }
1411  ist = input_files[file_idx]->streams[st->index];
1412 
1413  if (commit)
1414  av_log(fg, AV_LOG_VERBOSE,
1415  "Binding input with label '%s' to input stream %d:%d\n",
1416  ifilter->linklabel, ist->file->index, ist->index);
1417  } else {
1418  // try finding an unbound filtergraph output
1419  for (int i = 0; i < nb_filtergraphs; i++) {
1420  FilterGraph *fg_src = filtergraphs[i];
1421 
1422  if (fg == fg_src)
1423  continue;
1424 
1425  for (int j = 0; j < fg_src->nb_outputs; j++) {
1426  OutputFilter *ofilter = fg_src->outputs[j];
1427 
1428  if (!ofilter->bound) {
1429  if (commit) {
1430  av_log(fg, AV_LOG_VERBOSE,
1431  "Binding unlabeled filtergraph input to filtergraph output %d:%d\n", i, j);
1432 
1433  ret = ifilter_bind_fg(ifp, fg_src, j);
1434  if (ret < 0) {
1435  av_log(fg, AV_LOG_ERROR, "Error binding filtergraph input %d:%d\n", i, j);
1436  return ret;
1437  }
1438  } else
1439  ofp_from_ofilter(ofilter)->needed = 1;
1440  return 0;
1441  }
1442  }
1443  }
1444 
      /* No filtergraph output available - fall back to an unused stream. */
1445  ist = ist_find_unused(type);
1446  if (!ist) {
1447  av_log(fg, AV_LOG_FATAL,
1448  "Cannot find an unused %s input stream to feed the "
1449  "unlabeled input pad %s.\n",
1450  av_get_media_type_string(type), ifilter->name);
1451  return AVERROR(EINVAL);
1452  }
1453 
1454  if (commit)
1455  av_log(fg, AV_LOG_VERBOSE,
1456  "Binding unlabeled input %d to input stream %d:%d\n",
1457  ifilter->index, ist->file->index, ist->index);
1458  }
1459  av_assert0(ist);
1460 
      /* Both demuxer-stream paths converge here for the actual binding. */
1461  if (commit) {
1462  ret = ifilter_bind_ist(ifilter, ist, &vs);
1463  if (ret < 0) {
1464  av_log(fg, AV_LOG_ERROR,
1465  "Error binding an input stream to complex filtergraph input %s.\n",
1466  ifilter->name);
1467  return ret;
1468  }
1469  }
1470 
1471  return 0;
1472 }
1473 
/* Bind every not-yet-bound input of 'fg' via fg_complex_bind_input().
 * 'commit' is forwarded: 0 = dry run (mark sources needed), 1 = really bind.
 * Returns 0 on success or the first negative error encountered.
 * NOTE(review): Doxygen extract; original line 1478 (presumably the
 * "InputFilterPriv *ifp = ..." declaration for the loop body) is missing. */
1474 static int bind_inputs(FilterGraph *fg, int commit)
1475 {
1476  // bind filtergraph inputs to input streams or other filtergraphs
1477  for (int i = 0; i < fg->nb_inputs; i++) {
1479  int ret;
1480 
      /* Skip inputs that are already connected. */
1481  if (ifp->bound)
1482  continue;
1483 
1484  ret = fg_complex_bind_input(fg, &ifp->ifilter, commit);
1485  if (ret < 0)
1486  return ret;
1487  }
1488 
1489  return 0;
1490 }
1491 
/* Finalise input/output bindings across all filtergraphs:
 *   1) dry-run bind_inputs() on every graph to mark needed outputs;
 *   2) drop internal graphs whose outputs stayed unconnected, and fail
 *      hard on unconnected outputs of user graphs;
 *   3) commit the bindings with a second bind_inputs() pass.
 * NOTE(review): Doxygen extract; the signature line (original 1492) and
 * line 1505 (presumably "FilterGraphPriv *fgp = fgp_from_fg(fg);") are
 * missing. */
1493 {
1494  int ret;
1495 
      /* Pass 1: dry run, marks which outputs are actually needed. */
1496  for (int i = 0; i < nb_filtergraphs; i++) {
1497  ret = bind_inputs(filtergraphs[i], 0);
1498  if (ret < 0)
1499  return ret;
1500  }
1501 
1502  // check that all outputs were bound
      /* Iterate backwards so removing a graph does not skip the next one. */
1503  for (int i = nb_filtergraphs - 1; i >= 0; i--) {
1504  FilterGraph *fg = filtergraphs[i];
1506 
1507  for (int j = 0; j < fg->nb_outputs; j++) {
1508  OutputFilter *output = fg->outputs[j];
1509  if (!ofp_from_ofilter(output)->needed) {
      /* Unconnected output of a user-supplied graph is a hard error... */
1510  if (!fg->is_internal) {
1511  av_log(fg, AV_LOG_FATAL,
1512  "Filter '%s' has output %d (%s) unconnected\n",
1513  output->name, j,
1514  output->linklabel ? (const char *)output->linklabel : "unlabeled");
1515  return AVERROR(EINVAL);
1516  }
1517 
      /* ...while an internal graph is silently removed instead. */
1518  av_log(fg, AV_LOG_DEBUG,
1519  "Internal filter '%s' has output %d (%s) unconnected. Removing graph\n",
1520  output->name, j,
1521  output->linklabel ? (const char *)output->linklabel : "unlabeled");
1522  sch_remove_filtergraph(fgp->sch, fgp->sch_idx);
1523  fg_free(&filtergraphs[i]);
1524  nb_filtergraphs--;
      /* Close the hole left in the filtergraphs array. */
1525  if (nb_filtergraphs > 0)
1526  memmove(&filtergraphs[i],
1527  &filtergraphs[i + 1],
1528  (nb_filtergraphs - i) * sizeof(*filtergraphs));
1529  break;
1530  }
1531  }
1532  }
1533 
      /* Pass 2: commit the bindings for the graphs that survived. */
1534  for (int i = 0; i < nb_filtergraphs; i++) {
1535  ret = bind_inputs(filtergraphs[i], 1);
1536  if (ret < 0)
1537  return ret;
1538  }
1539 
1540  return 0;
1541 }
1542 
/* Append a (a)trim filter after *last_filter to enforce an output start
 * time and/or duration; no-op when neither limit is set. On success
 * *last_filter/*pad_idx are updated to point at the new chain tail.
 * NOTE(review): Doxygen extract; original lines 1548 ("AVFilterContext
 * *ctx;", presumably), 1570/1574 (the AV_OPT_SEARCH_CHILDREN argument
 * lines) and 1581 (the filter init call, presumably avfilter_init_str)
 * are missing. */
1543 static int insert_trim(void *logctx, int64_t start_time, int64_t duration,
1544  AVFilterContext **last_filter, int *pad_idx,
1545  const char *filter_name)
1546 {
1547  AVFilterGraph *graph = (*last_filter)->graph;
1549  const AVFilter *trim;
      /* Pick "trim" or "atrim" based on the media type of the source pad. */
1550  enum AVMediaType type = avfilter_pad_get_type((*last_filter)->output_pads, *pad_idx);
1551  const char *name = (type == AVMEDIA_TYPE_VIDEO) ? "trim" : "atrim";
1552  int ret = 0;
1553 
      /* Nothing to limit - leave the chain untouched. */
1554  if (duration == INT64_MAX && start_time == AV_NOPTS_VALUE)
1555  return 0;
1556 
1557  trim = avfilter_get_by_name(name);
1558  if (!trim) {
1559  av_log(logctx, AV_LOG_ERROR, "%s filter not present, cannot limit "
1560  "recording time.\n", name);
1561  return AVERROR_FILTER_NOT_FOUND;
1562  }
1563 
1564  ctx = avfilter_graph_alloc_filter(graph, trim, filter_name);
1565  if (!ctx)
1566  return AVERROR(ENOMEM);
1567 
      /* "durationi"/"starti" are the integer forms of the trim options;
       * values come from *_us callers, so presumably microseconds - verify. */
1568  if (duration != INT64_MAX) {
1569  ret = av_opt_set_int(ctx, "durationi", duration,
1571  }
1572  if (ret >= 0 && start_time != AV_NOPTS_VALUE) {
1573  ret = av_opt_set_int(ctx, "starti", start_time,
1575  }
1576  if (ret < 0) {
1577  av_log(ctx, AV_LOG_ERROR, "Error configuring the %s filter", name);
1578  return ret;
1579  }
1580 
1582  if (ret < 0)
1583  return ret;
1584 
1585  ret = avfilter_link(*last_filter, *pad_idx, ctx, 0);
1586  if (ret < 0)
1587  return ret;
1588 
      /* The trim filter is now the tail of the chain. */
1589  *last_filter = ctx;
1590  *pad_idx = 0;
1591  return 0;
1592 }
1593 
/* Create the named filter with the given args, link it after *last_filter,
 * and advance *last_filter/*pad_idx to the new chain tail. Returns
 * AVERROR_BUG if the filter name is unknown (callers only pass built-ins).
 * NOTE(review): Doxygen extract; original lines 1599 ("AVFilterContext
 * *ctx;", presumably) and 1605 (the avfilter_graph_create_filter(&ctx,
 * call head) are missing. */
1594 static int insert_filter(AVFilterContext **last_filter, int *pad_idx,
1595  const char *filter_name, const char *args)
1596 {
1597  AVFilterGraph *graph = (*last_filter)->graph;
1598  const AVFilter *filter = avfilter_get_by_name(filter_name);
1600  int ret;
1601 
1602  if (!filter)
1603  return AVERROR_BUG;
1604 
1606  filter,
1607  filter_name, args, NULL, graph);
1608  if (ret < 0)
1609  return ret;
1610 
1611  ret = avfilter_link(*last_filter, *pad_idx, ctx, 0);
1612  if (ret < 0)
1613  return ret;
1614 
1615  *last_filter = ctx;
1616  *pad_idx = 0;
1617  return 0;
1618 }
1619 
/* Configure a video output of the graph: create the buffersink, then insert
 * optional crop, auto-rotation, auto-scale, format-constraint and trim
 * filters between the user graph's output pad and the sink.
 * NOTE(review): Doxygen extract; the signature head (original 1620) and
 * several lines are missing (1631 buffersink creation head, 1682, 1688,
 * 1690, 1701, 1711/1713 av_bprint setup, 1722/1724 format filter
 * declaration/creation heads). Comments annotate visible code only. */
1621  OutputFilter *ofilter, AVFilterInOut *out)
1622 {
1623  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
1624  AVFilterContext *last_filter = out->filter_ctx;
1625  AVBPrint bprint;
1626  int pad_idx = out->pad_idx;
1627  int ret;
1628  char name[255];
1629 
      /* Create the buffersink that terminates this output. */
1630  snprintf(name, sizeof(name), "out_%s", ofilter->output_name);
1632  avfilter_get_by_name("buffersink"),
1633  name, NULL, NULL, graph);
1634 
1635  if (ret < 0)
1636  return ret;
1637 
      /* Optional output cropping requested by the caller. */
1638  if (ofp->flags & OFILTER_FLAG_CROP) {
1639  char crop_buf[64];
1640  snprintf(crop_buf, sizeof(crop_buf), "w=iw-%u-%u:h=ih-%u-%u:x=%u:y=%u",
1641  ofp->crop_left, ofp->crop_right,
1642  ofp->crop_top, ofp->crop_bottom,
1643  ofp->crop_left, ofp->crop_top);
1644  ret = insert_filter(&last_filter, &pad_idx, "crop", crop_buf);
1645  if (ret < 0)
1646  return ret;
1647  }
1648 
      /* Auto-rotation from the display matrix: map ~90/180/270 degrees to
       * transpose/flip filters, arbitrary angles to the rotate filter. */
1649  if (ofp->flags & OFILTER_FLAG_AUTOROTATE) {
1650  int32_t *displaymatrix = ofp->displaymatrix;
1651  double theta;
1652 
1653  theta = get_rotation(displaymatrix);
1654 
1655  if (fabs(theta - 90) < 1.0) {
1656  ret = insert_filter(&last_filter, &pad_idx, "transpose",
1657  displaymatrix[3] > 0 ? "cclock_flip" : "clock");
1658  } else if (fabs(theta - 180) < 1.0) {
1659  if (displaymatrix[0] < 0) {
1660  ret = insert_filter(&last_filter, &pad_idx, "hflip", NULL);
1661  if (ret < 0)
1662  return ret;
1663  }
1664  if (displaymatrix[4] < 0) {
1665  ret = insert_filter(&last_filter, &pad_idx, "vflip", NULL);
1666  }
1667  } else if (fabs(theta - 270) < 1.0) {
1668  ret = insert_filter(&last_filter, &pad_idx, "transpose",
1669  displaymatrix[3] < 0 ? "clock_flip" : "cclock");
1670  } else if (fabs(theta) > 1.0) {
1671  char rotate_buf[64];
1672  snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
1673  ret = insert_filter(&last_filter, &pad_idx, "rotate", rotate_buf);
1674  } else if (fabs(theta) < 1.0) {
1675  if (displaymatrix && displaymatrix[4] < 0) {
1676  ret = insert_filter(&last_filter, &pad_idx, "vflip", NULL);
1677  }
1678  }
1679  if (ret < 0)
1680  return ret;
1681 
1683  }
1684 
      /* Auto-insert a scaler when a target size was requested; skipped for
       * hardware pixel formats (the condition tail is on a missing line). */
1685  if ((ofp->width || ofp->height) && (ofp->flags & OFILTER_FLAG_AUTOSCALE) &&
1686  // skip add scale for hardware format
1687  !(ofp->format != AV_PIX_FMT_NONE &&
1689  char args[255];
1691  const AVDictionaryEntry *e = NULL;
1692 
1693  snprintf(args, sizeof(args), "%d:%d",
1694  ofp->width, ofp->height);
1695 
      /* Forward the user's sws options to the scaler as extra args. */
1696  while ((e = av_dict_iterate(ofp->sws_opts, e))) {
1697  av_strlcatf(args, sizeof(args), ":%s=%s", e->key, e->value);
1698  }
1699 
1700  snprintf(name, sizeof(name), "scaler_out_%s", ofilter->output_name);
1702  name, args, NULL, graph)) < 0)
1703  return ret;
1704  if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
1705  return ret;
1706 
1707  last_filter = filter;
1708  pad_idx = 0;
1709  }
1710 
      /* Collect the format/colorspace/range/alpha constraints and, when any
       * were produced, pin them with a "format" filter before the sink. */
1712  ofp->format != AV_PIX_FMT_NONE || !ofp->pix_fmts);
1714  choose_pix_fmts(ofp, &bprint);
1715  choose_color_spaces(ofp, &bprint);
1716  choose_color_ranges(ofp, &bprint);
1717  choose_alpha_modes(ofp, &bprint);
1718  if (!av_bprint_is_complete(&bprint))
1719  return AVERROR(ENOMEM);
1720 
1721  if (bprint.len) {
1723 
1725  avfilter_get_by_name("format"),
1726  "format", bprint.str, NULL, graph);
1727  av_bprint_finalize(&bprint, NULL);
1728  if (ret < 0)
1729  return ret;
1730  if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
1731  return ret;
1732 
1733  last_filter = filter;
1734  pad_idx = 0;
1735  }
1736 
      /* Apply -ss/-t style limits with a trim filter right before the sink. */
1737  snprintf(name, sizeof(name), "trim_out_%s", ofilter->output_name);
1738  ret = insert_trim(fgp, ofp->trim_start_us, ofp->trim_duration_us,
1739  &last_filter, &pad_idx, name);
1740  if (ret < 0)
1741  return ret;
1742 
1743 
1744  if ((ret = avfilter_link(last_filter, pad_idx, ofilter->filter, 0)) < 0)
1745  return ret;
1746 
1747  return 0;
1748 }
1749 
/* Configure an audio output of the graph: create the abuffersink, then
 * insert aformat (sample format/rate/layout constraints), optional apad,
 * and atrim filters before the sink.
 * NOTE(review): Doxygen extract; the signature head (original 1750) and
 * lines 1761 (abuffersink creation head), 1786/1796 (av_bprint setup and
 * "AVFilterContext *format;" declaration, presumably) and 1799 (aformat
 * creation head) are missing. */
1751  OutputFilter *ofilter, AVFilterInOut *out)
1752 {
1753  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
1754  AVFilterContext *last_filter = out->filter_ctx;
1755  int pad_idx = out->pad_idx;
1756  AVBPrint args;
1757  char name[255];
1758  int ret;
1759 
1760  snprintf(name, sizeof(name), "out_%s", ofilter->output_name);
1762  avfilter_get_by_name("abuffersink"),
1763  name, NULL, NULL, graph);
1764  if (ret < 0)
1765  return ret;
1766 
     /* Helper: create filter_name=arg, link it after last_filter, and make
      * it the new chain tail; used for option-driven auto-insertions. */
1767 #define AUTO_INSERT_FILTER(opt_name, filter_name, arg) do { \
1768  AVFilterContext *filt_ctx; \
1769  \
1770  av_log(ofilter, AV_LOG_INFO, opt_name " is forwarded to lavfi " \
1771  "similarly to -af " filter_name "=%s.\n", arg); \
1772  \
1773  ret = avfilter_graph_create_filter(&filt_ctx, \
1774  avfilter_get_by_name(filter_name), \
1775  filter_name, arg, NULL, graph); \
1776  if (ret < 0) \
1777  goto fail; \
1778  \
1779  ret = avfilter_link(last_filter, pad_idx, filt_ctx, 0); \
1780  if (ret < 0) \
1781  goto fail; \
1782  \
1783  last_filter = filt_ctx; \
1784  pad_idx = 0; \
1785 } while (0)
1787 
      /* Build the aformat argument string from the allowed formats. */
1788  choose_sample_fmts(ofp, &args);
1789  choose_sample_rates(ofp, &args);
1790  choose_channel_layouts(ofp, &args);
1791  if (!av_bprint_is_complete(&args)) {
1792  ret = AVERROR(ENOMEM);
1793  goto fail;
1794  }
1795  if (args.len) {
1797 
1798  snprintf(name, sizeof(name), "format_out_%s", ofilter->output_name);
1800  avfilter_get_by_name("aformat"),
1801  name, args.str, NULL, graph);
1802  if (ret < 0)
1803  goto fail;
1804 
1805  ret = avfilter_link(last_filter, pad_idx, format, 0);
1806  if (ret < 0)
1807  goto fail;
1808 
1809  last_filter = format;
1810  pad_idx = 0;
1811  }
1812 
      /* -apad: pad the audio stream; apad generates data, so the graph now
       * contains a source. */
1813  if (ofilter->apad) {
1814  AUTO_INSERT_FILTER("-apad", "apad", ofilter->apad);
1815  fgp->have_sources = 1;
1816  }
1817 
1818  snprintf(name, sizeof(name), "trim for output %s", ofilter->output_name);
1819  ret = insert_trim(fgp, ofp->trim_start_us, ofp->trim_duration_us,
1820  &last_filter, &pad_idx, name);
1821  if (ret < 0)
1822  goto fail;
1823 
1824  if ((ret = avfilter_link(last_filter, pad_idx, ofilter->filter, 0)) < 0)
1825  goto fail;
1826 fail:
1827  av_bprint_finalize(&args, NULL);
1828 
1829  return ret;
1830 }
1831 
/* Dispatch output configuration by media type; only video and audio
 * outputs are valid here (anything else is a bug).
 * NOTE(review): Doxygen extract; the signature head (original 1832) is
 * missing - parameters continue below. */
1833  OutputFilter *ofilter, AVFilterInOut *out)
1834 {
1835  switch (ofilter->type) {
1836  case AVMEDIA_TYPE_VIDEO: return configure_output_video_filter(fgp, graph, ofilter, out);
1837  case AVMEDIA_TYPE_AUDIO: return configure_output_audio_filter(fgp, graph, ofilter, out);
1838  default: av_assert0(0); return 0;
1839  }
1840 }
1841 
/* Reset the sub2video state of an input filter: clear the timestamps and
 * flag the state for re-initialization on the next heartbeat.
 * NOTE(review): Doxygen extract; the signature line (original 1842,
 * presumably taking an InputFilterPriv *ifp) is missing. */
1843 {
1844  ifp->sub2video.last_pts = INT64_MIN;
1845  ifp->sub2video.end_pts = INT64_MIN;
1846 
1847  /* sub2video structure has been (re-)initialized.
1848  Mark it as such so that the system will be
1849  initialized with the first received heartbeat. */
1850  ifp->sub2video.initialize = 1;
1851 }
1852 
/* Configure a video input of the graph: create and parameterize a "buffer"
 * source from the stored input properties, then insert optional crop,
 * auto-rotation and trim filters before linking to the user graph's pad.
 * NOTE(review): Doxygen extract; the signature head (original 1853) and
 * lines 1863 (the AVBufferSrcParameters allocation, presumably
 * av_buffersrc_parameters_alloc()) and 1904 (the pixel format descriptor
 * lookup assigning 'desc') are missing. */
1854  InputFilter *ifilter, AVFilterInOut *in)
1855 {
1856  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1857 
1858  AVFilterContext *last_filter;
1859  const AVFilter *buffer_filt = avfilter_get_by_name("buffer");
1860  const AVPixFmtDescriptor *desc;
1861  char name[255];
1862  int ret, pad_idx = 0;
1864  if (!par)
1865  return AVERROR(ENOMEM);
1866 
      /* Subtitle streams feeding a video pad go through sub2video. */
1867  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE)
1868  sub2video_prepare(ifp);
1869 
1870  snprintf(name, sizeof(name), "graph %d input from stream %s", fg->index,
1871  ifp->opts.name);
1872 
1873  ifilter->filter = avfilter_graph_alloc_filter(graph, buffer_filt, name);
1874  if (!ifilter->filter) {
1875  ret = AVERROR(ENOMEM);
1876  goto fail;
1877  }
1878 
      /* Describe the incoming frames to the buffer source. */
1879  par->format = ifp->format;
1880  par->time_base = ifp->time_base;
1881  par->frame_rate = ifp->opts.framerate;
1882  par->width = ifp->width;
1883  par->height = ifp->height;
1884  par->sample_aspect_ratio = ifp->sample_aspect_ratio.den > 0 ?
1885  ifp->sample_aspect_ratio : (AVRational){ 0, 1 };
1886  par->color_space = ifp->color_space;
1887  par->color_range = ifp->color_range;
1888  par->alpha_mode = ifp->alpha_mode;
1889  par->hw_frames_ctx = ifp->hw_frames_ctx;
1890  par->side_data = ifp->side_data;
1891  par->nb_side_data = ifp->nb_side_data;
1892 
1893  ret = av_buffersrc_parameters_set(ifilter->filter, par);
1894  if (ret < 0)
1895  goto fail;
1896  av_freep(&par);
1897 
1898  ret = avfilter_init_dict(ifilter->filter, NULL);
1899  if (ret < 0)
1900  goto fail;
1901 
1902  last_filter = ifilter->filter;
1903 
1905  av_assert0(desc);
1906 
      /* Optional input cropping requested by the caller. */
1907  if ((ifp->opts.flags & IFILTER_FLAG_CROP)) {
1908  char crop_buf[64];
1909  snprintf(crop_buf, sizeof(crop_buf), "w=iw-%u-%u:h=ih-%u-%u:x=%u:y=%u",
1910  ifp->opts.crop_left, ifp->opts.crop_right,
1911  ifp->opts.crop_top, ifp->opts.crop_bottom,
1912  ifp->opts.crop_left, ifp->opts.crop_top);
1913  ret = insert_filter(&last_filter, &pad_idx, "crop", crop_buf);
1914  if (ret < 0)
1915  return ret;
1916  }
1917 
1918  // TODO: insert hwaccel enabled filters like transpose_vaapi into the graph
      /* Auto-rotation from the display matrix, software formats only;
       * records whether the rotation was consumed here. */
1919  ifp->displaymatrix_applied = 0;
1920  if ((ifp->opts.flags & IFILTER_FLAG_AUTOROTATE) &&
1921  !(desc->flags & AV_PIX_FMT_FLAG_HWACCEL)) {
1922  int32_t *displaymatrix = ifp->displaymatrix;
1923  double theta;
1924 
1925  theta = get_rotation(displaymatrix);
1926 
1927  if (fabs(theta - 90) < 1.0) {
1928  ret = insert_filter(&last_filter, &pad_idx, "transpose",
1929  displaymatrix[3] > 0 ? "cclock_flip" : "clock");
1930  } else if (fabs(theta - 180) < 1.0) {
1931  if (displaymatrix[0] < 0) {
1932  ret = insert_filter(&last_filter, &pad_idx, "hflip", NULL);
1933  if (ret < 0)
1934  return ret;
1935  }
1936  if (displaymatrix[4] < 0) {
1937  ret = insert_filter(&last_filter, &pad_idx, "vflip", NULL);
1938  }
1939  } else if (fabs(theta - 270) < 1.0) {
1940  ret = insert_filter(&last_filter, &pad_idx, "transpose",
1941  displaymatrix[3] < 0 ? "clock_flip" : "cclock");
1942  } else if (fabs(theta) > 1.0) {
1943  char rotate_buf[64];
1944  snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
1945  ret = insert_filter(&last_filter, &pad_idx, "rotate", rotate_buf);
1946  } else if (fabs(theta) < 1.0) {
1947  if (displaymatrix && displaymatrix[4] < 0) {
1948  ret = insert_filter(&last_filter, &pad_idx, "vflip", NULL);
1949  }
1950  }
1951  if (ret < 0)
1952  return ret;
1953 
1954  ifp->displaymatrix_applied = 1;
1955  }
1956 
1957  snprintf(name, sizeof(name), "trim_in_%s", ifp->opts.name);
1958  ret = insert_trim(fg, ifp->opts.trim_start_us, ifp->opts.trim_end_us,
1959  &last_filter, &pad_idx, name);
1960  if (ret < 0)
1961  return ret;
1962 
1963  if ((ret = avfilter_link(last_filter, 0, in->filter_ctx, in->pad_idx)) < 0)
1964  return ret;
1965  return 0;
1966 fail:
1967  av_freep(&par);
1968 
1969  return ret;
1970 }
1971 
/* Configure an audio input of the graph: create an "abuffer" source from
 * the stored sample parameters, attach side data, then insert an atrim
 * filter before linking to the user graph's pad.
 * NOTE(review): Doxygen extract; the signature head (original 1972) and
 * lines 1983 (av_bprint_init, presumably), 1987 (the sample format name
 * argument), 1989/1991 (the channel-layout describe branch) and 2000
 * (the AVBufferSrcParameters allocation) are missing. */
1973  InputFilter *ifilter, AVFilterInOut *in)
1974 {
1975  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1976  AVFilterContext *last_filter;
1977  AVBufferSrcParameters *par;
1978  const AVFilter *abuffer_filt = avfilter_get_by_name("abuffer");
1979  AVBPrint args;
1980  char name[255];
1981  int ret, pad_idx = 0;
1982 
      /* Build the abuffer argument string from the input's parameters. */
1984  av_bprintf(&args, "time_base=%d/%d:sample_rate=%d:sample_fmt=%s",
1985  ifp->time_base.num, ifp->time_base.den,
1986  ifp->sample_rate,
      /* Prefer a named channel layout; fall back to a bare channel count. */
1988  if (av_channel_layout_check(&ifp->ch_layout) &&
1990  av_bprintf(&args, ":channel_layout=");
1992  } else
1993  av_bprintf(&args, ":channels=%d", ifp->ch_layout.nb_channels);
1994  snprintf(name, sizeof(name), "graph_%d_in_%s", fg->index, ifp->opts.name);
1995 
1996  if ((ret = avfilter_graph_create_filter(&ifilter->filter, abuffer_filt,
1997  name, args.str, NULL,
1998  graph)) < 0)
1999  return ret;
2001  if (!par)
2002  return AVERROR(ENOMEM);
      /* Side data cannot go through the args string; set it separately. */
2003  par->side_data = ifp->side_data;
2004  par->nb_side_data = ifp->nb_side_data;
2005  ret = av_buffersrc_parameters_set(ifilter->filter, par);
2006  av_free(par);
2007  if (ret < 0)
2008  return ret;
2009  last_filter = ifilter->filter;
2010 
2011  snprintf(name, sizeof(name), "trim for input stream %s", ifp->opts.name);
2012  ret = insert_trim(fg, ifp->opts.trim_start_us, ifp->opts.trim_end_us,
2013  &last_filter, &pad_idx, name);
2014  if (ret < 0)
2015  return ret;
2016 
2017  if ((ret = avfilter_link(last_filter, 0, in->filter_ctx, in->pad_idx)) < 0)
2018  return ret;
2019 
2020  return 0;
2021 }
2022 
/* Dispatch input configuration by media type; only video and audio inputs
 * are valid here (anything else is a bug).
 * NOTE(review): Doxygen extract; the signature head (original 2023) is
 * missing - parameters continue below. */
2024  InputFilter *ifilter, AVFilterInOut *in)
2025 {
2026  switch (ifilter->type) {
2027  case AVMEDIA_TYPE_VIDEO: return configure_input_video_filter(fg, graph, ifilter, in);
2028  case AVMEDIA_TYPE_AUDIO: return configure_input_audio_filter(fg, graph, ifilter, in);
2029  default: av_assert0(0); return 0;
2030  }
2031 }
2032 
/* Tear down the configured graph: clear the per-pad filter context
 * pointers (they are owned by the graph) and free the graph itself.
 * NOTE(review): Doxygen extract; the signature line (original 2033,
 * presumably taking FilterGraph *fg and a FilterGraphThread *fgt) is
 * missing. */
2034 {
2035  for (int i = 0; i < fg->nb_outputs; i++)
2036  fg->outputs[i]->filter = NULL;
2037  for (int i = 0; i < fg->nb_inputs; i++)
2038  fg->inputs[i]->filter = NULL;
2039  avfilter_graph_free(&fgt->graph);
2040 }
2041 
/* Return 1 if the filter context is a (a)buffer source: no input pads and
 * named "buffer" or "abuffer".
 * NOTE(review): Doxygen extract; the signature line (original 2042,
 * presumably taking a const AVFilterContext *f) is missing. */
2043 {
2044  return f->nb_inputs == 0 &&
2045  (!strcmp(f->filter->name, "buffer") ||
2046  !strcmp(f->filter->name, "abuffer"));
2047 }
2048 
/* Return 1 when every filter in the graph is "meta": flagged
 * AVFILTER_FLAG_METADATA_ONLY, a sink (no outputs), or a buffer source.
 * Such a graph does not modify frame data.
 * NOTE(review): Doxygen extract; original line 2060 (the remaining
 * condition term, presumably filter_is_buffersrc(f)) is missing. */
2049 static int graph_is_meta(AVFilterGraph *graph)
2050 {
2051  for (unsigned i = 0; i < graph->nb_filters; i++) {
2052  const AVFilterContext *f = graph->filters[i];
2053 
2054  /* in addition to filters flagged as meta, also
2055  * disregard sinks and buffersources (but not other sources,
2056  * since they introduce data we are not aware of)
2057  */
2058  if (!((f->filter->flags & AVFILTER_FLAG_METADATA_ONLY) ||
2059  f->nb_outputs == 0 ||
2061  return 0;
2062  }
2063  return 1;
2064 }
2065 
2066 static int sub2video_frame(InputFilter *ifilter, AVFrame *frame, int buffer);
2067 
/* (Re)configure the filtergraph: allocate a fresh AVFilterGraph, apply
 * thread/sws/swr options, parse the description, configure every input and
 * output pad, then replay queued frames and pending EOFs into the new graph.
 * NOTE(review): Doxygen extract; the signature line (original 2068) and a
 * number of interior lines are missing (e.g. 2096 sws option stringify,
 * 2112 threads handling for complex graphs, 2129-2133/2138-2142 error logs
 * and AVFilterInOut frees, 2145 conversion disabling, 2164-2181 buffersink
 * sample/frame-rate queries, 2188-2191 side-data cloning, 2199 ifp
 * declaration, 2207 display-matrix side data removal, 2230 the
 * request_oldest call). Comments annotate visible code only. */
2069 {
2070  FilterGraphPriv *fgp = fgp_from_fg(fg);
2071  AVBufferRef *hw_device;
2072  AVFilterInOut *inputs, *outputs, *cur;
2073  int ret = AVERROR_BUG, i, simple = filtergraph_is_simple(fg);
2074  int have_input_eof = 0;
2075  const char *graph_desc = fg->graph_desc;
2076 
      /* Drop any previously configured graph before building a new one. */
2077  cleanup_filtergraph(fg, fgt);
2078  fgt->graph = avfilter_graph_alloc();
2079  if (!fgt->graph)
2080  return AVERROR(ENOMEM);
2081 
      /* Simple graphs take per-output thread/sws/swr options. */
2082  if (simple) {
2083  OutputFilterPriv *ofp = ofp_from_ofilter(fg->outputs[0]);
2084 
2085  if (filter_nbthreads) {
2086  ret = av_opt_set(fgt->graph, "threads", filter_nbthreads, 0);
2087  if (ret < 0)
2088  goto fail;
2089  } else if (fgp->nb_threads >= 0) {
2090  ret = av_opt_set_int(fgt->graph, "threads", fgp->nb_threads, 0);
2091  if (ret < 0)
2092  return ret;
2093  }
2094 
2095  if (av_dict_count(ofp->sws_opts)) {
2097  &fgt->graph->scale_sws_opts,
2098  '=', ':');
2099  if (ret < 0)
2100  goto fail;
2101  }
2102 
2103  if (av_dict_count(ofp->swr_opts)) {
2104  char *args;
2105  ret = av_dict_get_string(ofp->swr_opts, &args, '=', ':');
2106  if (ret < 0)
2107  goto fail;
2108  av_opt_set(fgt->graph, "aresample_swr_opts", args, 0);
2109  av_free(args);
2110  }
2111  } else {
2113  }
2114 
2115  if (filter_buffered_frames) {
2116  ret = av_opt_set_int(fgt->graph, "max_buffered_frames", filter_buffered_frames, 0);
2117  if (ret < 0)
2118  return ret;
2119  }
2120 
2121  hw_device = hw_device_for_filter();
2122 
2123  ret = graph_parse(fg, fgt->graph, graph_desc, &inputs, &outputs, hw_device);
2124  if (ret < 0)
2125  goto fail;
2126 
      /* Wire each unlinked graph pad to our input/output configuration. */
2127  for (cur = inputs, i = 0; cur; cur = cur->next, i++)
2128  if ((ret = configure_input_filter(fg, fgt->graph, fg->inputs[i], cur)) < 0) {
2131  goto fail;
2132  }
2134 
2135  for (cur = outputs, i = 0; cur; cur = cur->next, i++) {
2136  ret = configure_output_filter(fgp, fgt->graph, fg->outputs[i], cur);
2137  if (ret < 0) {
2139  goto fail;
2140  }
2141  }
2143 
2144  if (fgp->disable_conversions)
2146  if ((ret = avfilter_graph_config(fgt->graph, NULL)) < 0)
2147  goto fail;
2148 
2149  fgp->is_meta = graph_is_meta(fgt->graph);
2150 
2151  /* limit the lists of allowed formats to the ones selected, to
2152  * make sure they stay the same if the filtergraph is reconfigured later */
2153  for (int i = 0; i < fg->nb_outputs; i++) {
2154  const AVFrameSideData *const *sd;
2155  int nb_sd;
2156  OutputFilter *ofilter = fg->outputs[i];
2157  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
2158  AVFilterContext *sink = ofilter->filter;
2159 
2160  ofp->format = av_buffersink_get_format(sink);
2161 
2162  ofp->width = av_buffersink_get_w(sink);
2163  ofp->height = av_buffersink_get_h(sink);
2167 
2168  // If the timing parameters are not locked yet, get the tentative values
2169  // here but don't lock them. They will only be used if no output frames
2170  // are ever produced.
2171  if (!ofp->tb_out_locked) {
2173  if (ofp->fps.framerate.num <= 0 && ofp->fps.framerate.den <= 0 &&
2174  fr.num > 0 && fr.den > 0)
2175  ofp->fps.framerate = fr;
2176  ofp->tb_out = av_buffersink_get_time_base(sink);
2177  }
2179 
2182  ret = av_buffersink_get_ch_layout(sink, &ofp->ch_layout);
2183  if (ret < 0)
2184  goto fail;
2185  sd = av_buffersink_get_side_data(sink, &nb_sd);
2186  if (nb_sd)
2187  for (int j = 0; j < nb_sd; j++) {
2190  if (ret < 0) {
2192  goto fail;
2193  }
2194  }
2195  }
2196 
      /* Replay the frames that were queued while no graph existed. */
2197  for (int i = 0; i < fg->nb_inputs; i++) {
2198  InputFilter *ifilter = fg->inputs[i];
2200  AVFrame *tmp;
2201  while (av_fifo_read(ifp->frame_queue, &tmp, 1) >= 0) {
2202  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE) {
2203  sub2video_frame(&ifp->ifilter, tmp, !fgt->graph);
2204  } else {
2205  if (ifp->type_src == AVMEDIA_TYPE_VIDEO) {
2206  if (ifp->displaymatrix_applied)
2208  }
2209  ret = av_buffersrc_add_frame(ifilter->filter, tmp);
2210  }
2211  av_frame_free(&tmp);
2212  if (ret < 0)
2213  goto fail;
2214  }
2215  }
2216 
2217  /* send the EOFs for the finished inputs */
2218  for (int i = 0; i < fg->nb_inputs; i++) {
2219  InputFilter *ifilter = fg->inputs[i];
2220  if (fgt->eof_in[i]) {
      /* NULL frame = EOF for a buffer source. */
2221  ret = av_buffersrc_add_frame(ifilter->filter, NULL);
2222  if (ret < 0)
2223  goto fail;
2224  have_input_eof = 1;
2225  }
2226  }
2227 
2228  if (have_input_eof) {
2229  // make sure the EOF propagates to the end of the graph
2231  if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
2232  goto fail;
2233  }
2234 
2235  return 0;
2236 fail:
2237  cleanup_filtergraph(fg, fgt);
2238  return ret;
2239 }
2240 
/* Capture an incoming frame's properties (format, geometry, color, audio
 * parameters, relevant side data) into the InputFilterPriv, so they can be
 * used to configure the buffer source.
 * NOTE(review): Doxygen extract; the signature line (original 2241) and
 * lines 2269/2277 (side-data array head), 2284/2292 (the
 * av_frame_get_side_data() lookups for DISPLAYMATRIX and the downmix
 * info) and 2294 (side-data clone call head) are missing. */
2242 {
2243  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2244  AVFrameSideData *sd;
2245  int ret;
2246 
2247  ret = av_buffer_replace(&ifp->hw_frames_ctx, frame->hw_frames_ctx);
2248  if (ret < 0)
2249  return ret;
2250 
      /* Audio uses 1/sample_rate; CFR video uses the inverse framerate;
       * everything else keeps the frame's own time base. */
2251  ifp->time_base = (ifilter->type == AVMEDIA_TYPE_AUDIO) ? (AVRational){ 1, frame->sample_rate } :
2252  (ifp->opts.flags & IFILTER_FLAG_CFR) ? av_inv_q(ifp->opts.framerate) :
2253  frame->time_base;
2254 
2255  ifp->format = frame->format;
2256 
2257  ifp->width = frame->width;
2258  ifp->height = frame->height;
2259  ifp->sample_aspect_ratio = frame->sample_aspect_ratio;
2260  ifp->color_space = frame->colorspace;
2261  ifp->color_range = frame->color_range;
2262  ifp->alpha_mode = frame->alpha_mode;
2263 
2264  ifp->sample_rate = frame->sample_rate;
2265  ret = av_channel_layout_copy(&ifp->ch_layout, &frame->ch_layout);
2266  if (ret < 0)
2267  return ret;
2268 
      /* Keep only global side data; the display matrix is handled below. */
2270  for (int i = 0; i < frame->nb_side_data; i++) {
2271  const AVSideDataDescriptor *desc = av_frame_side_data_desc(frame->side_data[i]->type);
2272 
2273  if (!(desc->props & AV_SIDE_DATA_PROP_GLOBAL) ||
2274  frame->side_data[i]->type == AV_FRAME_DATA_DISPLAYMATRIX)
2275  continue;
2276 
2278  &ifp->nb_side_data,
2279  frame->side_data[i], 0);
2280  if (ret < 0)
2281  return ret;
2282  }
2283 
      /* Store the display matrix separately for the auto-rotation logic. */
2285  if (sd)
2286  memcpy(ifp->displaymatrix, sd->data, sizeof(ifp->displaymatrix));
2287  ifp->displaymatrix_present = !!sd;
2288 
2289  /* Copy downmix related side data to InputFilterPriv so it may be propagated
2290  * to the filter chain even though it's not "global", as filters like aresample
2291  * require this information during init and not when remixing a frame */
2293  if (sd) {
2295  &ifp->nb_side_data, sd, 0);
2296  if (ret < 0)
2297  return ret;
2298  memcpy(&ifp->downmixinfo, sd->data, sizeof(ifp->downmixinfo));
2299  }
2300  ifp->downmixinfo_present = !!sd;
2301 
2302  return 0;
2303 }
2304 
2306 {
2307  const OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
2308  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2309 
2310  if (!ifp->opts.framerate.num) {
2311  ifp->opts.framerate = ofp->fps.framerate;
2312  if (ifp->opts.framerate.num > 0 && ifp->opts.framerate.den > 0)
2313  ifp->opts.flags |= IFILTER_FLAG_CFR;
2314  }
2315 
2316  for (int i = 0; i < ofp->nb_side_data; i++) {
2319  if (ret < 0)
2320  return ret;
2321  }
2322 
2323  return 0;
2324 }
2325 
/* Return 1 when the filtergraph was created by fg_create_simple() (plain
 * -vf/-af chain), 0 for a complex graph.
 * NOTE(review): Doxygen extract; the signature line (original 2326,
 * presumably taking a const FilterGraph *fg) is missing. */
2327 {
2328  const FilterGraphPriv *fgp = cfgp_from_cfg(fg);
2329  return fgp->is_simple;
2330 }
2331 
/* Forward an interactive command to the filtergraph. A negative 'time'
 * sends it immediately and prints the reply; otherwise the command is
 * queued for the given time (queued commands must target all supporting
 * filters). Failures are reported on stderr only; no error is returned. */
2332 static void send_command(FilterGraph *fg, AVFilterGraph *graph,
2333  double time, const char *target,
2334  const char *command, const char *arg, int all_filters)
2335 {
2336  int ret;
2337 
      /* No configured graph yet - nothing to send the command to. */
2338  if (!graph)
2339  return;
2340 
2341  if (time < 0) {
2342  char response[4096];
2343  ret = avfilter_graph_send_command(graph, target, command, arg,
2344  response, sizeof(response),
2345  all_filters ? 0 : AVFILTER_CMD_FLAG_ONE);
2346  fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s",
2347  fg->index, ret, response);
2348  } else if (!all_filters) {
2349  fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
2350  } else {
2351  ret = avfilter_graph_queue_command(graph, target, command, arg, 0, time);
2352  if (ret < 0)
2353  fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
2354  }
2355 }
2356 
/* Pick the input to feed next: the non-EOF input whose buffer source has
 * accumulated the most failed frame requests, i.e. the one the graph is
 * most starved for. Asserts that at least one input is still active. */
2357 static int choose_input(const FilterGraph *fg, const FilterGraphThread *fgt)
2358 {
2359  int nb_requests, nb_requests_max = -1;
2360  int best_input = -1;
2361 
2362  for (int i = 0; i < fg->nb_inputs; i++) {
2363  InputFilter *ifilter = fg->inputs[i];
2364 
      /* Inputs already at EOF cannot provide more data. */
2365  if (fgt->eof_in[i])
2366  continue;
2367 
2368  nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
2369  if (nb_requests > nb_requests_max) {
2370  nb_requests_max = nb_requests;
2371  best_input = i;
2372  }
2373  }
2374 
2375  av_assert0(best_input >= 0);
2376 
2377  return best_input;
2378 }
2379 
/* Decide the output time base (and, for video, the output framerate) for
 * an output filter, based on -enc_time_base, the buffersink's detected
 * frame rate, vsync mode, and the encoder's supported/clipped framerates.
 * Locks the result in ofp->tb_out/tb_out_locked.
 * NOTE(review): Doxygen extract; the signature line (original 2380,
 * presumably taking OutputFilterPriv *ofp and AVFrame *frame) is
 * missing. */
2381 {
2382  OutputFilter *ofilter = &ofp->ofilter;
2383  FPSConvContext *fps = &ofp->fps;
2384  AVRational tb = (AVRational){ 0, 0 };
2385  AVRational fr;
2386  const FrameData *fd;
2387 
2388  fd = frame_data_c(frame);
2389 
2390  // apply -enc_time_base
      /* "demux" mode needs a valid demuxer time base attached to the frame. */
2391  if (ofp->enc_timebase.num == ENC_TIME_BASE_DEMUX &&
2392  (fd->dec.tb.num <= 0 || fd->dec.tb.den <= 0)) {
2393  av_log(ofp, AV_LOG_ERROR,
2394  "Demuxing timebase not available - cannot use it for encoding\n");
2395  return AVERROR(EINVAL);
2396  }
2397 
2398  switch (ofp->enc_timebase.num) {
2399  case 0: break;
2400  case ENC_TIME_BASE_DEMUX: tb = fd->dec.tb; break;
2401  case ENC_TIME_BASE_FILTER: tb = frame->time_base; break;
2402  default: tb = ofp->enc_timebase; break;
2403  }
2404 
      /* Audio defaults to 1/sample_rate; no framerate handling needed. */
2405  if (ofilter->type == AVMEDIA_TYPE_AUDIO) {
2406  tb = tb.num ? tb : (AVRational){ 1, frame->sample_rate };
2407  goto finish;
2408  }
2409 
      /* Video: start from the configured framerate, else ask the sink. */
2410  fr = fps->framerate;
2411  if (!fr.num) {
2412  AVRational fr_sink = av_buffersink_get_frame_rate(ofilter->filter);
2413  if (fr_sink.num > 0 && fr_sink.den > 0)
2414  fr = fr_sink;
2415  }
2416 
2417  if (fps->vsync_method == VSYNC_CFR || fps->vsync_method == VSYNC_VSCFR) {
2418  if (!fr.num && !fps->framerate_max.num) {
2419  fr = (AVRational){25, 1};
2420  av_log(ofp, AV_LOG_WARNING,
2421  "No information "
2422  "about the input framerate is available. Falling "
2423  "back to a default value of 25fps. Use the -r option "
2424  "if you want a different framerate.\n");
2425  }
2426 
      /* Clamp to -fpsmax when set (or when fr is degenerate). */
2427  if (fps->framerate_max.num &&
2428  (av_q2d(fr) > av_q2d(fps->framerate_max) ||
2429  !fr.den))
2430  fr = fps->framerate_max;
2431  }
2432 
2433  if (fr.num > 0) {
      /* Snap to the nearest framerate the encoder supports... */
2434  if (fps->framerate_supported) {
2435  int idx = av_find_nearest_q_idx(fr, fps->framerate_supported);
2436  fr = fps->framerate_supported[idx];
2437  }
      /* ...and reduce the fraction to the configured clip bound. */
2438  if (fps->framerate_clip) {
2439  av_reduce(&fr.num, &fr.den,
2440  fr.num, fr.den, fps->framerate_clip);
2441  }
2442  }
2443 
      /* Fall back: inverse framerate, then the frame's own time base. */
2444  if (!(tb.num > 0 && tb.den > 0))
2445  tb = av_inv_q(fr);
2446  if (!(tb.num > 0 && tb.den > 0))
2447  tb = frame->time_base;
2448 
2449  fps->framerate = fr;
2450 finish:
2451  ofp->tb_out = tb;
2452  ofp->tb_out_locked = 1;
2453 
2454  return 0;
2455 }
2456 
// Rescale frame->pts from the filter timebase into the encoder timebase
// tb_dst (subtracting start_time) and return the same timestamp as a double
// with extra fractional precision, for use by the vsync drift computation.
// NOTE(review): extraction dropped the second operand of both subtractions
// (lines 2471 and 2480) — presumably the start_time rescaled into the target
// timebase; confirm against the original file before relying on this listing.
2457 static double adjust_frame_pts_to_encoder_tb(void *logctx, AVFrame *frame,
2458  AVRational tb_dst, int64_t start_time)
2459 {
2460  double float_pts = AV_NOPTS_VALUE; // this is identical to frame.pts but with higher precision
2461 
2462  AVRational tb = tb_dst;
2463  AVRational filter_tb = frame->time_base;
// widen the denominator so the double carries sub-tick precision (max 2^16)
2464  const int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
2465 
2466  if (frame->pts == AV_NOPTS_VALUE)
2467  goto early_exit;
2468 
2469  tb.den <<= extra_bits;
2470  float_pts = av_rescale_q(frame->pts, filter_tb, tb) -
2472  float_pts /= 1 << extra_bits;
2473  // when float_pts is not exactly an integer,
2474  // avoid exact midpoints to reduce the chance of rounding differences, this
2475  // can be removed in case the fps code is changed to work with integers
2476  if (float_pts != llrint(float_pts))
2477  float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
2478 
// integer pts gets the same rescale without the extra precision bits
2479  frame->pts = av_rescale_q(frame->pts, filter_tb, tb_dst) -
2481  frame->time_base = tb_dst;
2482 
2483 early_exit:
2484 
2485  if (debug_ts) {
2486  av_log(logctx, AV_LOG_INFO,
2487  "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
2488  frame ? av_ts2str(frame->pts) : "NULL",
2489  av_ts2timestr(frame->pts, &tb_dst),
2490  float_pts, tb_dst.num, tb_dst.den);
2491  }
2492 
2493  return float_pts;
2494 }
2495 
/* Return the median of the three arguments.
 * Used by video_sync_process() to smooth the history of previous-frame
 * duplication counts (frames_prev_hist) when flushing the output.
 * Equivalent to clamping c into [min(a,b), max(a,b)].
 * NOTE(review): the signature line was lost in extraction; int64_t
 * parameters/return are reconstructed from the call site, where the
 * arguments and the destination (*nb_frames) are int64_t. */
static int64_t median3(int64_t a, int64_t b, int64_t c)
{
    int64_t max2, min2, m;

    if (a >= b) {
        max2 = a;
        min2 = b;
    } else {
        max2 = b;
        min2 = a;
    }
    /* clamp c between min2 and max2 — the result is the median */
    m = (c >= max2) ? max2 : c;

    return (m >= min2) ? m : min2;
}
2511 
2512 
2513 /* Convert frame timestamps to the encoder timebase and decide how many times
2514  * should this (and possibly previous) frame be repeated in order to conform to
2515  * desired target framerate (if any).
2516  */
// Decide, according to the active vsync method, how many times the current
// frame (*nb_frames) and the previous frame (*nb_frames_prev) should be sent
// to the encoder, converting the frame's timestamp to the output timebase.
// A NULL frame means the output is being flushed: the duplication count is
// estimated as the median of the last three counts.
// NOTE(review): extraction dropped the signature line (2517) and parts of the
// condition at 2552-2558 (presumably the vsync-method checks guarding the
// drift clipping); confirm against the original file.
2518  int64_t *nb_frames, int64_t *nb_frames_prev)
2519 {
2520  OutputFilter *ofilter = &ofp->ofilter;
2521  FPSConvContext *fps = &ofp->fps;
2522  double delta0, delta, sync_ipts, duration;
2523 
// flush: no frame — estimate duplication from recent history
2524  if (!frame) {
2525  *nb_frames_prev = *nb_frames = median3(fps->frames_prev_hist[0],
2526  fps->frames_prev_hist[1],
2527  fps->frames_prev_hist[2]);
2528 
2529  if (!*nb_frames && fps->last_dropped) {
2530  atomic_fetch_add(&ofilter->nb_frames_drop, 1);
2531  fps->last_dropped++;
2532  }
2533 
2534  goto finish;
2535  }
2536 
// frame duration expressed in output-timebase ticks (as a double)
2537  duration = frame->duration * av_q2d(frame->time_base) / av_q2d(ofp->tb_out);
2538 
2539  sync_ipts = adjust_frame_pts_to_encoder_tb(ofilter->graph, frame,
2540  ofp->tb_out, ofp->ts_offset);
2541  /* delta0 is the "drift" between the input frame and
2542  * where it would fall in the output. */
2543  delta0 = sync_ipts - ofp->next_pts;
2544  delta = delta0 + duration;
2545 
2546  // tracks the number of times the PREVIOUS frame should be duplicated,
2547  // mostly for variable framerate (VFR)
2548  *nb_frames_prev = 0;
2549  /* by default, we output a single frame */
2550  *nb_frames = 1;
2551 
// small negative drift: clip the frame onto the expected output slot
2552  if (delta0 < 0 &&
2553  delta > 0 &&
2556  && fps->vsync_method != VSYNC_DROP
2557 #endif
2558  ) {
2559  if (delta0 < -0.6) {
2560  av_log(ofp, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
2561  } else
2562  av_log(ofp, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
2563  sync_ipts = ofp->next_pts;
2564  duration += delta0;
2565  delta0 = 0;
2566  }
2567 
2568  switch (fps->vsync_method) {
2569  case VSYNC_VSCFR:
// VSCFR: do not duplicate to fill a gap before the very first frame
2570  if (fps->frame_number == 0 && delta0 >= 0.5) {
2571  av_log(ofp, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
2572  delta = duration;
2573  delta0 = 0;
2574  ofp->next_pts = llrint(sync_ipts);
2575  }
// NOTE(review): line 2576 is missing — presumably a fallthrough marker
2577  case VSYNC_CFR:
2578  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
2579  if (frame_drop_threshold && delta < frame_drop_threshold && fps->frame_number) {
2580  *nb_frames = 0;
2581  } else if (delta < -1.1)
2582  *nb_frames = 0;
2583  else if (delta > 1.1) {
// duplicate to fill the gap; part of it may be covered by the previous frame
2584  *nb_frames = llrintf(delta);
2585  if (delta0 > 1.1)
2586  *nb_frames_prev = llrintf(delta0 - 0.6);
2587  }
2588  frame->duration = 1;
2589  break;
2590  case VSYNC_VFR:
2591  if (delta <= -0.6)
2592  *nb_frames = 0;
2593  else if (delta > 0.6)
2594  ofp->next_pts = llrint(sync_ipts);
2595  frame->duration = llrint(duration);
2596  break;
2597 #if FFMPEG_OPT_VSYNC_DROP
2598  case VSYNC_DROP:
2599 #endif
2600  case VSYNC_PASSTHROUGH:
2601  ofp->next_pts = llrint(sync_ipts);
2602  frame->duration = llrint(duration);
2603  break;
2604  default:
2605  av_assert0(0);
2606  }
2607 
2608 finish:
// shift the duplication history and record this frame's count
2609  memmove(fps->frames_prev_hist + 1,
2610  fps->frames_prev_hist,
2611  sizeof(fps->frames_prev_hist[0]) * (FF_ARRAY_ELEMS(fps->frames_prev_hist) - 1));
2612  fps->frames_prev_hist[0] = *nb_frames_prev;
2613 
2614  if (*nb_frames_prev == 0 && fps->last_dropped) {
2615  atomic_fetch_add(&ofilter->nb_frames_drop, 1);
2616  av_log(ofp, AV_LOG_VERBOSE,
2617  "*** dropping frame %"PRId64" at ts %"PRId64"\n",
2618  fps->frame_number, fps->last_frame->pts);
2619  }
// account for duplicated frames; refuse absurd duplication counts
2620  if (*nb_frames > (*nb_frames_prev && fps->last_dropped) + (*nb_frames > *nb_frames_prev)) {
2621  uint64_t nb_frames_dup;
2622  if (*nb_frames > dts_error_threshold * 30) {
2623  av_log(ofp, AV_LOG_ERROR, "%"PRId64" frame duplication too large, skipping\n", *nb_frames - 1);
2624  atomic_fetch_add(&ofilter->nb_frames_drop, 1);
2625  *nb_frames = 0;
2626  return;
2627  }
2628  nb_frames_dup = atomic_fetch_add(&ofilter->nb_frames_dup,
2629  *nb_frames - (*nb_frames_prev && fps->last_dropped) - (*nb_frames > *nb_frames_prev));
2630  av_log(ofp, AV_LOG_VERBOSE, "*** %"PRId64" dup!\n", *nb_frames - 1);
2631  if (nb_frames_dup > fps->dup_warning) {
2632  av_log(ofp, AV_LOG_WARNING, "More than %"PRIu64" frames duplicated\n", fps->dup_warning);
2633  fps->dup_warning *= 10;
2634  }
2635  }
2636 
2637  fps->last_dropped = *nb_frames == *nb_frames_prev && frame;
2638  fps->dropped_keyframe |= fps->last_dropped && (frame->flags & AV_FRAME_FLAG_KEY);
2639 }
2640 
// Mark a filtergraph input as closed (EOF), idempotently.
// NOTE(review): extraction dropped lines 2643 and 2646 — presumably a local
// declaration and the call notifying the scheduler that this input is
// finished; confirm against the original file.
2641 static void close_input(InputFilterPriv *ifp)
2642 {
2644 
2645  if (!ifp->eof) {
2647  ifp->eof = 1;
2648  }
2649 }
2650 
// Finish a filtergraph output: if no frame was ever produced, first send a
// parameter-only dummy frame (no data buffers) so downstream encoders can
// still be initialized, then send NULL as the EOF marker.
// NOTE(review): the signature line (2651) and lines 2653/2680-2681/2696 were
// lost in extraction — presumably (OutputFilterPriv *ofp,
// FilterGraphThread *fgt), the fgp declaration, a clone_side_data() call
// whose continuation survives at 2682, and an unref on error; confirm.
2652 {
2654 
2655  int ret;
2656 
2657  // we are finished and no frames were ever seen at this output,
2658  // at least initialize the encoder with a dummy frame
2659  if (!fgt->got_frame) {
2660  AVFrame *frame = fgt->frame;
2661  FrameData *fd;
2662 
// populate the dummy frame with the output's negotiated parameters
2663  frame->time_base = ofp->tb_out;
2664  frame->format = ofp->format;
2665 
2666  frame->width = ofp->width;
2667  frame->height = ofp->height;
2668  frame->sample_aspect_ratio = ofp->sample_aspect_ratio;
2669 
2670  frame->sample_rate = ofp->sample_rate;
2671  if (ofp->ch_layout.nb_channels) {
2672  ret = av_channel_layout_copy(&frame->ch_layout, &ofp->ch_layout);
2673  if (ret < 0)
2674  return ret;
2675  }
2676 
2677  fd = frame_data(frame);
2678  if (!fd)
2679  return AVERROR(ENOMEM);
2680 
2682  ofp->side_data, ofp->nb_side_data, 0);
2683  if (ret < 0)
2684  return ret;
2685 
2686  fd->frame_rate_filter = ofp->fps.framerate;
2687 
// must be parameters-only: no data buffers attached
2688  av_assert0(!frame->buf[0]);
2689 
2690  av_log(ofp, AV_LOG_WARNING,
2691  "No filtered frames for output stream, trying to "
2692  "initialize anyway.\n");
2693 
2694  ret = sch_filter_send(fgp->sch, fgp->sch_idx, ofp->ofilter.index, frame);
2695  if (ret < 0) {
2697  return ret;
2698  }
2699  }
2700 
2701  fgt->eof_out[ofp->ofilter.index] = 1;
2702 
// NULL frame signals EOF to the consumers; EOF back from them is not an error
2703  ret = sch_filter_send(fgp->sch, fgp->sch_idx, ofp->ofilter.index, NULL);
2704  return (ret == AVERROR_EOF) ? 0 : ret;
2705 }
2706 
// Send one filtered frame (or flush, when frame is NULL) to this output's
// consumers, applying video sync: the frame and/or the previous frame may be
// sent multiple times (duplication) or zero times (drop), as decided by
// video_sync_process(). Audio frames are passed through with pts/duration
// rescaled to the output timebase.
// NOTE(review): the signature line (2707), line 2710 (presumably the fgp
// declaration) and line 2743 (the subtrahend of the audio pts rescale,
// presumably the rescaled ts_offset) were lost in extraction; confirm.
2708  AVFrame *frame)
2709 {
2711  AVFrame *frame_prev = ofp->fps.last_frame;
2712  enum AVMediaType type = ofp->ofilter.type;
2713 
// a NULL frame (flush) sends 0 frames unless vsync decides otherwise
2714  int64_t nb_frames = !!frame, nb_frames_prev = 0;
2715 
2716  if (type == AVMEDIA_TYPE_VIDEO && (frame || fgt->got_frame))
2717  video_sync_process(ofp, frame, &nb_frames, &nb_frames_prev);
2718 
2719  for (int64_t i = 0; i < nb_frames; i++) {
2720  AVFrame *frame_out;
2721  int ret;
2722 
2723  if (type == AVMEDIA_TYPE_VIDEO) {
// the first nb_frames_prev iterations re-send the previous frame
2724  AVFrame *frame_in = (i < nb_frames_prev && frame_prev->buf[0]) ?
2725  frame_prev : frame;
2726  if (!frame_in)
2727  break;
2728 
2729  frame_out = fgp->frame_enc;
2730  ret = av_frame_ref(frame_out, frame_in);
2731  if (ret < 0)
2732  return ret;
2733 
2734  frame_out->pts = ofp->next_pts;
2735 
// re-mark a keyframe if the original keyframe was dropped by vsync
2736  if (ofp->fps.dropped_keyframe) {
2737  frame_out->flags |= AV_FRAME_FLAG_KEY;
2738  ofp->fps.dropped_keyframe = 0;
2739  }
2740  } else {
// audio: rescale pts/duration into the output timebase
2741  frame->pts = (frame->pts == AV_NOPTS_VALUE) ? ofp->next_pts :
2742  av_rescale_q(frame->pts, frame->time_base, ofp->tb_out) -
2744 
2745  frame->time_base = ofp->tb_out;
2746  frame->duration = av_rescale_q(frame->nb_samples,
2747  (AVRational){ 1, frame->sample_rate },
2748  ofp->tb_out);
2749 
2750  ofp->next_pts = frame->pts + frame->duration;
2751 
2752  frame_out = frame;
2753  }
2754 
2755  // send the frame to consumers
2756  ret = sch_filter_send(fgp->sch, fgp->sch_idx, ofp->ofilter.index, frame_out);
2757  if (ret < 0) {
2758  av_frame_unref(frame_out);
2759 
// consumers went away: count this output as done exactly once
2760  if (!fgt->eof_out[ofp->ofilter.index]) {
2761  fgt->eof_out[ofp->ofilter.index] = 1;
2762  fgp->nb_outputs_done++;
2763  }
2764 
2765  return ret == AVERROR_EOF ? 0 : ret;
2766  }
2767 
2768  if (type == AVMEDIA_TYPE_VIDEO) {
2769  ofp->fps.frame_number++;
2770  ofp->next_pts++;
2771 
// only the first copy of a duplicated frame keeps the keyframe flag
2772  if (i == nb_frames_prev && frame)
2773  frame->flags &= ~AV_FRAME_FLAG_KEY;
2774  }
2775 
2776  fgt->got_frame = 1;
2777  }
2778 
// remember this frame as "previous" for future duplication
2779  if (frame && frame_prev) {
2780  av_frame_unref(frame_prev);
2781  av_frame_move_ref(frame_prev, frame);
2782  }
2783 
2784  if (!frame)
2785  return close_output(ofp, fgt);
2786 
2787  return 0;
2788 }
2789 
// Pull one frame from this output's buffersink and forward it via
// fg_output_frame(). Returns 0 when a frame was processed, 1 when no more
// frames are currently available (EAGAIN/EOF), <0 on error.
// NOTE(review): extraction dropped the signature (2789-2790), the fgp/ofilter
// declarations, and the av_buffersink_get_frame_flags() call at 2798-2799
// whose result is tested below; also several unref/cleanup lines (2813,
// 2817, 2829, 2836, 2840-2848, 2857, 2866). Confirm against the original.
2791  AVFrame *frame)
2792 {
2795  FrameData *fd;
2796  int ret;
2797 
// (the buffersink read whose result is checked here was lost in extraction)
2800  if (ret == AVERROR_EOF && !fgt->eof_out[ofp->ofilter.index]) {
// sink reached EOF: flush the output once, report "no more frames"
2801  ret = fg_output_frame(ofp, fgt, NULL);
2802  return (ret < 0) ? ret : 1;
2803  } else if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
2804  return 1;
2805  } else if (ret < 0) {
2806  av_log(ofp, AV_LOG_WARNING,
2807  "Error in retrieving a frame from the filtergraph: %s\n",
2808  av_err2str(ret));
2809  return ret;
2810  }
2811 
// output already finished: discard frames still draining from the sink
2812  if (fgt->eof_out[ofp->ofilter.index]) {
2814  return 0;
2815  }
2816 
2818 
2819  if (debug_ts)
2820  av_log(ofp, AV_LOG_INFO, "filter_raw -> pts:%s pts_time:%s time_base:%d/%d\n",
2821  av_ts2str(frame->pts), av_ts2timestr(frame->pts, &frame->time_base),
2822  frame->time_base.num, frame->time_base.den);
2823 
2824  // Choose the output timebase the first time we get a frame.
2825  if (!ofp->tb_out_locked) {
2826  ret = choose_out_timebase(ofp, frame);
2827  if (ret < 0) {
2828  av_log(ofp, AV_LOG_ERROR, "Could not choose an output time base\n");
2830  return ret;
2831  }
2832  }
2833 
2834  fd = frame_data(frame);
2835  if (!fd) {
2837  return AVERROR(ENOMEM);
2838  }
2839 
2841  if (!fgt->got_frame) {
2843  ofp->side_data, ofp->nb_side_data, 0);
2844  if (ret < 0)
2845  return ret;
2846  }
2847 
2849 
2850  // only use bits_per_raw_sample passed through from the decoder
2851  // if the filtergraph did not touch the frame data
2852  if (!fgp->is_meta)
2853  fd->bits_per_raw_sample = 0;
2854 
2855  if (ofp->ofilter.type == AVMEDIA_TYPE_VIDEO) {
// derive a duration from the framerate when the filter left it unset
2856  if (!frame->duration) {
2858  if (fr.num > 0 && fr.den > 0)
2859  frame->duration = av_rescale_q(1, av_inv_q(fr), frame->time_base);
2860  }
2861 
2862  fd->frame_rate_filter = ofp->fps.framerate;
2863  }
2864 
2865  ret = fg_output_frame(ofp, fgt, frame);
2867  if (ret < 0)
2868  return ret;
2869 
2870  return 0;
2871 }
2872 
2873 /* retrieve all frames available at filtergraph outputs
2874  * and send them to consumers */
// Returns 0 when more input is wanted (fgt->next_in set to the input to
// request, or nb_inputs for "none"), AVERROR_EOF when all outputs are done,
// <0 on error. `frame` is scratch storage for fg_output_step().
// NOTE(review): extraction dropped the signature (2875), the per-input ifp
// declaration (2884), the per-output ofp declaration (2903) and the
// avfilter_graph_request_oldest() call (2917) whose result is tested below.
2876  AVFrame *frame)
2877 {
2878  FilterGraphPriv *fgp = fgp_from_fg(fg);
2879  int did_step = 0;
2880 
2881  // graph not configured, just select the input to request
2882  if (!fgt->graph) {
2883  for (int i = 0; i < fg->nb_inputs; i++) {
2885  if (ifp->format < 0 && !fgt->eof_in[i]) {
2886  fgt->next_in = i;
2887  return 0;
2888  }
2889  }
2890 
2891  // This state - graph is not configured, but all inputs are either
2892  // initialized or EOF - should be unreachable because sending EOF to a
2893  // filter without even a fallback format should fail
2894  av_assert0(0);
2895  return AVERROR_BUG;
2896  }
2897 
2898  while (fgp->nb_outputs_done < fg->nb_outputs) {
2899  int ret;
2900 
2901  /* Reap all buffers present in the buffer sinks */
2902  for (int i = 0; i < fg->nb_outputs; i++) {
2904 
// drain this sink until fg_output_step() reports "no more frames" (1)
2905  ret = 0;
2906  while (!ret) {
2907  ret = fg_output_step(ofp, fgt, frame);
2908  if (ret < 0)
2909  return ret;
2910  }
2911  }
2912 
2913  // return after one iteration, so that scheduler can rate-control us
2914  if (did_step && fgp->have_sources)
2915  return 0;
2916 
// (the graph frame-request call was lost in extraction; ret checked below)
2918  if (ret == AVERROR(EAGAIN)) {
2919  fgt->next_in = choose_input(fg, fgt);
2920  return 0;
2921  } else if (ret < 0) {
2922  if (ret == AVERROR_EOF)
2923  av_log(fg, AV_LOG_VERBOSE, "Filtergraph returned EOF, finishing\n");
2924  else
2925  av_log(fg, AV_LOG_ERROR,
2926  "Error requesting a frame from the filtergraph: %s\n",
2927  av_err2str(ret));
2928  return ret;
2929  }
// nb_inputs means "no specific input requested"
2930  fgt->next_in = fg->nb_inputs;
2931 
2932  did_step = 1;
2933  }
2934 
2935  return AVERROR_EOF;
2936 }
2937 
// Advance the sub2video overlay clock to `pts` (given in timebase `tb`):
// either re-push the currently displayed subpicture or, if it has expired
// (or the system is uninitialized), render a new blank/updated one.
// NOTE(review): the signature line (2938) was lost in extraction; the caller
// in filter_thread passes (ifilter, frame->pts, frame->time_base), so it is
// presumably (InputFilter *ifilter, int64_t pts, AVRational tb); confirm.
2939 {
2940  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2941  int64_t pts2;
2942 
2943  /* subtitles seem to be usually muxed ahead of other streams;
2944  if not, subtracting a larger time here is necessary */
2945  pts2 = av_rescale_q(pts, tb, ifp->time_base) - 1;
2946 
2947  /* do not send the heartbeat frame if the subtitle is already ahead */
2948  if (pts2 <= ifp->sub2video.last_pts)
2949  return;
2950 
2951  if (pts2 >= ifp->sub2video.end_pts || ifp->sub2video.initialize)
2952  /* if we have hit the end of the current displayed subpicture,
2953  or if we need to initialize the system, update the
2954  overlaid subpicture and its start/end times */
2955  sub2video_update(ifp, pts2 + 1, NULL);
2956  else
2957  sub2video_push_ref(ifp, pts2);
2958 }
2959 
// Feed a subtitle "frame" into the sub2video machinery. With `buffer` set
// (graph not configured yet) the frame is queued for later; a heartbeat
// frame (no data buffer) only advances the overlay clock; NULL flushes and
// sends EOF to the buffersrc; otherwise the subpicture is rendered.
// NOTE(review): line 2975 was lost in extraction — presumably the move of
// `frame` into `tmp` before queueing; confirm against the original file.
2960 static int sub2video_frame(InputFilter *ifilter, AVFrame *frame, int buffer)
2961 {
2962  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2963  int ret;
2964 
2965  if (buffer) {
2966  AVFrame *tmp;
2967 
2968  if (!frame)
2969  return 0;
2970 
2971  tmp = av_frame_alloc();
2972  if (!tmp)
2973  return AVERROR(ENOMEM);
2974 
2976 
// queue takes ownership of tmp on success
2977  ret = av_fifo_write(ifp->frame_queue, &tmp, 1);
2978  if (ret < 0) {
2979  av_frame_free(&tmp);
2980  return ret;
2981  }
2982 
2983  return 0;
2984  }
2985 
2986  // heartbeat frame
2987  if (frame && !frame->buf[0]) {
2988  sub2video_heartbeat(ifilter, frame->pts, frame->time_base);
2989  return 0;
2990  }
2991 
2992  if (!frame) {
// flush any still-displayed subpicture, then signal EOF to the buffersrc
2993  if (ifp->sub2video.end_pts < INT64_MAX)
2994  sub2video_update(ifp, INT64_MAX, NULL);
2995 
2996  return av_buffersrc_add_frame(ifilter->filter, NULL);
2997  }
2998 
2999  ifp->width = frame->width ? frame->width : ifp->width;
3000  ifp->height = frame->height ? frame->height : ifp->height;
3001 
// the AVSubtitle travels inside the frame's first data buffer
3002  sub2video_update(ifp, INT64_MIN, (const AVSubtitle*)frame->buf[0]->data);
3003 
3004  return 0;
3005 }
3006 
// Signal EOF on one filtergraph input. If the graph is already configured,
// forward the EOF (with the last pts) to the buffersrc. Otherwise fill the
// input's parameters from the fallback set so the graph can still be
// configured, and fail if no usable format can be determined.
// NOTE(review): extraction dropped lines 3020-3022 (rounding flags and the
// close call on the buffersrc), 3032, 3038 (start of a channel-layout copy
// whose continuation survives at 3039) and 3043; confirm against the
// original file.
3007 static int send_eof(FilterGraphThread *fgt, InputFilter *ifilter,
3008  int64_t pts, AVRational tb)
3009 {
3010  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
3011  int ret;
3012 
// idempotent: EOF may be signalled only once per input
3013  if (fgt->eof_in[ifilter->index])
3014  return 0;
3015 
3016  fgt->eof_in[ifilter->index] = 1;
3017 
3018  if (ifilter->filter) {
3019  pts = av_rescale_q_rnd(pts, tb, ifp->time_base,
3021 
3023  if (ret < 0)
3024  return ret;
3025  } else {
3026  if (ifp->format < 0) {
3027  // the filtergraph was never configured, use the fallback parameters
3028  ifp->format = ifp->opts.fallback->format;
3029  ifp->sample_rate = ifp->opts.fallback->sample_rate;
3030  ifp->width = ifp->opts.fallback->width;
3031  ifp->height = ifp->opts.fallback->height;
3033  ifp->color_space = ifp->opts.fallback->colorspace;
3034  ifp->color_range = ifp->opts.fallback->color_range;
3035  ifp->alpha_mode = ifp->opts.fallback->alpha_mode;
3036  ifp->time_base = ifp->opts.fallback->time_base;
3037 
3039  &ifp->opts.fallback->ch_layout);
3040  if (ret < 0)
3041  return ret;
3042 
3044  ret = clone_side_data(&ifp->side_data, &ifp->nb_side_data,
3045  ifp->opts.fallback->side_data,
3046  ifp->opts.fallback->nb_side_data, 0);
3047  if (ret < 0)
3048  return ret;
3049 
// with the fallback parameters in place the graph may now be configurable
3050  if (ifilter_has_all_input_formats(ifilter->graph)) {
3051  ret = configure_filtergraph(ifilter->graph, fgt);
3052  if (ret < 0) {
3053  av_log(ifilter->graph, AV_LOG_ERROR, "Error initializing filters!\n");
3054  return ret;
3055  }
3056  }
3057  }
3058 
3059  if (ifp->format < 0) {
3060  av_log(ifilter->graph, AV_LOG_ERROR,
3061  "Cannot determine format of input %s after EOF\n",
3062  ifp->opts.name);
3063  return AVERROR_INVALIDDATA;
3064  }
3065  }
3066 
3067  return 0;
3068 }
3069 
// Bit flags describing which input parameters changed relative to the
// configured graph; OR-ed into `need_reinit` in send_frame() and used both
// to decide on reconfiguration and to build the log message explaining it.
// NOTE(review): the opening line of this enum (3070) was lost in extraction;
// confirm the enum tag against the original file.
3071  VIDEO_CHANGED = (1 << 0),
3072  AUDIO_CHANGED = (1 << 1),
3073  MATRIX_CHANGED = (1 << 2),
3074  DOWNMIX_CHANGED = (1 << 3),
3075  HWACCEL_CHANGED = (1 << 4)
3076 };
3077 
/* Substitute the literal "unknown" for a NULL string, so optional
 * format/colorspace names can be printed safely in log messages. */
static const char *unknown_if_null(const char *str)
{
    if (!str)
        return "unknown";

    return str;
}
3082 
// Push one input frame into the filtergraph. Detects parameter changes
// (format, dimensions, channel layout, display matrix, downmix info,
// hwaccel context) against the configured input; depending on flags either
// drops the frame, buffers it, or drains and reconfigures the graph before
// sending. Timestamps are rescaled into the input's timebase.
// NOTE(review): extraction dropped the signature (3083) and several lines:
// the side-data lookups at 3111/3118 (their bodies test `sd` below), the
// parameter-update and ofilter-src handling at 3140/3147, 3161 (presumably
// moving `frame` into `tmp` before queueing), 3177 (AVBPrint init), and the
// buffersrc send at 3229-3232. Confirm against the original file.
3084  InputFilter *ifilter, AVFrame *frame)
3085 {
3086  FilterGraphPriv *fgp = fgp_from_fg(fg);
3087  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
3088  FrameData *fd;
3089  AVFrameSideData *sd;
3090  int need_reinit = 0, ret;
3091 
3092  /* determine if the parameters for this input changed */
3093  switch (ifilter->type) {
3094  case AVMEDIA_TYPE_AUDIO:
3095  if (ifp->format != frame->format ||
3096  ifp->sample_rate != frame->sample_rate ||
3097  av_channel_layout_compare(&ifp->ch_layout, &frame->ch_layout))
3098  need_reinit |= AUDIO_CHANGED;
3099  break;
3100  case AVMEDIA_TYPE_VIDEO:
3101  if (ifp->format != frame->format ||
3102  ifp->width != frame->width ||
3103  ifp->height != frame->height ||
3104  ifp->color_space != frame->colorspace ||
3105  ifp->color_range != frame->color_range ||
3106  ifp->alpha_mode != frame->alpha_mode)
3107  need_reinit |= VIDEO_CHANGED;
3108  break;
3109  }
3110 
// display-matrix side data appeared, disappeared, or changed value
3112  if (!ifp->displaymatrix_present ||
3113  memcmp(sd->data, ifp->displaymatrix, sizeof(ifp->displaymatrix)))
3114  need_reinit |= MATRIX_CHANGED;
3115  } else if (ifp->displaymatrix_present)
3116  need_reinit |= MATRIX_CHANGED;
3117 
// downmix-info side data appeared, disappeared, or changed value
3119  if (!ifp->downmixinfo_present ||
3120  memcmp(sd->data, &ifp->downmixinfo, sizeof(ifp->downmixinfo)))
3121  need_reinit |= DOWNMIX_CHANGED;
3122  } else if (ifp->downmixinfo_present)
3123  need_reinit |= DOWNMIX_CHANGED;
3124 
// -dropchanged: discard frames that would force a reconfiguration
3125  if (need_reinit && fgt->graph && (ifp->opts.flags & IFILTER_FLAG_DROPCHANGED)) {
3126  ifp->nb_dropped++;
// NOTE(review): "Avoiding reinit" message contains no typo, but the
// "downmix medatata changed, " string below does ("medatata" → "metadata");
// fix separately, as it is a runtime log string.
3127  av_log_once(fg, AV_LOG_WARNING, AV_LOG_DEBUG, &ifp->drop_warned, "Avoiding reinit; dropping frame pts: %s bound for %s\n", av_ts2str(frame->pts), ifilter->name);
3129  return 0;
3130  }
3131 
3132  if (!(ifp->opts.flags & IFILTER_FLAG_REINIT) && fgt->graph)
3133  need_reinit = 0;
3134 
// hw frames context appeared/disappeared or points to different data
3135  if (!!ifp->hw_frames_ctx != !!frame->hw_frames_ctx ||
3136  (ifp->hw_frames_ctx && ifp->hw_frames_ctx->data != frame->hw_frames_ctx->data))
3137  need_reinit |= HWACCEL_CHANGED;
3138 
3139  if (need_reinit) {
3141  if (ret < 0)
3142  return ret;
3143 
3144  /* Inputs bound to a filtergraph output will have some fields unset.
3145  * Handle them here */
3146  if (ifp->ofilter_src) {
3148  if (ret < 0)
3149  return ret;
3150  }
3151  }
3152 
3153  /* (re)init the graph if possible, otherwise buffer the frame and return */
3154  if (need_reinit || !fgt->graph) {
3155  AVFrame *tmp = av_frame_alloc();
3156 
3157  if (!tmp)
3158  return AVERROR(ENOMEM);
3159 
// not all inputs known yet: queue the frame until the graph can be built
3160  if (!ifilter_has_all_input_formats(fg)) {
3162 
3163  ret = av_fifo_write(ifp->frame_queue, &tmp, 1);
3164  if (ret < 0)
3165  av_frame_free(&tmp);
3166 
3167  return ret;
3168  }
3169 
// drain the old graph before tearing it down
3170  ret = fgt->graph ? read_frames(fg, fgt, tmp) : 0;
3171  av_frame_free(&tmp);
3172  if (ret < 0)
3173  return ret;
3174 
3175  if (fgt->graph) {
// build a human-readable explanation of why we reconfigure
3176  AVBPrint reason;
3178  if (need_reinit & AUDIO_CHANGED) {
3179  const char *sample_format_name = av_get_sample_fmt_name(frame->format);
3180  av_bprintf(&reason, "audio parameters changed to %d Hz, ", frame->sample_rate);
3181  av_channel_layout_describe_bprint(&frame->ch_layout, &reason);
3182  av_bprintf(&reason, ", %s, ", unknown_if_null(sample_format_name));
3183  }
3184  if (need_reinit & VIDEO_CHANGED) {
3185  const char *pixel_format_name = av_get_pix_fmt_name(frame->format);
3186  const char *color_space_name = av_color_space_name(frame->colorspace);
3187  const char *color_range_name = av_color_range_name(frame->color_range);
3188  const char *alpha_mode = av_alpha_mode_name(frame->alpha_mode);
3189  av_bprintf(&reason, "video parameters changed to %s(%s, %s), %dx%d, %s alpha, ",
3190  unknown_if_null(pixel_format_name), unknown_if_null(color_range_name),
3191  unknown_if_null(color_space_name), frame->width, frame->height,
3192  unknown_if_null(alpha_mode));
3193  }
3194  if (need_reinit & MATRIX_CHANGED)
3195  av_bprintf(&reason, "display matrix changed, ");
3196  if (need_reinit & DOWNMIX_CHANGED)
3197  av_bprintf(&reason, "downmix medatata changed, ");
3198  if (need_reinit & HWACCEL_CHANGED)
3199  av_bprintf(&reason, "hwaccel changed, ");
3200  if (reason.len > 1)
3201  reason.str[reason.len - 2] = '\0'; // remove last comma
3202  av_log(fg, AV_LOG_INFO, "Reconfiguring filter graph%s%s\n", reason.len ? " because " : "", reason.str);
3203  } else {
3204  /* Choke all input to avoid buffering excessive frames while the
3205  * initial filter graph is being configured, and before we have a
3206  * preferred input */
3207  sch_filter_choke_inputs(fgp->sch, fgp->sch_idx);
3208  }
3209 
3210  ret = configure_filtergraph(fg, fgt);
3211  if (ret < 0) {
3212  av_log(fg, AV_LOG_ERROR, "Error reinitializing filters!\n");
3213  return ret;
3214  }
3215  }
3216 
// rescale timestamps into the input's configured timebase
3217  frame->pts = av_rescale_q(frame->pts, frame->time_base, ifp->time_base);
3218  frame->duration = av_rescale_q(frame->duration, frame->time_base, ifp->time_base);
3219  frame->time_base = ifp->time_base;
3220 
3221  if (ifp->displaymatrix_applied)
3223 
3224  fd = frame_data(frame);
3225  if (!fd)
3226  return AVERROR(ENOMEM);
3228 
3231  if (ret < 0) {
3233  if (ret != AVERROR_EOF)
3234  av_log(fg, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
3235  return ret;
3236  }
3237 
3238  return 0;
3239 }
3240 
// Set the OS-level name of the filtering thread: "<media-char>f<output-name>"
// for simple filtergraphs, "fc<index>" for complex ones.
// NOTE(review): extraction dropped line 3247 (presumably the media-type
// character argument for "%c") and line 3253 (presumably the call that
// actually applies `name` to the thread); confirm against the original file.
3241 static void fg_thread_set_name(const FilterGraph *fg)
3242 {
3243  char name[16];
3244  if (filtergraph_is_simple(fg)) {
3245  OutputFilterPriv *ofp = ofp_from_ofilter(fg->outputs[0]);
3246  snprintf(name, sizeof(name), "%cf%s",
3248  ofp->ofilter.output_name);
3249  } else {
3250  snprintf(name, sizeof(name), "fc%d", fg->index);
3251  }
3252 
3254 }
3255 
// Free all per-thread filtering state: queued output frames, the scratch
// frame, the EOF flag arrays and the filtergraph itself, then zero the
// struct so a later init starts from a clean slate.
// NOTE(review): the signature line (3256) and line 3262 (presumably freeing
// the fifo itself) were lost in extraction; the counterpart fg_thread_init()
// below takes (FilterGraphThread *fgt, const FilterGraph *fg); confirm.
3257 {
3258  if (fgt->frame_queue_out) {
3259  AVFrame *frame;
// drain and free every frame still queued for output
3260  while (av_fifo_read(fgt->frame_queue_out, &frame, 1) >= 0)
3261  av_frame_free(&frame);
3263  }
3264 
3265  av_frame_free(&fgt->frame);
3266  av_freep(&fgt->eof_in);
3267  av_freep(&fgt->eof_out);
3268 
3269  avfilter_graph_free(&fgt->graph);
3270 
3271  memset(fgt, 0, sizeof(*fgt));
3272 }
3273 
// Initialize per-thread filtering state: a scratch frame, per-input and
// per-output EOF flag arrays, and the output frame queue. On any allocation
// failure everything allocated so far is released via fg_thread_uninit().
// Returns 0 on success, AVERROR(ENOMEM) on failure.
// NOTE(review): line 3290 (the allocation of fgt->frame_queue_out, checked
// immediately below) was lost in extraction; confirm against the original.
3274 static int fg_thread_init(FilterGraphThread *fgt, const FilterGraph *fg)
3275 {
3276  memset(fgt, 0, sizeof(*fgt));
3277 
3278  fgt->frame = av_frame_alloc();
3279  if (!fgt->frame)
3280  goto fail;
3281 
3282  fgt->eof_in = av_calloc(fg->nb_inputs, sizeof(*fgt->eof_in));
3283  if (!fgt->eof_in)
3284  goto fail;
3285 
3286  fgt->eof_out = av_calloc(fg->nb_outputs, sizeof(*fgt->eof_out));
3287  if (!fgt->eof_out)
3288  goto fail;
3289 
3291  if (!fgt->frame_queue_out)
3292  goto fail;
3293 
3294  return 0;
3295 
3296 fail:
3297  fg_thread_uninit(fgt);
3298  return AVERROR(ENOMEM);
3299 }
3300 
// Main loop of the filtering thread: receive frames/EOF/commands from the
// scheduler, feed them into the graph (send_frame/send_eof/sub2video_frame,
// or send_command for control messages), then drain all available output
// frames to the consumers. On exit, flushes every still-open output.
// NOTE(review): extraction dropped several lines: 3316 (the condition
// guarding initial configuration), 3369 (presumably a flush/assert before
// send_eof), 3398-3399 (the per-input ifp lookup and EOF-needed check in the
// close-inputs loop), 3405 (per-output ofp lookup), and 3417 (the condition
// guarding print_filtergraph). Confirm against the original file.
3301 static int filter_thread(void *arg)
3302 {
3303  FilterGraphPriv *fgp = arg;
3304  FilterGraph *fg = &fgp->fg;
3305 
3306  FilterGraphThread fgt;
3307  int ret = 0, input_status = 0;
3308 
3309  ret = fg_thread_init(&fgt, fg);
3310  if (ret < 0)
3311  goto finish;
3312 
3313  fg_thread_set_name(fg);
3314 
3315  // if we have all input parameters the graph can now be configured
3317  ret = configure_filtergraph(fg, &fgt);
3318  if (ret < 0) {
3319  av_log(fg, AV_LOG_ERROR, "Error configuring filter graph: %s\n",
3320  av_err2str(ret));
3321  goto finish;
3322  }
3323  }
3324 
3325  while (1) {
3326  InputFilter *ifilter;
3327  InputFilterPriv *ifp = NULL;
3328  enum FrameOpaque o;
3329  unsigned input_idx = fgt.next_in;
3330 
3331  input_status = sch_filter_receive(fgp->sch, fgp->sch_idx,
3332  &input_idx, fgt.frame);
3333  if (input_status == AVERROR_EOF) {
3334  av_log(fg, AV_LOG_VERBOSE, "Filtering thread received EOF\n");
3335  break;
3336  } else if (input_status == AVERROR(EAGAIN)) {
3337  // should only happen when we didn't request any input
3338  av_assert0(input_idx == fg->nb_inputs);
3339  goto read_frames;
3340  }
3341  av_assert0(input_status >= 0);
3342 
// frame->opaque carries an out-of-band message tag
3343  o = (intptr_t)fgt.frame->opaque;
3344 
3345  // message on the control stream
3346  if (input_idx == fg->nb_inputs) {
3347  FilterCommand *fc;
3348 
3349  av_assert0(o == FRAME_OPAQUE_SEND_COMMAND && fgt.frame->buf[0]);
3350 
3351  fc = (FilterCommand*)fgt.frame->buf[0]->data;
3352  send_command(fg, fgt.graph, fc->time, fc->target, fc->command, fc->arg,
3353  fc->all_filters);
3354  av_frame_unref(fgt.frame);
3355  continue;
3356  }
3357 
3358  // we received an input frame or EOF
3359  ifilter = fg->inputs[input_idx];
3360  ifp = ifp_from_ifilter(ifilter);
3361 
3362  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE) {
3363  int hb_frame = input_status >= 0 && o == FRAME_OPAQUE_SUB_HEARTBEAT;
3364  ret = sub2video_frame(ifilter, (fgt.frame->buf[0] || hb_frame) ? fgt.frame : NULL,
3365  !fgt.graph);
3366  } else if (fgt.frame->buf[0]) {
3367  ret = send_frame(fg, &fgt, ifilter, fgt.frame);
3368  } else {
// an empty (buffer-less) frame signals EOF with a final timestamp
3370  ret = send_eof(&fgt, ifilter, fgt.frame->pts, fgt.frame->time_base);
3371  }
3372  av_frame_unref(fgt.frame);
3373  if (ret == AVERROR_EOF) {
3374  av_log(fg, AV_LOG_VERBOSE, "Input %u no longer accepts new data\n",
3375  input_idx);
3376  close_input(ifp);
3377  continue;
3378  }
3379  if (ret < 0)
3380  goto finish;
3381 
3382 read_frames:
3383  // retrieve all newly available frames
3384  ret = read_frames(fg, &fgt, fgt.frame);
3385  if (ret == AVERROR_EOF) {
3386  av_log(fg, AV_LOG_VERBOSE, "All consumers returned EOF\n");
3387  if (ifp && ifp->opts.flags & IFILTER_FLAG_DROPCHANGED)
3388  av_log(fg, AV_LOG_INFO, "Total changed input frames dropped : %"PRId64"\n", ifp->nb_dropped);
3389  break;
3390  } else if (ret < 0) {
3391  av_log(fg, AV_LOG_ERROR, "Error sending frames to consumers: %s\n",
3392  av_err2str(ret));
3393  goto finish;
3394  }
3395 
3396  // ensure all inputs no longer accepting data are closed
3397  for (int i = 0; fgt.graph && i < fg->nb_inputs; i++) {
3400  close_input(ifp);
3401  }
3402  }
3403 
// flush every output that has not yet seen EOF
3404  for (unsigned i = 0; i < fg->nb_outputs; i++) {
3406 
3407  if (fgt.eof_out[i] || !fgt.graph)
3408  continue;
3409 
3410  ret = fg_output_frame(ofp, &fgt, NULL);
3411  if (ret < 0)
3412  goto finish;
3413  }
3414 
3415 finish:
3416 
3418  print_filtergraph(fg, fgt.graph);
3419 
3420  // EOF is normal termination
3421  if (ret == AVERROR_EOF)
3422  ret = 0;
3423 
3424  fg_thread_uninit(&fgt);
3425 
3426  return ret;
3427 }
3428 
3429 void fg_send_command(FilterGraph *fg, double time, const char *target,
3430  const char *command, const char *arg, int all_filters)
3431 {
3432  FilterGraphPriv *fgp = fgp_from_fg(fg);
3433  AVBufferRef *buf;
3434  FilterCommand *fc;
3435 
3436  fc = av_mallocz(sizeof(*fc));
3437  if (!fc)
3438  return;
3439 
3440  buf = av_buffer_create((uint8_t*)fc, sizeof(*fc), filter_command_free, NULL, 0);
3441  if (!buf) {
3442  av_freep(&fc);
3443  return;
3444  }
3445 
3446  fc->target = av_strdup(target);
3447  fc->command = av_strdup(command);
3448  fc->arg = av_strdup(arg);
3449  if (!fc->target || !fc->command || !fc->arg) {
3450  av_buffer_unref(&buf);
3451  return;
3452  }
3453 
3454  fc->time = time;
3455  fc->all_filters = all_filters;
3456 
3457  fgp->frame->buf[0] = buf;
3458  fgp->frame->opaque = (void*)(intptr_t)FRAME_OPAQUE_SEND_COMMAND;
3459 
3460  sch_filter_command(fgp->sch, fgp->sch_idx, fgp->frame);
3461 }
AV_OPT_SEARCH_CHILDREN
#define AV_OPT_SEARCH_CHILDREN
Search in possible children of the given object first.
Definition: opt.h:605
InputFilterPriv::nb_dropped
uint64_t nb_dropped
Definition: ffmpeg_filter.c:120
SCH_FILTER_OUT
#define SCH_FILTER_OUT(filter, output)
Definition: ffmpeg_sched.h:129
AVSubtitle
Definition: avcodec.h:2094
AVBufferSrcParameters::side_data
AVFrameSideData ** side_data
Definition: buffersrc.h:124
AVBufferSrcParameters::color_space
enum AVColorSpace color_space
Video only, the YUV colorspace and range.
Definition: buffersrc.h:121
configure_input_filter
static int configure_input_filter(FilterGraph *fg, AVFilterGraph *graph, InputFilter *ifilter, AVFilterInOut *in)
Definition: ffmpeg_filter.c:2023
FilterGraphThread::next_in
unsigned next_in
Definition: ffmpeg_filter.c:93
AVMEDIA_TYPE_SUBTITLE
@ AVMEDIA_TYPE_SUBTITLE
Definition: avutil.h:203
av_gettime_relative
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
Definition: time.c:56
AVFILTER_CMD_FLAG_ONE
#define AVFILTER_CMD_FLAG_ONE
Stop once a filter understood the command (for target=all for example), fast filters are favored auto...
Definition: avfilter.h:469
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:216
AVFrame::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: frame.h:686
AV_BPRINT_SIZE_UNLIMITED
#define AV_BPRINT_SIZE_UNLIMITED
av_buffersink_get_ch_layout
int av_buffersink_get_ch_layout(const AVFilterContext *ctx, AVChannelLayout *out)
Definition: buffersink.c:367
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
av_buffersink_get_sample_aspect_ratio
AVRational av_buffersink_get_sample_aspect_ratio(const AVFilterContext *ctx)
name
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option name
Definition: writing_filters.txt:88
avfilter_filter_pad_count
unsigned avfilter_filter_pad_count(const AVFilter *filter, int is_output)
Get the number of elements in an AVFilter's inputs or outputs array.
Definition: avfilter.c:631
extra_bits
#define extra_bits(eb)
Definition: intrax8.c:120
OutputFilter::graph
struct FilterGraph * graph
Definition: ffmpeg.h:393
av_clip
#define av_clip
Definition: common.h:100
sch_filter_send
int sch_filter_send(Scheduler *sch, unsigned fg_idx, unsigned out_idx, AVFrame *frame)
Called by filtergraph tasks to send a filtered frame or EOF to consumers.
Definition: ffmpeg_sched.c:2600
OutputFilter::class
const AVClass * class
Definition: ffmpeg.h:391
view_specifier_parse
int view_specifier_parse(const char **pspec, ViewSpecifier *vs)
Definition: ffmpeg_opt.c:309
VSYNC_VFR
@ VSYNC_VFR
Definition: ffmpeg.h:70
av_bprint_is_complete
static int av_bprint_is_complete(const AVBPrint *buf)
Test if the print buffer is complete (not truncated).
Definition: bprint.h:218
r
const char * r
Definition: vf_curves.c:127
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
nb_input_files
int nb_input_files
Definition: ffmpeg.c:109
AVSubtitle::rects
AVSubtitleRect ** rects
Definition: avcodec.h:2099
opt.h
choose_input
static int choose_input(const FilterGraph *fg, const FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:2357
get_rotation
double get_rotation(const int32_t *displaymatrix)
Definition: cmdutils.c:1553
FrameData::nb_side_data
int nb_side_data
Definition: ffmpeg.h:744
FilterGraphPriv::frame
AVFrame * frame
Definition: ffmpeg_filter.c:63
read_binary
static int read_binary(void *logctx, const char *path, uint8_t **data, int *len)
Definition: ffmpeg_filter.c:444
FilterGraphPriv::sch
Scheduler * sch
Definition: ffmpeg_filter.c:67
AVCodecParameters::codec_type
enum AVMediaType codec_type
General type of the encoded data.
Definition: codec_par.h:53
FilterGraphThread::got_frame
int got_frame
Definition: ffmpeg_filter.c:95
AVFilterGraph::nb_threads
int nb_threads
Maximum number of threads used by filters in this graph.
Definition: avfilter.h:615
InputFilterPriv::ch_layout
AVChannelLayout ch_layout
Definition: ffmpeg_filter.c:132
avfilter_pad_get_name
const char * avfilter_pad_get_name(const AVFilterPad *pads, int pad_idx)
Get the name of an AVFilterPad.
Definition: avfilter.c:987
FrameData
Definition: ffmpeg.h:722
send_command
static void send_command(FilterGraph *fg, AVFilterGraph *graph, double time, const char *target, const char *command, const char *arg, int all_filters)
Definition: ffmpeg_filter.c:2332
InputFilterPriv::last_pts
int64_t last_pts
Definition: ffmpeg_filter.c:153
avfilter_graph_segment_create_filters
int avfilter_graph_segment_create_filters(AVFilterGraphSegment *seg, int flags)
Create filters specified in a graph segment.
Definition: graphparser.c:516
InputFilterOptions::crop_right
unsigned crop_right
Definition: ffmpeg.h:291
OutputFilter::apad
char * apad
Definition: ffmpeg.h:406
out
static FILE * out
Definition: movenc.c:55
av_frame_get_buffer
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:206
av_bprint_init
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:69
av_frame_get_side_data
AVFrameSideData * av_frame_get_side_data(const AVFrame *frame, enum AVFrameSideDataType type)
Definition: frame.c:659
clone_side_data
static int clone_side_data(AVFrameSideData ***dst, int *nb_dst, AVFrameSideData *const *src, int nb_src, unsigned int flags)
Wrapper calling av_frame_side_data_clone() in a loop for all source entries.
Definition: ffmpeg_utils.h:50
FilterGraph::graph_desc
const char * graph_desc
Definition: ffmpeg.h:428
OutputFilterPriv::sample_fmts
enum AVSampleFormat * sample_fmts
Definition: ffmpeg_filter.c:229
atomic_fetch_add
#define atomic_fetch_add(object, operand)
Definition: stdatomic.h:137
sample_fmts
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:931
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:3456
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:90
FilterGraph::inputs
InputFilter ** inputs
Definition: ffmpeg.h:418
av_buffersink_get_frame_flags
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
Definition: buffersink.c:155
AVBufferSrcParameters::nb_side_data
int nb_side_data
Definition: buffersrc.h:125
InputFilterOptions::crop_bottom
unsigned crop_bottom
Definition: ffmpeg.h:289
av_dict_count
int av_dict_count(const AVDictionary *m)
Get number of entries in dictionary.
Definition: dict.c:37
AVFrame::nb_side_data
int nb_side_data
Definition: frame.h:633
ifilter_parameters_from_frame
static int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame)
Definition: ffmpeg_filter.c:2241
stream_specifier_parse
int stream_specifier_parse(StreamSpecifier *ss, const char *spec, int allow_remainder, void *logctx)
Parse a stream specifier string into a form suitable for matching.
Definition: cmdutils.c:1011
ofilter_class
static const AVClass ofilter_class
Definition: ffmpeg_filter.c:648
HWACCEL_CHANGED
@ HWACCEL_CHANGED
Definition: ffmpeg_filter.c:3075
frame_drop_threshold
float frame_drop_threshold
Definition: ffmpeg_opt.c:62
close_input
static void close_input(InputFilterPriv *ifp)
Definition: ffmpeg_filter.c:2641
AV_TIME_BASE_Q
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:263
ist_filter_add
int ist_filter_add(InputStream *ist, InputFilter *ifilter, int is_simple, const ViewSpecifier *vs, InputFilterOptions *opts, SchedulerNode *src)
Definition: ffmpeg_demux.c:1048
InputFilterPriv::time_base
AVRational time_base
Definition: ffmpeg_filter.c:134
int64_t
long long int64_t
Definition: coverity.c:34
output
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce output
Definition: filter_design.txt:226
configure_output_filter
static int configure_output_filter(FilterGraphPriv *fgp, AVFilterGraph *graph, OutputFilter *ofilter, AVFilterInOut *out)
Definition: ffmpeg_filter.c:1832
av_alpha_mode_name
const char * av_alpha_mode_name(enum AVAlphaMode mode)
Definition: pixdesc.c:3921
FilterCommand::arg
char * arg
Definition: ffmpeg_filter.c:258
AVSubtitleRect
Definition: avcodec.h:2067
av_asprintf
char * av_asprintf(const char *fmt,...)
Definition: avstring.c:115
AVSubtitle::num_rects
unsigned num_rects
Definition: avcodec.h:2098
dec_filter_add
int dec_filter_add(Decoder *dec, InputFilter *ifilter, InputFilterOptions *opts, const ViewSpecifier *vs, SchedulerNode *src)
Definition: ffmpeg_dec.c:1757
OutputFilterPriv::crop_left
unsigned crop_left
Definition: ffmpeg_filter.c:207
fg_free
void fg_free(FilterGraph **pfg)
Definition: ffmpeg_filter.c:1017
FPSConvContext::frames_prev_hist
int64_t frames_prev_hist[3]
Definition: ffmpeg_filter.c:173
AV_CODEC_ID_MPEG4
@ AV_CODEC_ID_MPEG4
Definition: codec_id.h:64
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g. extended_data.
Definition: frame.c:64
AVFrame::opaque
void * opaque
Frame owner's private data.
Definition: frame.h:573
AVFrame::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: frame.h:697
InputFile::index
int index
Definition: ffmpeg.h:532
sample_rates
static const int sample_rates[]
Definition: dcaenc.h:34
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:435
AVFilterInOut::next
struct AVFilterInOut * next
next input/output in the list, NULL if this is the last
Definition: avfilter.h:757
pixdesc.h
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:537
AVFrame::width
int width
Definition: frame.h:507
FilterGraphPriv::log_name
char log_name[32]
Definition: ffmpeg_filter.c:48
FilterGraphPriv::have_sources
int have_sources
Definition: ffmpeg_filter.c:55
StreamSpecifier
Definition: cmdutils.h:113
ofilter_bind_enc
int ofilter_bind_enc(OutputFilter *ofilter, unsigned sched_idx_enc, const OutputFilterOptions *opts)
Definition: ffmpeg_filter.c:814
AVOption
AVOption.
Definition: opt.h:429
InputFilterPriv::ofilter_src
OutputFilter * ofilter_src
Definition: ffmpeg_filter.c:111
fg_output_frame
static int fg_output_frame(OutputFilterPriv *ofp, FilterGraphThread *fgt, AVFrame *frame)
Definition: ffmpeg_filter.c:2707
b
#define b
Definition: input.c:43
av_buffersrc_add_frame
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
Definition: buffersrc.c:191
FilterGraph::index
int index
Definition: ffmpeg.h:416
OutputFilter::index
int index
Definition: ffmpeg.h:395
InputFilterPriv::sample_rate
int sample_rate
Definition: ffmpeg_filter.c:131
data
const char data[16]
Definition: mxf.c:149
InputFilter::index
int index
Definition: ffmpeg.h:376
FPSConvContext::last_dropped
int last_dropped
Definition: ffmpeg_filter.c:177
OutputFilterPriv::ts_offset
int64_t ts_offset
Definition: ffmpeg_filter.c:243
cleanup_filtergraph
static void cleanup_filtergraph(FilterGraph *fg, FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:2033
OutputFilterPriv::alpha_mode
enum AVAlphaMode alpha_mode
Definition: ffmpeg_filter.c:203
ffmpeg.h
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:226
filter
void(* filter)(uint8_t *src, int stride, int qscale)
Definition: h263dsp.c:29
FilterGraph::nb_inputs
int nb_inputs
Definition: ffmpeg.h:419
VIDEO_CHANGED
@ VIDEO_CHANGED
Definition: ffmpeg_filter.c:3071
AV_FRAME_DATA_DISPLAYMATRIX
@ AV_FRAME_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to be applied to the frame for correct presentation.
Definition: frame.h:85
ViewSpecifier
Definition: ffmpeg.h:129
AVDictionary
Definition: dict.c:32
AVFrame::flags
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:679
ofp_from_ofilter
static OutputFilterPriv * ofp_from_ofilter(OutputFilter *ofilter)
Definition: ffmpeg_filter.c:250
AVChannelLayout::order
enum AVChannelOrder order
Channel order used in this layout.
Definition: channel_layout.h:324
av_buffer_ref
AVBufferRef * av_buffer_ref(const AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:103
av_frame_side_data_clone
int av_frame_side_data_clone(AVFrameSideData ***sd, int *nb_sd, const AVFrameSideData *src, unsigned int flags)
Add a new side data entry to an array based on existing side data, taking a reference towards the contained AVBufferRef.
Definition: side_data.c:249
IFILTER_FLAG_AUTOROTATE
@ IFILTER_FLAG_AUTOROTATE
Definition: ffmpeg.h:268
AVChannelLayout::nb_channels
int nb_channels
Number of channels in this layout.
Definition: channel_layout.h:329
configure_output_audio_filter
static int configure_output_audio_filter(FilterGraphPriv *fgp, AVFilterGraph *graph, OutputFilter *ofilter, AVFilterInOut *out)
Definition: ffmpeg_filter.c:1750
AVFrame::buf
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:612
AVBufferSrcParameters::height
int height
Definition: buffersrc.h:87
avio_size
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:326
av_strlcatf
size_t av_strlcatf(char *dst, size_t size, const char *fmt,...)
Definition: avstring.c:103
fg_output_step
static int fg_output_step(OutputFilterPriv *ofp, FilterGraphThread *fgt, AVFrame *frame)
Definition: ffmpeg_filter.c:2790
FilterGraphPriv
Definition: ffmpeg_filter.c:44
av_channel_layout_describe_bprint
int av_channel_layout_describe_bprint(const AVChannelLayout *channel_layout, AVBPrint *bp)
bprint variant of av_channel_layout_describe().
Definition: channel_layout.c:600
FilterGraphThread::eof_in
uint8_t * eof_in
Definition: ffmpeg_filter.c:98
avfilter_graph_free
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
Definition: avfiltergraph.c:119
configure_filtergraph
static int configure_filtergraph(FilterGraph *fg, FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:2068
OutputFilterPriv::log_name
char log_name[32]
Definition: ffmpeg_filter.c:192
AUTO_INSERT_FILTER
#define AUTO_INSERT_FILTER(opt_name, filter_name, arg)
stream_specifier_uninit
void stream_specifier_uninit(StreamSpecifier *ss)
Definition: cmdutils.c:1002
InputStream
Definition: ffmpeg.h:483
filter_nbthreads
char * filter_nbthreads
Definition: ffmpeg_opt.c:76
debug_ts
int debug_ts
Definition: ffmpeg_opt.c:70
OutputFilterOptions
Definition: ffmpeg.h:311
InputFilterOptions::trim_start_us
int64_t trim_start_us
Definition: ffmpeg.h:276
InputFilterOptions::flags
unsigned flags
Definition: ffmpeg.h:297
avfilter_graph_create_filter
int avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt, const char *name, const char *args, void *opaque, AVFilterGraph *graph_ctx)
A convenience wrapper that allocates and initializes a filter in a single step.
Definition: avfiltergraph.c:140
avfilter_graph_alloc_filter
AVFilterContext * avfilter_graph_alloc_filter(AVFilterGraph *graph, const AVFilter *filter, const char *name)
Create a new filter instance in a filter graph.
Definition: avfiltergraph.c:167
finish
static void finish(void)
Definition: movenc.c:374
AV_OPT_TYPE_BINARY
@ AV_OPT_TYPE_BINARY
Underlying C type is a uint8_t* that is either NULL or points to an array allocated with the av_malloc() family of functions.
Definition: opt.h:286
av_color_space_name
const char * av_color_space_name(enum AVColorSpace space)
Definition: pixdesc.c:3856
FRAME_OPAQUE_SUB_HEARTBEAT
@ FRAME_OPAQUE_SUB_HEARTBEAT
Definition: ffmpeg.h:89
OutputFilterPriv
Definition: ffmpeg_filter.c:188
FrameData::dec
struct FrameData::@6 dec
fg_thread_uninit
static void fg_thread_uninit(FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:3256
filter_opt_apply
static int filter_opt_apply(void *logctx, AVFilterContext *f, const char *key, const char *val)
Definition: ffmpeg_filter.c:493
fail
#define fail()
Definition: checkasm.h:224
InputFilter::type
enum AVMediaType type
Definition: ffmpeg.h:379
AVBufferSrcParameters::sample_aspect_ratio
AVRational sample_aspect_ratio
Video only, the sample (pixel) aspect ratio.
Definition: buffersrc.h:92
av_fifo_write
int av_fifo_write(AVFifo *f, const void *buf, size_t nb_elems)
Write data into a FIFO.
Definition: fifo.c:188
sub2video_push_ref
static void sub2video_push_ref(InputFilterPriv *ifp, int64_t pts)
Definition: ffmpeg_filter.c:329
avfilter_graph_alloc
AVFilterGraph * avfilter_graph_alloc(void)
Allocate a filter graph.
Definition: avfiltergraph.c:85
AV_PIX_FMT_FLAG_HWACCEL
#define AV_PIX_FMT_FLAG_HWACCEL
Pixel format is an HW accelerated format.
Definition: pixdesc.h:128
FFSIGN
#define FFSIGN(a)
Definition: common.h:75
print_filtergraph
int print_filtergraph(FilterGraph *fg, AVFilterGraph *graph)
Definition: graphprint.c:947
samplefmt.h
OutputFilterPriv::side_data
AVFrameSideData ** side_data
Definition: ffmpeg_filter.c:210
AVERROR_OPTION_NOT_FOUND
#define AVERROR_OPTION_NOT_FOUND
Option not found.
Definition: error.h:63
avfilter_graph_segment_free
void avfilter_graph_segment_free(AVFilterGraphSegment **seg)
Free the provided AVFilterGraphSegment and everything associated with it.
Definition: graphparser.c:276
sub2video_get_blank_frame
static int sub2video_get_blank_frame(InputFilterPriv *ifp)
Definition: ffmpeg_filter.c:275
AV_BPRINT_SIZE_AUTOMATIC
#define AV_BPRINT_SIZE_AUTOMATIC
ifilter_has_all_input_formats
static int ifilter_has_all_input_formats(FilterGraph *fg)
Definition: ffmpeg_filter.c:618
AVFrame::alpha_mode
enum AVAlphaMode alpha_mode
Indicates how the alpha channel of the video is to be handled.
Definition: frame.h:790
val
static double val(void *priv, double ch)
Definition: aeval.c:77
AVFrame::ch_layout
AVChannelLayout ch_layout
Channel layout of the audio data.
Definition: frame.h:778
SCH_ENC
#define SCH_ENC(encoder)
Definition: ffmpeg_sched.h:123
configure_input_video_filter
static int configure_input_video_filter(FilterGraph *fg, AVFilterGraph *graph, InputFilter *ifilter, AVFilterInOut *in)
Definition: ffmpeg_filter.c:1853
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
avfilter_graph_segment_parse
int avfilter_graph_segment_parse(AVFilterGraph *graph, const char *graph_str, int flags, AVFilterGraphSegment **seg)
Parse a textual filtergraph description into an intermediate form.
Definition: graphparser.c:460
AVDownmixInfo
This structure describes optional metadata relevant to a downmix procedure.
Definition: downmix_info.h:58
pts
static int64_t pts
Definition: transcode_aac.c:644
av_opt_set
int av_opt_set(void *obj, const char *name, const char *val, int search_flags)
Definition: opt.c:825
graph_is_meta
static int graph_is_meta(AVFilterGraph *graph)
Definition: ffmpeg_filter.c:2049
median3
static int64_t median3(int64_t a, int64_t b, int64_t c)
Definition: ffmpeg_filter.c:2496
FilterGraphThread::frame
AVFrame * frame
Definition: ffmpeg_filter.c:85
ss
#define ss(width, name, subs,...)
Definition: cbs_vp9.c:202
AVFILTER_FLAG_DYNAMIC_INPUTS
#define AVFILTER_FLAG_DYNAMIC_INPUTS
The number of the filter inputs is not determined just by AVFilter.inputs.
Definition: avfilter.h:156
FrameData::tb
AVRational tb
Definition: ffmpeg.h:732
OutputFilterPriv::sws_opts
AVDictionary * sws_opts
Definition: ffmpeg_filter.c:222
fgp_from_fg
static FilterGraphPriv * fgp_from_fg(FilterGraph *fg)
Definition: ffmpeg_filter.c:71
OutputFilterPriv::sample_rate
int sample_rate
Definition: ffmpeg_filter.c:199
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
FPSConvContext::dropped_keyframe
int dropped_keyframe
Definition: ffmpeg_filter.c:178
AVRational::num
int num
Numerator.
Definition: rational.h:59
OutputFilter::bound
int bound
Definition: ffmpeg.h:403
LATENCY_PROBE_FILTER_PRE
@ LATENCY_PROBE_FILTER_PRE
Definition: ffmpeg.h:103
InputFilterOptions::trim_end_us
int64_t trim_end_us
Definition: ffmpeg.h:277
AVFilterPad
A filter pad used for either input or output.
Definition: filters.h:40
sch_add_filtergraph
int sch_add_filtergraph(Scheduler *sch, unsigned nb_inputs, unsigned nb_outputs, SchThreadFunc func, void *ctx)
Add a filtergraph to the scheduler.
Definition: ffmpeg_sched.c:859
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:52
sub2video_heartbeat
static void sub2video_heartbeat(InputFilter *ifilter, int64_t pts, AVRational tb)
Definition: ffmpeg_filter.c:2938
avfilter_inout_free
void avfilter_inout_free(AVFilterInOut **inout)
Free the supplied list of AVFilterInOut and set *inout to NULL.
Definition: graphparser.c:76
OutputFilterPriv::nb_side_data
int nb_side_data
Definition: ffmpeg_filter.c:211
avassert.h
OutputFilterPriv::trim_start_us
int64_t trim_start_us
Definition: ffmpeg_filter.c:240
FrameData::frame_rate_filter
AVRational frame_rate_filter
Definition: ffmpeg.h:735
InputFilterPriv::nb_side_data
int nb_side_data
Definition: ffmpeg_filter.c:137
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
send_eof
static int send_eof(FilterGraphThread *fgt, InputFilter *ifilter, int64_t pts, AVRational tb)
Definition: ffmpeg_filter.c:3007
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
InputFilterPriv
Definition: ffmpeg_filter.c:102
av_fifo_read
int av_fifo_read(AVFifo *f, void *buf, size_t nb_elems)
Read data from a FIFO.
Definition: fifo.c:240
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:650
av_buffersink_get_frame_rate
AVRational av_buffersink_get_frame_rate(const AVFilterContext *ctx)
Definition: buffersink.c:347
ifilter_alloc
static InputFilter * ifilter_alloc(FilterGraph *fg)
Definition: ffmpeg_filter.c:988
AVFilterChain::filters
AVFilterParams ** filters
Definition: avfilter.h:933
filter_command_free
static void filter_command_free(void *opaque, uint8_t *data)
Definition: ffmpeg_filter.c:264
VSYNC_VSCFR
@ VSYNC_VSCFR
Definition: ffmpeg.h:71
llrintf
#define llrintf(x)
Definition: libm.h:401
s
#define s(width, name)
Definition: cbs_vp9.c:198
ifilter_bind_ist
static int ifilter_bind_ist(InputFilter *ifilter, InputStream *ist, const ViewSpecifier *vs)
Definition: ffmpeg_filter.c:682
FilterGraphPriv::frame_enc
AVFrame * frame_enc
Definition: ffmpeg_filter.c:65
DOWNMIX_CHANGED
@ DOWNMIX_CHANGED
Definition: ffmpeg_filter.c:3074
InputFilterPriv::frame
AVFrame * frame
Definition: ffmpeg_filter.c:108
FilterGraph::outputs
OutputFilter ** outputs
Definition: ffmpeg.h:420
ofilter_item_name
static const char * ofilter_item_name(void *obj)
Definition: ffmpeg_filter.c:642
AVDictionaryEntry::key
char * key
Definition: dict.h:91
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:201
VIEW_SPECIFIER_TYPE_NONE
@ VIEW_SPECIFIER_TYPE_NONE
Definition: ffmpeg.h:118
AV_CHANNEL_ORDER_UNSPEC
@ AV_CHANNEL_ORDER_UNSPEC
Only the channel count is specified, without any further information about the channel order.
Definition: channel_layout.h:119
ifilter_bind_dec
static int ifilter_bind_dec(InputFilterPriv *ifp, Decoder *dec, const ViewSpecifier *vs)
Definition: ffmpeg_filter.c:741
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
OutputFilter::linklabel
uint8_t * linklabel
Definition: ffmpeg.h:404
InputFilter
Definition: ffmpeg.h:373
FilterGraphPriv::nb_outputs_done
unsigned nb_outputs_done
Definition: ffmpeg_filter.c:58
fc
#define fc(width, name, range_min, range_max)
Definition: cbs_av1.c:494
av_buffersink_get_format
int av_buffersink_get_format(const AVFilterContext *ctx)
av_buffersink_get_time_base
AVRational av_buffersink_get_time_base(const AVFilterContext *ctx)
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:42
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:296
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:231
ctx
static AVFormatContext * ctx
Definition: movenc.c:49
OFILTER_FLAG_AUTOSCALE
@ OFILTER_FLAG_AUTOSCALE
Definition: ffmpeg.h:306
print_graphs_file
char * print_graphs_file
Definition: ffmpeg_opt.c:81
InputFilter::linklabel
uint8_t * linklabel
Definition: ffmpeg.h:387
av_rescale_q
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
AVSubtitle::pts
int64_t pts
Same as packet pts, in AV_TIME_BASE.
Definition: avcodec.h:2100
fg_thread_init
static int fg_thread_init(FilterGraphThread *fgt, const FilterGraph *fg)
Definition: ffmpeg_filter.c:3274
InputFilterOptions::name
uint8_t * name
Definition: ffmpeg.h:279
InputFilterOptions::crop_top
unsigned crop_top
Definition: ffmpeg.h:288
InputFilter::graph
struct FilterGraph * graph
Definition: ffmpeg.h:374
AV_SIDE_DATA_PROP_GLOBAL
@ AV_SIDE_DATA_PROP_GLOBAL
The side data type can be used in stream-global structures.
Definition: frame.h:304
av_get_sample_fmt_name
const char * av_get_sample_fmt_name(enum AVSampleFormat sample_fmt)
Return the name of sample_fmt, or NULL if sample_fmt is not recognized.
Definition: samplefmt.c:51
key
const char * key
Definition: hwcontext_opencl.c:189
color_range
color_range
Definition: vf_selectivecolor.c:43
av_mallocz
#define av_mallocz(s)
Definition: tableprint_vlc.h:31
InputFilterPriv::sub2video
struct InputFilterPriv::@10 sub2video
AV_ROUND_NEAR_INF
@ AV_ROUND_NEAR_INF
Round to nearest and halfway cases away from zero.
Definition: mathematics.h:135
fsize
static int64_t fsize(FILE *f)
Definition: audiomatch.c:29
OutputFilterPriv::fps
FPSConvContext fps
Definition: ffmpeg_filter.c:245
av_fallthrough
#define av_fallthrough
Definition: attributes.h:67
av_buffersink_get_alpha_mode
enum AVAlphaMode av_buffersink_get_alpha_mode(const AVFilterContext *ctx)
fg_item_name
static const char * fg_item_name(void *obj)
Definition: ffmpeg_filter.c:1075
AV_ROUND_PASS_MINMAX
@ AV_ROUND_PASS_MINMAX
Flag telling rescaling functions to pass INT64_MIN/MAX through unchanged, avoiding special cases for AV_NOPTS_VALUE.
Definition: mathematics.h:159
command
static int command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
Definition: vf_drawtext.c:1187
tmp
static uint8_t tmp[40]
Definition: aes_ctr.c:52
arg
const char * arg
Definition: jacosubdec.c:65
OutputFilterPriv::ch_layouts
const AVChannelLayout * ch_layouts
Definition: ffmpeg_filter.c:231
if
if(ret)
Definition: filter_design.txt:179
OutputFilterPriv::width
int width
Definition: ffmpeg_filter.c:198
InputFilterOptions::crop_left
unsigned crop_left
Definition: ffmpeg.h:290
av_color_range_name
const char * av_color_range_name(enum AVColorRange range)
Definition: pixdesc.c:3772
AVBufferSrcParameters::alpha_mode
enum AVAlphaMode alpha_mode
Video only, the alpha mode.
Definition: buffersrc.h:130
AVFormatContext
Format I/O context.
Definition: avformat.h:1263
avfilter_get_by_name
const AVFilter * avfilter_get_by_name(const char *name)
Get a filter definition matching the given name.
Definition: allfilters.c:656
opts
static AVDictionary * opts
Definition: movenc.c:51
AVStream::codecpar
AVCodecParameters * codecpar
Codec parameters associated with this stream.
Definition: avformat.h:767
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:76
OutputFilter::name
uint8_t * name
Definition: ffmpeg.h:394
fabs
static __device__ float fabs(float a)
Definition: cuda_runtime.h:182
avfilter_graph_config
int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx)
Check validity and configure all the links and formats in the graph.
Definition: avfiltergraph.c:1434
OutputFilterPriv::crop_right
unsigned crop_right
Definition: ffmpeg_filter.c:208
OutputFilterPriv::enc_timebase
AVRational enc_timebase
Definition: ffmpeg_filter.c:239
avfilter_graph_segment_apply
int avfilter_graph_segment_apply(AVFilterGraphSegment *seg, int flags, AVFilterInOut **inputs, AVFilterInOut **outputs)
Apply all filter/link descriptions from a graph segment to the associated filtergraph.
Definition: graphparser.c:882
InputFilterPriv::color_space
enum AVColorSpace color_space
Definition: ffmpeg_filter.c:127
NULL
#define NULL
Definition: coverity.c:32
av_opt_set_bin
int av_opt_set_bin(void *obj, const char *name, const uint8_t *val, int len, int search_flags)
Definition: opt.c:885
set_channel_layout
static int set_channel_layout(OutputFilterPriv *f, const AVChannelLayout *layouts_allowed, const AVChannelLayout *layout_requested)
Definition: ffmpeg_filter.c:774
OutputFilterPriv::ch_layout
AVChannelLayout ch_layout
Definition: ffmpeg_filter.c:200
AVFilterParams
Parameters describing a filter to be created in a filtergraph.
Definition: avfilter.h:865
format
New swscale design to change SwsGraph is what coordinates multiple passes These can include cascaded scaling error diffusion and so on Or we could have separate passes for the vertical and horizontal scaling In between each SwsPass lies a fully allocated image buffer Graph passes may have different levels of e g we can have a single threaded error diffusion pass following a multi threaded scaling pass SwsGraph is internally recreated whenever the image format
Definition: swscale-v2.txt:14
FPSConvContext::dup_warning
uint64_t dup_warning
Definition: ffmpeg_filter.c:175
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:139
InputStream::st
AVStream * st
Definition: ffmpeg.h:491
avfilter_graph_set_auto_convert
void avfilter_graph_set_auto_convert(AVFilterGraph *graph, unsigned flags)
Enable or disable automatic format conversion inside the graph.
Definition: avfiltergraph.c:162
InputFilterPriv::displaymatrix_present
int displaymatrix_present
Definition: ffmpeg_filter.c:143
Decoder
Definition: ffmpeg.h:469
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
OFILTER_FLAG_AUDIO_24BIT
@ OFILTER_FLAG_AUDIO_24BIT
Definition: ffmpeg.h:305
AVFilterChain::nb_filters
size_t nb_filters
Definition: avfilter.h:934
av_frame_side_data_remove
void av_frame_side_data_remove(AVFrameSideData ***sd, int *nb_sd, enum AVFrameSideDataType type)
Remove and free all side data instances of the given type from an array.
Definition: side_data.c:103
AVFilterGraph::filters
AVFilterContext ** filters
Definition: avfilter.h:591
ofilter_bind_ifilter
static int ofilter_bind_ifilter(OutputFilter *ofilter, InputFilterPriv *ifp, const OutputFilterOptions *opts)
Definition: ffmpeg_filter.c:929
OutputFilterPriv::sample_aspect_ratio
AVRational sample_aspect_ratio
Definition: ffmpeg_filter.c:220
ofilter_alloc
static OutputFilter * ofilter_alloc(FilterGraph *fg, enum AVMediaType type)
Definition: ffmpeg_filter.c:656
close_output
static int close_output(OutputFilterPriv *ofp, FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:2651
FilterGraphThread::frame_queue_out
AVFifo * frame_queue_out
Definition: ffmpeg_filter.c:90
FilterGraphPriv::sch_idx
unsigned sch_idx
Definition: ffmpeg_filter.c:68
FrameData::wallclock
int64_t wallclock[LATENCY_PROBE_NB]
Definition: ffmpeg.h:739
avfilter_graph_request_oldest
int avfilter_graph_request_oldest(AVFilterGraph *graph)
Request a frame on the oldest sink link.
Definition: avfiltergraph.c:1567
time.h
AVFilterGraphSegment::chains
AVFilterChain ** chains
A list of filter chain contained in this segment.
Definition: avfilter.h:957
stream_specifier_match
unsigned stream_specifier_match(const StreamSpecifier *ss, const AVFormatContext *s, const AVStream *st, void *logctx)
Definition: cmdutils.c:1226
AVFilterGraph
Definition: avfilter.h:589
AV_FRAME_SIDE_DATA_FLAG_REPLACE
#define AV_FRAME_SIDE_DATA_FLAG_REPLACE
Don't add a new entry if another of the same type exists.
Definition: frame.h:1061
InputFilterPriv::downmixinfo_present
int downmixinfo_present
Definition: ffmpeg_filter.c:147
inputs
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
Definition: filter_design.txt:244
InputFilterOptions
Definition: ffmpeg.h:275
AVPixFmtDescriptor::flags
uint64_t flags
Combination of AV_PIX_FMT_FLAG_...
Definition: pixdesc.h:94
fg_create_simple
int fg_create_simple(FilterGraph **pfg, InputStream *ist, char **graph_desc, Scheduler *sch, unsigned sched_idx_enc, const OutputFilterOptions *opts)
Definition: ffmpeg_filter.c:1249
InputFilterPriv::sample_aspect_ratio
AVRational sample_aspect_ratio
Definition: ffmpeg_filter.c:126
AVCOL_RANGE_UNSPECIFIED
@ AVCOL_RANGE_UNSPECIFIED
Definition: pixfmt.h:743
FilterGraph::nb_outputs
int nb_outputs
Definition: ffmpeg.h:421
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
av_opt_set_int
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
Definition: opt.c:870
InputStream::par
AVCodecParameters * par
Codec parameters - to be used by the decoding/streamcopy code.
Definition: ffmpeg.h:499
av_buffer_create
AVBufferRef * av_buffer_create(uint8_t *data, size_t size, void(*free)(void *opaque, uint8_t *data), void *opaque, int flags)
Create an AVBuffer from an existing array.
Definition: buffer.c:55
input_files
InputFile ** input_files
Definition: ffmpeg.c:108
AV_CLASS_CATEGORY_FILTER
@ AV_CLASS_CATEGORY_FILTER
Definition: log.h:36
Scheduler
Definition: ffmpeg_sched.c:273
FilterGraphPriv::fg
FilterGraph fg
Definition: ffmpeg_filter.c:45
FilterGraphPriv::nb_threads
int nb_threads
Definition: ffmpeg_filter.c:60
OutputFilterPriv::ofilter
OutputFilter ofilter
Definition: ffmpeg_filter.c:189
FilterGraph
Definition: ffmpeg.h:414
AVFilterGraphSegment
A parsed representation of a filtergraph segment.
Definition: avfilter.h:946
OutputFilterPriv::crop_bottom
unsigned crop_bottom
Definition: ffmpeg_filter.c:206
ENC_TIME_BASE_DEMUX
@ ENC_TIME_BASE_DEMUX
Definition: ffmpeg.h:78
InputFilterOptions::sub2video_width
int sub2video_width
Definition: ffmpeg.h:293
InputFilter::filter
AVFilterContext * filter
Definition: ffmpeg.h:381
AVBufferSrcParameters::frame_rate
AVRational frame_rate
Video only, the frame rate of the input video.
Definition: buffersrc.h:100
AVFilterInOut::pad_idx
int pad_idx
index of the filt_ctx pad to use for linking
Definition: avfilter.h:754
AVAlphaMode
AVAlphaMode
Correlation between the alpha channel and color values.
Definition: pixfmt.h:810
av_buffersrc_close
int av_buffersrc_close(AVFilterContext *ctx, int64_t pts, unsigned flags)
Close the buffer source after EOF.
Definition: buffersrc.c:291
AVFilterGraph::scale_sws_opts
char * scale_sws_opts
sws options to use for the auto-inserted scale filters
Definition: avfilter.h:594
filtergraph_is_simple
int filtergraph_is_simple(const FilterGraph *fg)
Definition: ffmpeg_filter.c:2326
VideoSyncMethod
VideoSyncMethod
Definition: ffmpeg.h:66
av_opt_find
const AVOption * av_opt_find(void *obj, const char *name, const char *unit, int opt_flags, int search_flags)
Look for an option in an object.
Definition: opt.c:1985
FrameData::side_data
AVFrameSideData ** side_data
Definition: ffmpeg.h:743
IFILTER_FLAG_REINIT
@ IFILTER_FLAG_REINIT
Definition: ffmpeg.h:269
f
f
Definition: af_crystalizer.c:122
OutputFilter::output_name
char * output_name
Definition: ffmpeg.h:399
AVIOContext
Bytestream IO Context.
Definition: avio.h:160
av_ts2timestr
#define av_ts2timestr(ts, tb)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:83
AV_BUFFERSRC_FLAG_KEEP_REF
@ AV_BUFFERSRC_FLAG_KEEP_REF
Keep a reference to the frame.
Definition: buffersrc.h:53
filter_thread
static int filter_thread(void *arg)
Definition: ffmpeg_filter.c:3301
AVMediaType
AVMediaType
Definition: avutil.h:198
InputFilterPriv::hw_frames_ctx
AVBufferRef * hw_frames_ctx
Definition: ffmpeg_filter.c:141
AVFifo
Definition: fifo.c:35
FRAME_OPAQUE_SEND_COMMAND
@ FRAME_OPAQUE_SEND_COMMAND
Definition: ffmpeg.h:91
FilterGraphThread
Definition: ffmpeg_filter.c:82
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:278
av_bprint_finalize
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:235
InputFilterPriv::displaymatrix
int32_t displaymatrix[9]
Definition: ffmpeg_filter.c:145
AVChannelLayout
An AVChannelLayout holds information about the channel layout of audio data.
Definition: channel_layout.h:319
OutputFilterPriv::color_ranges
enum AVColorRange * color_ranges
Definition: ffmpeg_filter.c:234
FilterGraphThread::graph
AVFilterGraph * graph
Definition: ffmpeg_filter.c:83
av_buffersrc_parameters_alloc
AVBufferSrcParameters * av_buffersrc_parameters_alloc(void)
Allocate a new AVBufferSrcParameters instance.
Definition: buffersrc.c:108
AVFilterInOut::filter_ctx
AVFilterContext * filter_ctx
filter context associated to this input/output
Definition: avfilter.h:751
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:87
i
#define i(width, name, range_min, range_max)
Definition: cbs_h264.c:63
av_err2str
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:122
AVFrame::sample_rate
int sample_rate
Sample rate of the audio data.
Definition: frame.h:598
av_buffersrc_get_status
int av_buffersrc_get_status(AVFilterContext *ctx)
Returns 0 or a negative AVERROR code.
Definition: buffersrc.c:300
OutputFilterPriv::tb_out_locked
int tb_out_locked
Definition: ffmpeg_filter.c:218
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:424
avfilter_link
int avfilter_link(AVFilterContext *src, unsigned srcpad, AVFilterContext *dst, unsigned dstpad)
Link two filters together.
Definition: avfilter.c:149
sch_filter_choke_inputs
void sch_filter_choke_inputs(Scheduler *sch, unsigned fg_idx)
Called by filtergraph tasks to choke all filter inputs, preventing them from receiving more frames un...
Definition: ffmpeg_sched.c:2663
AVBufferSrcParameters::hw_frames_ctx
AVBufferRef * hw_frames_ctx
Video with a hwaccel pixel format only.
Definition: buffersrc.h:106
start_time
static int64_t start_time
Definition: ffplay.c:328
AVFILTER_FLAG_HWDEVICE
#define AVFILTER_FLAG_HWDEVICE
The filter can create hardware frames using AVFilterContext.hw_device_ctx.
Definition: avfilter.h:188
InputFilterPriv::color_range
enum AVColorRange color_range
Definition: ffmpeg_filter.c:128
OutputFilterPriv::displaymatrix
int32_t displaymatrix[9]
Definition: ffmpeg_filter.c:237
AV_SAMPLE_FMT_NONE
@ AV_SAMPLE_FMT_NONE
Definition: samplefmt.h:56
MATRIX_CHANGED
@ MATRIX_CHANGED
Definition: ffmpeg_filter.c:3073
FilterCommand::time
double time
Definition: ffmpeg_filter.c:260
InputFilterPriv::initialize
unsigned int initialize
marks if sub2video_update should force an initialization
Definition: ffmpeg_filter.c:157
InputFilterPriv::displaymatrix_applied
int displaymatrix_applied
Definition: ffmpeg_filter.c:144
avfilter_graph_queue_command
int avfilter_graph_queue_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, int flags, double ts)
Queue a command for one or more filter instances.
Definition: avfiltergraph.c:1484
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:247
AVFrame::time_base
AVRational time_base
Time base for the timestamps in this frame.
Definition: frame.h:552
AVFrameSideData::data
uint8_t * data
Definition: frame.h:292
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:522
FilterGraphPriv::disable_conversions
int disable_conversions
Definition: ffmpeg_filter.c:56
frame_data
FrameData * frame_data(AVFrame *frame)
Get our axiliary frame data attached to the frame, allocating it if needed.
Definition: ffmpeg.c:477
AVSubtitle::end_display_time
uint32_t end_display_time
Definition: avcodec.h:2097
FilterGraphThread::eof_out
uint8_t * eof_out
Definition: ffmpeg_filter.c:99
allocate_array_elem
void * allocate_array_elem(void *ptr, size_t elem_size, int *nb_elems)
Atomically add a new element to an array of pointers, i.e.
Definition: cmdutils.c:1540
FPSConvContext::vsync_method
enum VideoSyncMethod vsync_method
Definition: ffmpeg_filter.c:180
av_frame_remove_side_data
void av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type)
Remove and free all side data instances of the given type.
Definition: frame.c:725
OutputFilter::filter
AVFilterContext * filter
Definition: ffmpeg.h:397
InputFilterPriv::width
int width
Definition: ffmpeg_filter.c:125
AVBufferSrcParameters::time_base
AVRational time_base
The timebase to be used for the timestamps on the input frames.
Definition: buffersrc.h:82
AV_PIX_FMT_RGB32
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:511
filter_is_buffersrc
static int filter_is_buffersrc(const AVFilterContext *f)
Definition: ffmpeg_filter.c:2042
fg_finalise_bindings
int fg_finalise_bindings(void)
Definition: ffmpeg_filter.c:1492
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
AUDIO_CHANGED
@ AUDIO_CHANGED
Definition: ffmpeg_filter.c:3072
attributes.h
sch_filter_receive
int sch_filter_receive(Scheduler *sch, unsigned fg_idx, unsigned *in_idx, AVFrame *frame)
Called by filtergraph tasks to obtain frames for filtering.
Definition: ffmpeg_sched.c:2529
fg_complex_bind_input
static int fg_complex_bind_input(FilterGraph *fg, InputFilter *ifilter, int commit)
Definition: ffmpeg_filter.c:1301
av_dict_free
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
Definition: dict.c:233
unknown_if_null
static const char * unknown_if_null(const char *str)
Definition: ffmpeg_filter.c:3078
InputFilterOptions::sub2video_height
int sub2video_height
Definition: ffmpeg.h:294
decoders
Decoder ** decoders
Definition: ffmpeg.c:117
OutputFilterPriv::log_parent
void * log_parent
Definition: ffmpeg_filter.c:191
AVFILTER_AUTO_CONVERT_NONE
@ AVFILTER_AUTO_CONVERT_NONE
all automatic conversions disabled
Definition: avfilter.h:719
nb_decoders
int nb_decoders
Definition: ffmpeg.c:118
OutputFilter::type
enum AVMediaType type
Definition: ffmpeg.h:408
read_frames
static int read_frames(FilterGraph *fg, FilterGraphThread *fgt, AVFrame *frame)
Definition: ffmpeg_filter.c:2875
av_channel_layout_compare
int av_channel_layout_compare(const AVChannelLayout *chl, const AVChannelLayout *chl1)
Check whether two channel layouts are semantically the same, i.e.
Definition: channel_layout.c:811
SUBTITLE_BITMAP
@ SUBTITLE_BITMAP
A bitmap, pict will be set.
Definition: avcodec.h:2050
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:221
send_frame
static int send_frame(FilterGraph *fg, FilterGraphThread *fgt, InputFilter *ifilter, AVFrame *frame)
Definition: ffmpeg_filter.c:3083
avfilter_init_str
int avfilter_init_str(AVFilterContext *filter, const char *args)
Initialize a filter with the supplied parameters.
Definition: avfilter.c:959
buffersink.h
av_buffersink_get_side_data
const AVFrameSideData *const * av_buffersink_get_side_data(const AVFilterContext *ctx, int *nb_side_data)
Definition: buffersink.c:380
av_channel_layout_default
void av_channel_layout_default(AVChannelLayout *ch_layout, int nb_channels)
Get the default channel layout for a given number of channels.
Definition: channel_layout.c:841
av_find_nearest_q_idx
int av_find_nearest_q_idx(AVRational q, const AVRational *q_list)
Find the value in a list of rationals nearest a given reference rational.
Definition: rational.c:140
OutputFilterPriv::color_range
enum AVColorRange color_range
Definition: ffmpeg_filter.c:202
av_buffersink_get_w
int av_buffersink_get_w(const AVFilterContext *ctx)
FilterCommand::all_filters
int all_filters
Definition: ffmpeg_filter.c:261
FPSConvContext::framerate_clip
int framerate_clip
Definition: ffmpeg_filter.c:185
bprint.h
FPSConvContext::frame_number
int64_t frame_number
Definition: ffmpeg_filter.c:169
filter_buffered_frames
int filter_buffered_frames
Definition: ffmpeg_opt.c:78
av_buffersrc_parameters_set
int av_buffersrc_parameters_set(AVFilterContext *ctx, AVBufferSrcParameters *param)
Initialize the buffersrc or abuffersrc filter with the provided parameters.
Definition: buffersrc.c:122
graph_opts_apply
static int graph_opts_apply(void *logctx, AVFilterGraphSegment *seg)
Definition: ffmpeg_filter.c:549
FPSConvContext
Definition: ffmpeg_filter.c:166
lrintf
#define lrintf(x)
Definition: libm_mips.h:72
av_malloc
#define av_malloc(s)
Definition: ops_asmgen.c:44
AVBufferSrcParameters::width
int width
Video only, the display dimensions of the input frames.
Definition: buffersrc.h:87
FrameData::bits_per_raw_sample
int bits_per_raw_sample
Definition: ffmpeg.h:737
av_frame_side_data_free
void av_frame_side_data_free(AVFrameSideData ***sd, int *nb_sd)
Free all side data entries and their contents, then zeroes out the values which the pointers are poin...
Definition: side_data.c:134
fg_send_command
void fg_send_command(FilterGraph *fg, double time, const char *target, const char *command, const char *arg, int all_filters)
Definition: ffmpeg_filter.c:3429
downmix_info.h
sch_remove_filtergraph
void sch_remove_filtergraph(Scheduler *sch, int idx)
Definition: ffmpeg_sched.c:480
FilterGraphPriv::is_simple
int is_simple
Definition: ffmpeg_filter.c:50
InputFilterOptions::fallback
AVFrame * fallback
Definition: ffmpeg.h:299
av_buffersrc_add_frame_flags
int attribute_align_arg av_buffersrc_add_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Add a frame to the buffer source.
Definition: buffersrc.c:210
VSYNC_CFR
@ VSYNC_CFR
Definition: ffmpeg.h:69
src2
const pixel * src2
Definition: h264pred_template.c:421
configure_input_audio_filter
static int configure_input_audio_filter(FilterGraph *fg, AVFilterGraph *graph, InputFilter *ifilter, AVFilterInOut *in)
Definition: ffmpeg_filter.c:1972
AVColorSpace
AVColorSpace
YUV colorspace type.
Definition: pixfmt.h:700
FPSConvContext::framerate_max
AVRational framerate_max
Definition: ffmpeg_filter.c:183
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:58
needed
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is needed
Definition: filter_design.txt:212
AVSampleFormat
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:55
OutputFilterPriv::crop_top
unsigned crop_top
Definition: ffmpeg_filter.c:205
delta
float delta
Definition: vorbis_enc_data.h:430
print_graphs
int print_graphs
Definition: ffmpeg_opt.c:80
FRAME_OPAQUE_EOF
@ FRAME_OPAQUE_EOF
Definition: ffmpeg.h:90
InputFile::ctx
AVFormatContext * ctx
Definition: ffmpeg.h:534
av_frame_move_ref
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:523
cfgp_from_cfg
static const FilterGraphPriv * cfgp_from_cfg(const FilterGraph *fg)
Definition: ffmpeg_filter.c:76
graph_parse
static int graph_parse(void *logctx, AVFilterGraph *graph, const char *desc, AVFilterInOut **inputs, AVFilterInOut **outputs, AVBufferRef *hw_device)
Definition: ffmpeg_filter.c:573
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:496
InputFilterPriv::eof
int eof
Definition: ffmpeg_filter.c:117
ifilter_parameters_from_ofilter
static int ifilter_parameters_from_ofilter(InputFilter *ifilter, OutputFilter *ofilter)
Definition: ffmpeg_filter.c:2305
av_inv_q
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
av_buffer_replace
int av_buffer_replace(AVBufferRef **pdst, const AVBufferRef *src)
Ensure dst refers to the same data as src.
Definition: buffer.c:233
IFILTER_FLAG_DROPCHANGED
@ IFILTER_FLAG_DROPCHANGED
Definition: ffmpeg.h:272
AVFrame::side_data
AVFrameSideData ** side_data
Definition: frame.h:632
len
int len
Definition: vorbis_enc_data.h:426
SchedulerNode
Definition: ffmpeg_sched.h:103
AVCOL_SPC_UNSPECIFIED
@ AVCOL_SPC_UNSPECIFIED
Definition: pixfmt.h:703
filtergraphs
FilterGraph ** filtergraphs
Definition: ffmpeg.c:114
int_cb
const AVIOInterruptCB int_cb
Definition: ffmpeg.c:312
OutputFilterPriv::color_space
enum AVColorSpace color_space
Definition: ffmpeg_filter.c:201
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
OFILTER_FLAG_CROP
@ OFILTER_FLAG_CROP
Definition: ffmpeg.h:308
outputs
static const AVFilterPad outputs[]
Definition: af_aap.c:310
sch_connect
int sch_connect(Scheduler *sch, SchedulerNode src, SchedulerNode dst)
Definition: ffmpeg_sched.c:957
FFMPEG_OPT_VSYNC_DROP
#define FFMPEG_OPT_VSYNC_DROP
Definition: ffmpeg.h:60
av_buffersink_get_h
int av_buffersink_get_h(const AVFilterContext *ctx)
OutputFilterPriv::needed
int needed
Definition: ffmpeg_filter.c:194
sch_filter_command
int sch_filter_command(Scheduler *sch, unsigned fg_idx, AVFrame *frame)
Definition: ffmpeg_sched.c:2653
AVFilter
Filter definition.
Definition: avfilter.h:216
video_sync_process
static void video_sync_process(OutputFilterPriv *ofp, AVFrame *frame, int64_t *nb_frames, int64_t *nb_frames_prev)
Definition: ffmpeg_filter.c:2517
ifp_from_ifilter
static InputFilterPriv * ifp_from_ifilter(InputFilter *ifilter)
Definition: ffmpeg_filter.c:161
OFILTER_FLAG_AUTOROTATE
@ OFILTER_FLAG_AUTOROTATE
Definition: ffmpeg.h:307
AV_BUFFERSINK_FLAG_NO_REQUEST
#define AV_BUFFERSINK_FLAG_NO_REQUEST
Tell av_buffersink_get_buffer_ref() not to request a frame from its input.
Definition: buffersink.h:92
ret
ret
Definition: filter_design.txt:187
AVStream
Stream structure.
Definition: avformat.h:744
AV_LOG_FATAL
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:204
pixfmt.h
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:81
AVALPHA_MODE_UNSPECIFIED
@ AVALPHA_MODE_UNSPECIFIED
Unknown alpha handling, or no alpha channel.
Definition: pixfmt.h:811
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:265
InputFilter::name
uint8_t * name
Definition: ffmpeg.h:375
VSYNC_DROP
@ VSYNC_DROP
Definition: ffmpeg.h:73
av_channel_layout_check
int av_channel_layout_check(const AVChannelLayout *channel_layout)
Check whether a channel layout is valid, i.e.
Definition: channel_layout.c:785
FPSConvContext::last_frame
AVFrame * last_frame
Definition: ffmpeg_filter.c:167
InputFile::streams
InputStream ** streams
Definition: ffmpeg.h:548
insert_filter
static int insert_filter(AVFilterContext **last_filter, int *pad_idx, const char *filter_name, const char *args)
Definition: ffmpeg_filter.c:1594
OutputFilterPriv::next_pts
int64_t next_pts
Definition: ffmpeg_filter.c:244
av_bprintf
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:122
ReinitReason
ReinitReason
Definition: ffmpeg_filter.c:3070
av_fifo_alloc2
AVFifo * av_fifo_alloc2(size_t nb_elems, size_t elem_size, unsigned int flags)
Allocate and initialize an AVFifo with a given element size.
Definition: fifo.c:47
AVOption::type
enum AVOptionType type
Definition: opt.h:445
AVFrame::sample_aspect_ratio
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:532
avfilter_pad_get_type
enum AVMediaType avfilter_pad_get_type(const AVFilterPad *pads, int pad_idx)
Get the type of an AVFilterPad.
Definition: avfilter.c:992
av_dynarray_add_nofree
int av_dynarray_add_nofree(void *tab_ptr, int *nb_ptr, void *elem)
Add an element to a dynamic array.
Definition: mem.c:315
AVBufferSrcParameters::color_range
enum AVColorRange color_range
Definition: buffersrc.h:122
FrameOpaque
FrameOpaque
Definition: ffmpeg.h:88
OutputFilterPriv::swr_opts
AVDictionary * swr_opts
Definition: ffmpeg_filter.c:223
av_get_media_type_string
const char * av_get_media_type_string(enum AVMediaType media_type)
Return a string describing the media_type enum, NULL if media_type is unknown.
Definition: utils.c:28
AVFrame::height
int height
Definition: frame.h:507
AVStream::index
int index
stream index in AVFormatContext
Definition: avformat.h:750
IFILTER_FLAG_CROP
@ IFILTER_FLAG_CROP
Definition: ffmpeg.h:271
DEF_CHOOSE_FORMAT
#define DEF_CHOOSE_FORMAT(name, type, var, supported_list, none, printf_format, get_name)
Definition: ffmpeg_filter.c:386
channel_layout.h
AVBufferSrcParameters
This structure contains the parameters describing the frames that will be passed to this filter.
Definition: buffersrc.h:73
av_buffersink_get_sample_rate
int av_buffersink_get_sample_rate(const AVFilterContext *ctx)
AVBufferSrcParameters::format
int format
video: the pixel format, value corresponds to enum AVPixelFormat audio: the sample format,...
Definition: buffersrc.h:78
describe_filter_link
static char * describe_filter_link(FilterGraph *fg, AVFilterInOut *inout, int in)
Definition: ffmpeg_filter.c:630
buffer
the frame and frame reference mechanism is intended to as much as expensive copies of that data while still allowing the filters to produce correct results The data is stored in buffers represented by AVFrame structures Several references can point to the same frame buffer
Definition: filter_design.txt:49
InputFilterPriv::bound
int bound
Definition: ffmpeg_filter.c:118
avfilter_init_dict
int avfilter_init_dict(AVFilterContext *ctx, AVDictionary **options)
Initialize a filter with the supplied dictionary of options.
Definition: avfilter.c:918
AVRational::den
int den
Denominator.
Definition: rational.h:60
InputStream::file
struct InputFile * file
Definition: ffmpeg.h:487
AVFilterChain
A filterchain is a list of filter specifications.
Definition: avfilter.h:932
InputFilterPriv::frame_queue
AVFifo * frame_queue
Definition: ffmpeg_filter.c:139
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
avfilter.h
InputFilterPriv::type_src
enum AVMediaType type_src
Definition: ffmpeg_filter.c:115
av_channel_layout_uninit
void av_channel_layout_uninit(AVChannelLayout *channel_layout)
Free any allocated data in the channel layout and reset the channel count to 0.
Definition: channel_layout.c:443
FilterGraphPriv::is_meta
int is_meta
Definition: ffmpeg_filter.c:53
insert_trim
static int insert_trim(void *logctx, int64_t start_time, int64_t duration, AVFilterContext **last_filter, int *pad_idx, const char *filter_name)
Definition: ffmpeg_filter.c:1543
IFILTER_FLAG_CFR
@ IFILTER_FLAG_CFR
Definition: ffmpeg.h:270
AVFILTER_FLAG_METADATA_ONLY
#define AVFILTER_FLAG_METADATA_ONLY
The filter is a "metadata" filter - it does not modify the frame data in any way.
Definition: avfilter.h:183
AV_BUFFERSRC_FLAG_PUSH
@ AV_BUFFERSRC_FLAG_PUSH
Immediately push the frame to the output.
Definition: buffersrc.h:46
avio_read
int avio_read(AVIOContext *s, unsigned char *buf, int size)
Read size bytes from AVIOContext into buf.
Definition: aviobuf.c:615
ifilter_bind_fg
static int ifilter_bind_fg(InputFilterPriv *ifp, FilterGraph *fg_src, int out_idx)
Definition: ffmpeg_filter.c:951
choose_out_timebase
static int choose_out_timebase(OutputFilterPriv *ofp, AVFrame *frame)
Definition: ffmpeg_filter.c:2380
Windows::Graphics::DirectX::Direct3D11::p
IDirect3DDxgiInterfaceAccess _COM_Outptr_ void ** p
Definition: vsrc_gfxcapture_winrt.hpp:53
OutputFilterPriv::flags
unsigned flags
Definition: ffmpeg_filter.c:247
OutputFilterPriv::sample_rates
const int * sample_rates
Definition: ffmpeg_filter.c:232
AVSideDataDescriptor
This struct describes the properties of a side data type.
Definition: frame.h:338
AVERROR_FILTER_NOT_FOUND
#define AVERROR_FILTER_NOT_FOUND
Filter not found.
Definition: error.h:60
sub2video_copy_rect
static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h, AVSubtitleRect *r)
Definition: ffmpeg_filter.c:298
InputFilterPriv::side_data
AVFrameSideData ** side_data
Definition: ffmpeg_filter.c:136
AVFilterGraphSegment::nb_chains
size_t nb_chains
Definition: avfilter.h:958
OutputFilterPriv::alpha_modes
enum AVAlphaMode * alpha_modes
Definition: ffmpeg_filter.c:235
AVFilterContext
An instance of a filter.
Definition: avfilter.h:274
FilterGraph::class
const AVClass * class
Definition: ffmpeg.h:415
av_channel_layout_copy
int av_channel_layout_copy(AVChannelLayout *dst, const AVChannelLayout *src)
Make a copy of a channel layout.
Definition: channel_layout.c:450
OutputFilter
Definition: ffmpeg.h:390
InputFilterPriv::drop_warned
int drop_warned
Definition: ffmpeg_filter.c:119
av_log_once
void av_log_once(void *avcl, int initial_level, int subsequent_level, int *state, const char *fmt,...)
Definition: log.c:451
sub2video_frame
static int sub2video_frame(InputFilter *ifilter, AVFrame *frame, int buffer)
Definition: ffmpeg_filter.c:2960
InputFilterPriv::ifilter
InputFilter ifilter
Definition: ffmpeg_filter.c:103
AVIO_FLAG_READ
#define AVIO_FLAG_READ
read-only
Definition: avio.h:617
desc
const char * desc
Definition: libsvtav1.c:83
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:200
configure_output_video_filter
static int configure_output_video_filter(FilterGraphPriv *fgp, AVFilterGraph *graph, OutputFilter *ofilter, AVFilterInOut *out)
Definition: ffmpeg_filter.c:1620
ViewSpecifier::type
enum ViewSpecifierType type
Definition: ffmpeg.h:130
av_buffersrc_get_nb_failed_requests
unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src)
Get the number of failed requests.
Definition: buffersrc.c:352
OutputFilterPriv::color_spaces
enum AVColorSpace * color_spaces
Definition: ffmpeg_filter.c:233
mem.h
AVBufferRef
A reference to a data buffer.
Definition: buffer.h:82
avio_open2
int avio_open2(AVIOContext **s, const char *filename, int flags, const AVIOInterruptCB *int_cb, AVDictionary **options)
Create and initialize a AVIOContext for accessing the resource indicated by url.
Definition: avio.c:492
av_buffersink_get_colorspace
enum AVColorSpace av_buffersink_get_colorspace(const AVFilterContext *ctx)
av_strdup
#define av_strdup(s)
Definition: ops_asmgen.c:47
adjust_frame_pts_to_encoder_tb
static double adjust_frame_pts_to_encoder_tb(void *logctx, AVFrame *frame, AVRational tb_dst, int64_t start_time)
Definition: ffmpeg_filter.c:2457
OutputFilter::nb_frames_drop
atomic_uint_least64_t nb_frames_drop
Definition: ffmpeg.h:411
auto_conversion_filters
int auto_conversion_filters
Definition: ffmpeg_opt.c:83
llrint
#define llrint(x)
Definition: libm.h:396
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:290
bind_inputs
static int bind_inputs(FilterGraph *fg, int commit)
Definition: ffmpeg_filter.c:1474
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
w
uint8_t w
Definition: llvidencdsp.c:39
InputStream::index
int index
Definition: ffmpeg.h:489
sch_filter_receive_finish
void sch_filter_receive_finish(Scheduler *sch, unsigned fg_idx, unsigned in_idx)
Called by filter tasks to signal that a filter input will no longer accept input.
Definition: ffmpeg_sched.c:2573
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
AVDictionaryEntry
Definition: dict.h:90
ENC_TIME_BASE_FILTER
@ ENC_TIME_BASE_FILTER
Definition: ffmpeg.h:79
FilterCommand::target
char * target
Definition: ffmpeg_filter.c:256
OutputFilterPriv::pix_fmts
enum AVPixelFormat * pix_fmts
Definition: ffmpeg_filter.c:228
av_frame_side_data_desc
const AVSideDataDescriptor * av_frame_side_data_desc(enum AVFrameSideDataType type)
Definition: side_data.c:63
fg_class
static const AVClass fg_class
Definition: ffmpeg_filter.c:1082
fg_create
int fg_create(FilterGraph **pfg, char **graph_desc, Scheduler *sch, const OutputFilterOptions *opts)
Create a new filtergraph in the global filtergraph list.
Definition: ffmpeg_filter.c:1089
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
av_dict_set
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:86
av_dict_get_string
int av_dict_get_string(const AVDictionary *m, char **buffer, const char key_val_sep, const char pairs_sep)
Get dictionary entries as a string.
Definition: dict.c:260
OFILTER_FLAG_DISABLE_CONVERT
@ OFILTER_FLAG_DISABLE_CONVERT
Definition: ffmpeg.h:303
av_dict_copy
int av_dict_copy(AVDictionary **dst, const AVDictionary *src, int flags)
Copy entries from one AVDictionary struct into another.
Definition: dict.c:247
Decoder::type
enum AVMediaType type
Definition: ffmpeg.h:472
AVFormatContext::name
char * name
Name of this format context, only used for logging purposes.
Definition: avformat.h:1888
InputFilterPriv::format
int format
Definition: ffmpeg_filter.c:123
InputFilterPriv::end_pts
int64_t end_pts
Definition: ffmpeg_filter.c:154
nb_filtergraphs
int nb_filtergraphs
Definition: ffmpeg.c:115
av_frame_side_data_get
static const AVFrameSideData * av_frame_side_data_get(AVFrameSideData *const *sd, const int nb_sd, enum AVFrameSideDataType type)
Wrapper around av_frame_side_data_get_c() to workaround the limitation that for any type T the conver...
Definition: frame.h:1159
int32_t
int32_t
Definition: audioconvert.c:56
InputFilterPriv::alpha_mode
enum AVAlphaMode alpha_mode
Definition: ffmpeg_filter.c:129
sub2video_update
static void sub2video_update(InputFilterPriv *ifp, int64_t heartbeat_pts, const AVSubtitle *sub)
Definition: ffmpeg_filter.c:345
timestamp.h
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
avio_close
int avio_close(AVIOContext *s)
Close the resource accessed by the AVIOContext s and free it.
Definition: avio.c:617
OutputFilterPriv::format
int format
Definition: ffmpeg_filter.c:197
av_strlcpy
size_t av_strlcpy(char *dst, const char *src, size_t size)
Copy the string src to dst, but no more than size - 1 bytes, and null-terminate dst.
Definition: avstring.c:85
avfilter_graph_send_command
int avfilter_graph_send_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, char *res, int res_len, int flags)
Send a command to one or more filter instances.
Definition: avfiltergraph.c:1454
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
graphprint.h
InputFilterPriv::opts
InputFilterOptions opts
Definition: ffmpeg_filter.c:105
dts_error_threshold
float dts_error_threshold
Definition: ffmpeg_opt.c:57
OutputFilterPriv::trim_duration_us
int64_t trim_duration_us
Definition: ffmpeg_filter.c:241
read_file_to_string
char * read_file_to_string(const char *filename)
Definition: cmdutils.c:1571
av_fifo_freep2
void av_fifo_freep2(AVFifo **f)
Free an AVFifo and reset pointer to NULL.
Definition: fifo.c:286
InputFilterPriv::downmixinfo
AVDownmixInfo downmixinfo
Definition: ffmpeg_filter.c:148
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
av_ts2str
#define av_ts2str(ts)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:54
h
h
Definition: vp9dsp_template.c:2070
av_bprint_chars
void av_bprint_chars(AVBPrint *buf, char c, unsigned n)
Append char c n times to a print buffer.
Definition: bprint.c:130
hw_device_for_filter
AVBufferRef * hw_device_for_filter(void)
Get a hardware device to be used with this filtergraph.
Definition: ffmpeg_hw.c:298
AVDictionaryEntry::value
char * value
Definition: dict.h:92
AVFilterGraph::nb_filters
unsigned nb_filters
Definition: avfilter.h:592
avstring.h
AVFilterContext::filter
const AVFilter * filter
the AVFilter of which this is an instance
Definition: avfilter.h:277
AVColorRange
AVColorRange
Visual content value range.
Definition: pixfmt.h:742
frame_data_c
const FrameData * frame_data_c(AVFrame *frame)
Definition: ffmpeg.c:483
OutputFilterPriv::tb_out
AVRational tb_out
Definition: ffmpeg_filter.c:215
AVFilterInOut
A linked-list of the inputs/outputs of the filter chain.
Definition: avfilter.h:746
VSYNC_PASSTHROUGH
@ VSYNC_PASSTHROUGH
Definition: ffmpeg.h:68
OutputFilterPriv::height
int height
Definition: ffmpeg_filter.c:198
AV_FRAME_DATA_DOWNMIX_INFO
@ AV_FRAME_DATA_DOWNMIX_INFO
Metadata relevant to a downmix procedure.
Definition: frame.h:73
snprintf
#define snprintf
Definition: snprintf.h:34
SCH_FILTER_IN
#define SCH_FILTER_IN(filter, input)
Definition: ffmpeg_sched.h:126
FPSConvContext::framerate
AVRational framerate
Definition: ffmpeg_filter.c:182
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
buffersrc.h
fg_thread_set_name
static void fg_thread_set_name(const FilterGraph *fg)
Definition: ffmpeg_filter.c:3241
ist_find_unused
InputStream * ist_find_unused(enum AVMediaType type)
Find an unused input stream of given type.
Definition: ffmpeg_demux.c:176
sub2video_prepare
static void sub2video_prepare(InputFilterPriv *ifp)
Definition: ffmpeg_filter.c:1842
FilterGraph::is_internal
int is_internal
Definition: ffmpeg.h:426
av_rescale_q_rnd
int64_t av_rescale_q_rnd(int64_t a, AVRational bq, AVRational cq, enum AVRounding rnd)
Rescale a 64-bit integer by 2 rational numbers with specified rounding.
Definition: mathematics.c:134
av_dict_iterate
const AVDictionaryEntry * av_dict_iterate(const AVDictionary *m, const AVDictionaryEntry *prev)
Iterate over a dictionary.
Definition: dict.c:42
AVSubtitle::start_display_time
uint32_t start_display_time
Definition: avcodec.h:2096
FilterCommand::command
char * command
Definition: ffmpeg_filter.c:257
src
#define src
Definition: vp8dsp.c:248
FilterCommand
Definition: ffmpeg_filter.c:255
duration
static int64_t duration
Definition: ffplay.c:329
AV_FIFO_FLAG_AUTO_GROW
#define AV_FIFO_FLAG_AUTO_GROW
Automatically resize the FIFO on writes, so that the data fits.
Definition: fifo.h:63
InputFilterPriv::height
int height
Definition: ffmpeg_filter.c:125
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:3376
OutputFilter::nb_frames_dup
atomic_uint_least64_t nb_frames_dup
Definition: ffmpeg.h:410
filter_complex_nbthreads
int filter_complex_nbthreads
Definition: ffmpeg_opt.c:77
InputFilterOptions::framerate
AVRational framerate
Definition: ffmpeg.h:286
av_buffersink_get_color_range
enum AVColorRange av_buffersink_get_color_range(const AVFilterContext *ctx)
ff_thread_setname
static int ff_thread_setname(const char *name)
Definition: thread.h:216
InputFilter::input_name
char * input_name
Definition: ffmpeg.h:383
LATENCY_PROBE_FILTER_POST
@ LATENCY_PROBE_FILTER_POST
Definition: ffmpeg.h:104
FPSConvContext::framerate_supported
const AVRational * framerate_supported
Definition: ffmpeg_filter.c:184