FFmpeg
ffmpeg_filter.c
Go to the documentation of this file.
1 /*
2  * ffmpeg filter configuration
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <stdint.h>
22 
23 #include "ffmpeg.h"
24 
25 #include "libavfilter/avfilter.h"
26 #include "libavfilter/buffersink.h"
27 #include "libavfilter/buffersrc.h"
28 
29 #include "libavutil/avassert.h"
30 #include "libavutil/avstring.h"
31 #include "libavutil/bprint.h"
33 #include "libavutil/mem.h"
34 #include "libavutil/opt.h"
35 #include "libavutil/pixdesc.h"
36 #include "libavutil/pixfmt.h"
37 #include "libavutil/samplefmt.h"
38 #include "libavutil/time.h"
39 #include "libavutil/timestamp.h"
40 
41 // FIXME private header, used for mid_pred()
42 #include "libavcodec/mathops.h"
43 
44 typedef struct FilterGraphPriv {
46 
47  // name used for logging
48  char log_name[32];
49 
50  int is_simple;
51  // true when the filtergraph contains only meta filters
52  // that do not modify the frame data
53  int is_meta;
54  // source filters are present in the graph
57 
58  unsigned nb_outputs_done;
59 
60  const char *graph_desc;
61 
62  char *nb_threads;
63 
64  // frame for temporarily holding output from the filtergraph
66  // frame for sending output to the encoder
68 
70  unsigned sch_idx;
72 
74 {
75  return (FilterGraphPriv*)fg;
76 }
77 
78 static const FilterGraphPriv *cfgp_from_cfg(const FilterGraph *fg)
79 {
80  return (const FilterGraphPriv*)fg;
81 }
82 
83 // data that is local to the filter thread and not visible outside of it
84 typedef struct FilterGraphThread {
86 
88 
89  // Temporary buffer for output frames, since on filtergraph reset
90  // we cannot send them to encoders immediately.
91  // The output index is stored in frame opaque.
93 
94  // index of the next input to request from the scheduler
95  unsigned next_in;
96  // set to 1 after at least one frame passed through this output
97  int got_frame;
98 
99  // EOF status of each input/output, as received by the thread
100  uint8_t *eof_in;
101  uint8_t *eof_out;
103 
104 typedef struct InputFilterPriv {
106 
108 
109  int index;
110 
112 
113  // used to hold submitted input
115 
116  /* for filters that are not yet bound to an input stream,
117  * this stores the input linklabel, if any */
118  uint8_t *linklabel;
119 
120  // filter data type
122  // source data type: AVMEDIA_TYPE_SUBTITLE for sub2video,
123  // same as type otherwise
125 
126  int eof;
127  int bound;
128 
129  // parameters configured for this input
130  int format;
131 
132  int width, height;
136 
139 
141 
143 
145 
149 
150  struct {
151  AVFrame *frame;
152 
155 
156  ///< marks if sub2video_update should force an initialization
157  unsigned int initialize;
158  } sub2video;
160 
162 {
163  return (InputFilterPriv*)ifilter;
164 }
165 
166 typedef struct FPSConvContext {
168  /* number of frames emitted by the video-encoding sync code */
170  /* history of nb_frames_prev, i.e. the number of times the
171  * previous frame was duplicated by vsync code in recent
172  * do_video_out() calls */
174 
175  uint64_t dup_warning;
176 
179 
181 
187 
188 typedef struct OutputFilterPriv {
190 
191  int index;
192 
193  void *log_parent;
194  char log_name[32];
195 
196  char *name;
197 
199 
200  /* desired output stream properties */
201  int format;
202  int width, height;
207 
208  // time base in which the output is sent to our downstream
209  // does not need to match the filtersink's timebase
211  // at least one frame with the above timebase was sent
212  // to our downstream, so it cannot change anymore
214 
216 
219 
220  // those are only set if no format is specified and the encoder gives us multiple options
221  // They point directly to the relevant lists of the encoder.
222  const int *formats;
224  const int *sample_rates;
227 
231  // offset for output timestamps, in AV_TIME_BASE_Q
235 
236  unsigned flags;
238 
240 {
241  return (OutputFilterPriv*)ofilter;
242 }
243 
244 typedef struct FilterCommand {
245  char *target;
246  char *command;
247  char *arg;
248 
249  double time;
251 } FilterCommand;
252 
253 static void filter_command_free(void *opaque, uint8_t *data)
254 {
256 
257  av_freep(&fc->target);
258  av_freep(&fc->command);
259  av_freep(&fc->arg);
260 
261  av_free(data);
262 }
263 
/* Prepare a blank sub2video canvas frame with the geometry/format configured
 * on ifp and zero its first plane. Returns 0 on success, a negative AVERROR
 * on failure.
 * NOTE(review): the rendered listing dropped lines 264, 269 and 277 —
 * presumably the function signature (taking InputFilterPriv *ifp), a frame
 * reset, and the buffer-allocating call that sets 'ret' — confirm against
 * the upstream file. */
265 {
266  AVFrame *frame = ifp->sub2video.frame;
267  int ret;
268 
270 
271  frame->width = ifp->width;
272  frame->height = ifp->height;
273  frame->format = ifp->format;
274  frame->colorspace = ifp->color_space;
275  frame->color_range = ifp->color_range;
276 
278  if (ret < 0)
279  return ret;
280 
 /* clearing only plane 0 is sufficient: the canvas is packed RGB32
  * (ifp->format is set to AV_PIX_FMT_RGB32 in ifilter_bind_ist()) */
281  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
282 
283  return 0;
284 }
285 
286 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
287  AVSubtitleRect *r)
288 {
289  uint32_t *pal, *dst2;
290  uint8_t *src, *src2;
291  int x, y;
292 
293  if (r->type != SUBTITLE_BITMAP) {
294  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
295  return;
296  }
297  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
298  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
299  r->x, r->y, r->w, r->h, w, h
300  );
301  return;
302  }
303 
304  dst += r->y * dst_linesize + r->x * 4;
305  src = r->data[0];
306  pal = (uint32_t *)r->data[1];
307  for (y = 0; y < r->h; y++) {
308  dst2 = (uint32_t *)dst;
309  src2 = src;
310  for (x = 0; x < r->w; x++)
311  *(dst2++) = pal[*(src2++)];
312  dst += dst_linesize;
313  src += r->linesize[0];
314  }
315 }
316 
/* Stamp the canvas frame with 'pts', remember it as the last pushed PTS and
 * submit the frame to the filtergraph; a failure (other than EOF) is only
 * logged, not propagated.
 * NOTE(review): the rendered listing dropped lines 317, 324-326 and 328 —
 * presumably the function signature (InputFilterPriv *ifp, int64_t pts), the
 * buffersrc submission call that sets 'ret', and the av_log() head for the
 * warning below — confirm against the upstream file. */
318 {
319  AVFrame *frame = ifp->sub2video.frame;
320  int ret;
321 
 /* the canvas must have been allocated by sub2video_get_blank_frame() */
322  av_assert1(frame->data[0]);
323  ifp->sub2video.last_pts = frame->pts = pts;
327  if (ret != AVERROR_EOF && ret < 0)
329  "Error while add the frame to buffer source(%s).\n",
330  av_err2str(ret));
331 }
332 
333 static void sub2video_update(InputFilterPriv *ifp, int64_t heartbeat_pts,
334  const AVSubtitle *sub)
335 {
336  AVFrame *frame = ifp->sub2video.frame;
337  int8_t *dst;
338  int dst_linesize;
339  int num_rects;
340  int64_t pts, end_pts;
341 
342  if (sub) {
343  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
344  AV_TIME_BASE_Q, ifp->time_base);
345  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
346  AV_TIME_BASE_Q, ifp->time_base);
347  num_rects = sub->num_rects;
348  } else {
349  /* If we are initializing the system, utilize current heartbeat
350  PTS as the start time, and show until the following subpicture
351  is received. Otherwise, utilize the previous subpicture's end time
352  as the fall-back value. */
353  pts = ifp->sub2video.initialize ?
354  heartbeat_pts : ifp->sub2video.end_pts;
355  end_pts = INT64_MAX;
356  num_rects = 0;
357  }
358  if (sub2video_get_blank_frame(ifp) < 0) {
360  "Impossible to get a blank canvas.\n");
361  return;
362  }
363  dst = frame->data [0];
364  dst_linesize = frame->linesize[0];
365  for (int i = 0; i < num_rects; i++)
366  sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
367  sub2video_push_ref(ifp, pts);
368  ifp->sub2video.end_pts = end_pts;
369  ifp->sub2video.initialize = 0;
370 }
371 
372 /* Define a function for appending a list of allowed formats
373  * to an AVBPrint. If nonempty, the list will have a header. */
/* The generated function is choose_<name>(ofp, bprint): it appends
 * "name=value:" when a single value is configured on the OutputFilterPriv,
 * or a '|'-separated list of the entries of the supported_list (terminated
 * by 'none') otherwise, trimming the trailing '|' before the closing ':'.
 * When neither a value nor a list is set, nothing is appended. */
374 #define DEF_CHOOSE_FORMAT(name, type, var, supported_list, none, printf_format, get_name) \
375 static void choose_ ## name (OutputFilterPriv *ofp, AVBPrint *bprint) \
376 { \
377  if (ofp->var == none && !ofp->supported_list) \
378  return; \
379  av_bprintf(bprint, #name "="); \
380  if (ofp->var != none) { \
381  av_bprintf(bprint, printf_format, get_name(ofp->var)); \
382  } else { \
383  const type *p; \
384  \
385  for (p = ofp->supported_list; *p != none; p++) { \
386  av_bprintf(bprint, printf_format "|", get_name(*p)); \
387  } \
388  if (bprint->len > 0) \
389  bprint->str[--bprint->len] = '\0'; \
390  } \
391  av_bprint_chars(bprint, ':', 1); \
392 }
393 
396 
399 
401  "%d", )
402 
403 DEF_CHOOSE_FORMAT(color_spaces, enum AVColorSpace, color_space, color_spaces,
405 
406 DEF_CHOOSE_FORMAT(color_ranges, enum AVColorRange, color_range, color_ranges,
408 
409 static void choose_channel_layouts(OutputFilterPriv *ofp, AVBPrint *bprint)
410 {
411  if (av_channel_layout_check(&ofp->ch_layout)) {
412  av_bprintf(bprint, "channel_layouts=");
413  av_channel_layout_describe_bprint(&ofp->ch_layout, bprint);
414  } else if (ofp->ch_layouts) {
415  const AVChannelLayout *p;
416 
417  av_bprintf(bprint, "channel_layouts=");
418  for (p = ofp->ch_layouts; p->nb_channels; p++) {
420  av_bprintf(bprint, "|");
421  }
422  if (bprint->len > 0)
423  bprint->str[--bprint->len] = '\0';
424  } else
425  return;
426  av_bprint_chars(bprint, ':', 1);
427 }
428 
429 static int read_binary(void *logctx, const char *path,
430  uint8_t **data, int *len)
431 {
432  AVIOContext *io = NULL;
433  int64_t fsize;
434  int ret;
435 
436  *data = NULL;
437  *len = 0;
438 
439  ret = avio_open2(&io, path, AVIO_FLAG_READ, &int_cb, NULL);
440  if (ret < 0) {
441  av_log(logctx, AV_LOG_ERROR, "Cannot open file '%s': %s\n",
442  path, av_err2str(ret));
443  return ret;
444  }
445 
446  fsize = avio_size(io);
447  if (fsize < 0 || fsize > INT_MAX) {
448  av_log(logctx, AV_LOG_ERROR, "Cannot obtain size of file %s\n", path);
449  ret = AVERROR(EIO);
450  goto fail;
451  }
452 
453  *data = av_malloc(fsize);
454  if (!*data) {
455  ret = AVERROR(ENOMEM);
456  goto fail;
457  }
458 
459  ret = avio_read(io, *data, fsize);
460  if (ret != fsize) {
461  av_log(logctx, AV_LOG_ERROR, "Error reading file %s\n", path);
462  ret = ret < 0 ? ret : AVERROR(EIO);
463  goto fail;
464  }
465 
466  *len = fsize;
467 
468  ret = 0;
469 fail:
470  avio_close(io);
471  if (ret < 0) {
472  av_freep(data);
473  *len = 0;
474  }
475  return ret;
476 }
477 
/* Apply a single key=value option to filter context f. A key starting with
 * '/' names a valid option whose value is interpreted as a path to load the
 * actual option value from (binary options via read_binary(), text options
 * via file_read()). Returns 0 on success, a negative AVERROR on failure.
 * NOTE(review): the rendered listing dropped lines 484, 489, 505 and 514 —
 * presumably the initial option-set call that produces 'ret', the option
 * lookup that produces 'o', and the two calls applying the loaded file
 * contents — confirm against the upstream file. */
478 static int filter_opt_apply(void *logctx, AVFilterContext *f,
479  const char *key, const char *val)
480 {
481  const AVOption *o = NULL;
482  int ret;
483 
485  if (ret >= 0)
486  return 0;
487 
488  if (ret == AVERROR_OPTION_NOT_FOUND && key[0] == '/')
490  if (!o)
491  goto err_apply;
492 
493  // key is a valid option name prefixed with '/'
494  // interpret value as a path from which to load the actual option value
495  key++;
496 
497  if (o->type == AV_OPT_TYPE_BINARY) {
498  uint8_t *data;
499  int len;
500 
501  ret = read_binary(logctx, val, &data, &len);
502  if (ret < 0)
503  goto err_load;
504 
506  av_freep(&data);
507  } else {
508  char *data = file_read(val);
509  if (!data) {
510  ret = AVERROR(EIO);
511  goto err_load;
512  }
513 
515  av_freep(&data);
516  }
517  if (ret < 0)
518  goto err_apply;
519 
520  return 0;
521 
522 err_apply:
523  av_log(logctx, AV_LOG_ERROR,
524  "Error applying option '%s' to filter '%s': %s\n",
525  key, f->filter->name, av_err2str(ret));
526  return ret;
527 err_load:
528  av_log(logctx, AV_LOG_ERROR,
529  "Error loading value for option '%s' from file '%s'\n",
530  key, val);
531  return ret;
532 }
533 
534 static int graph_opts_apply(void *logctx, AVFilterGraphSegment *seg)
535 {
536  for (size_t i = 0; i < seg->nb_chains; i++) {
537  AVFilterChain *ch = seg->chains[i];
538 
539  for (size_t j = 0; j < ch->nb_filters; j++) {
540  AVFilterParams *p = ch->filters[j];
541  const AVDictionaryEntry *e = NULL;
542 
543  av_assert0(p->filter);
544 
545  while ((e = av_dict_iterate(p->opts, e))) {
546  int ret = filter_opt_apply(logctx, p->filter, e->key, e->value);
547  if (ret < 0)
548  return ret;
549  }
550 
551  av_dict_free(&p->opts);
552  }
553  }
554 
555  return 0;
556 }
557 
/* Parse a filtergraph description into 'graph', apply per-filter options,
 * attach the hardware device (if any) to filters that can use one, and hand
 * back the graph's unlinked inputs/outputs. Returns 0 on success, a negative
 * AVERROR on failure.
 * NOTE(review): the rendered listing dropped lines 560, 563, 573, 595 and
 * 598 — presumably the inputs/outputs out-parameters in the signature, the
 * graph-segment variable declaration, the filter-creation step, the final
 * segment-apply step and the segment cleanup — confirm upstream. */
558 static int graph_parse(void *logctx,
559  AVFilterGraph *graph, const char *desc,
561  AVBufferRef *hw_device)
562 {
564  int ret;
565 
566  *inputs = NULL;
567  *outputs = NULL;
568 
569  ret = avfilter_graph_segment_parse(graph, desc, 0, &seg);
570  if (ret < 0)
571  return ret;
572 
574  if (ret < 0)
575  goto fail;
576 
 /* hand the HW device to every filter that advertises HWDEVICE support */
577  if (hw_device) {
578  for (int i = 0; i < graph->nb_filters; i++) {
579  AVFilterContext *f = graph->filters[i];
580 
581  if (!(f->filter->flags & AVFILTER_FLAG_HWDEVICE))
582  continue;
583  f->hw_device_ctx = av_buffer_ref(hw_device);
584  if (!f->hw_device_ctx) {
585  ret = AVERROR(ENOMEM);
586  goto fail;
587  }
588  }
589  }
590 
591  ret = graph_opts_apply(logctx, seg);
592  if (ret < 0)
593  goto fail;
594 
596 
597 fail:
599  return ret;
600 }
601 
602 // Filters can be configured only if the formats of all inputs are known.
/* Returns 1 when every input of fg has a negotiated format (>= 0),
 * 0 otherwise.
 * NOTE(review): the rendered listing dropped lines 603 and 606 — presumably
 * the function signature (taking FilterGraph *fg) and the InputFilterPriv
 * lookup for fg->inputs[i] — confirm against the upstream file. */
604 {
605  for (int i = 0; i < fg->nb_inputs; i++) {
607  if (ifp->format < 0)
608  return 0;
609  }
610  return 1;
611 }
612 
613 static int filter_thread(void *arg);
614 
615 static char *describe_filter_link(FilterGraph *fg, AVFilterInOut *inout, int in)
616 {
617  AVFilterContext *ctx = inout->filter_ctx;
618  AVFilterPad *pads = in ? ctx->input_pads : ctx->output_pads;
619  int nb_pads = in ? ctx->nb_inputs : ctx->nb_outputs;
620 
621  if (nb_pads > 1)
622  return av_strdup(ctx->filter->name);
623  return av_asprintf("%s:%s", ctx->filter->name,
624  avfilter_pad_get_name(pads, inout->pad_idx));
625 }
626 
627 static const char *ofilter_item_name(void *obj)
628 {
629  OutputFilterPriv *ofp = obj;
630  return ofp->log_name;
631 }
632 
/* AVClass used for logging from OutputFilter(Priv); messages are tagged with
 * the filter's log_name and, via parent_log_context_offset, forwarded to the
 * context stored in log_parent when one is set. */
633 static const AVClass ofilter_class = {
634  .class_name = "OutputFilter",
635  .version = LIBAVUTIL_VERSION_INT,
636  .item_name = ofilter_item_name,
637  .parent_log_context_offset = offsetof(OutputFilterPriv, log_parent),
638  .category = AV_CLASS_CATEGORY_FILTER,
639 };
640 
/* Allocate a new output filter of the given media type, append it to
 * fg->outputs and return the public OutputFilter view (NULL on allocation
 * failure).
 * NOTE(review): the rendered listing dropped lines 641, 656-657 and 661 —
 * presumably the function signature (FilterGraph *fg, enum AVMediaType
 * type), additional field defaults, and the tail of the snprintf() argument
 * list — confirm against the upstream file. */
642 {
643  OutputFilterPriv *ofp;
644  OutputFilter *ofilter;
645 
646  ofp = allocate_array_elem(&fg->outputs, sizeof(*ofp), &fg->nb_outputs);
647  if (!ofp)
648  return NULL;
649 
650  ofilter = &ofp->ofilter;
651  ofilter->class = &ofilter_class;
652  ofp->log_parent = fg;
653  ofilter->graph = fg;
654  ofilter->type = type;
 /* -1 marks "format not negotiated yet" */
655  ofp->format = -1;
658  ofp->index = fg->nb_outputs - 1;
659 
660  snprintf(ofp->log_name, sizeof(ofp->log_name), "%co%d",
662 
663  return ofilter;
664 }
665 
/* Bind a filtergraph input to a demuxed input stream: registers the filter
 * with the stream, connects the scheduler edge, and sets up the sub2video
 * canvas when feeding subtitles into a video input. Returns 0 on success,
 * a negative AVERROR on failure.
 * NOTE(review): the rendered listing dropped lines 671, 678 and 680 —
 * presumably the 'src' scheduler-node declaration, part of the type-check
 * condition and the media-type strings for the error message — confirm
 * against the upstream file. */
666 static int ifilter_bind_ist(InputFilter *ifilter, InputStream *ist,
667  const ViewSpecifier *vs)
668 {
669  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
670  FilterGraphPriv *fgp = fgp_from_fg(ifilter->graph);
672  int ret;
673 
 /* an input may only be bound once */
674  av_assert0(!ifp->bound);
675  ifp->bound = 1;
676 
677  if (ifp->type != ist->par->codec_type &&
679  av_log(fgp, AV_LOG_ERROR, "Tried to connect %s stream to %s filtergraph input\n",
681  return AVERROR(EINVAL);
682  }
683 
684  ifp->type_src = ist->st->codecpar->codec_type;
685 
686  ifp->opts.fallback = av_frame_alloc();
687  if (!ifp->opts.fallback)
688  return AVERROR(ENOMEM);
689 
690  ret = ist_filter_add(ist, ifilter, filtergraph_is_simple(ifilter->graph),
691  vs, &ifp->opts, &src);
692  if (ret < 0)
693  return ret;
694 
695  ret = sch_connect(fgp->sch,
696  src, SCH_FILTER_IN(fgp->sch_idx, ifp->index));
697  if (ret < 0)
698  return ret;
699 
 /* subtitles feeding a video input go through the sub2video rendering path */
700  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE) {
701  ifp->sub2video.frame = av_frame_alloc();
702  if (!ifp->sub2video.frame)
703  return AVERROR(ENOMEM);
704 
705  ifp->width = ifp->opts.sub2video_width;
706  ifp->height = ifp->opts.sub2video_height;
707 
708  /* rectangles are AV_PIX_FMT_PAL8, but we have no guarantee that the
709  palettes for all rectangles are identical or compatible */
710  ifp->format = AV_PIX_FMT_RGB32;
711 
712  ifp->time_base = AV_TIME_BASE_Q;
713 
714  av_log(fgp, AV_LOG_VERBOSE, "sub2video: using %dx%d canvas\n",
715  ifp->width, ifp->height);
716  }
717 
718  return 0;
719 }
720 
/* Bind a filtergraph input to a standalone decoder and connect the
 * corresponding scheduler edge. Returns 0 on success, a negative AVERROR on
 * failure.
 * NOTE(review): the rendered listing dropped lines 721, 724-725 and 733 —
 * presumably the head of the signature (taking the InputFilterPriv and the
 * Decoder), the fgp/src local declarations, and the media-type strings for
 * the error message — confirm against the upstream file. */
722  const ViewSpecifier *vs)
723 {
726  int ret;
727 
 /* an input may only be bound once */
728  av_assert0(!ifp->bound);
729  ifp->bound = 1;
730 
731  if (ifp->type != dec->type) {
732  av_log(fgp, AV_LOG_ERROR, "Tried to connect %s decoder to %s filtergraph input\n",
734  return AVERROR(EINVAL);
735  }
736 
737  ifp->type_src = ifp->type;
738 
739  ret = dec_filter_add(dec, &ifp->ifilter, &ifp->opts, vs, &src);
740  if (ret < 0)
741  return ret;
742 
743  ret = sch_connect(fgp->sch, src, SCH_FILTER_IN(fgp->sch_idx, ifp->index));
744  if (ret < 0)
745  return ret;
746 
747  return 0;
748 }
749 
750 static int set_channel_layout(OutputFilterPriv *f, const AVChannelLayout *layouts_allowed,
751  const AVChannelLayout *layout_requested)
752 {
753  int i, err;
754 
755  if (layout_requested->order != AV_CHANNEL_ORDER_UNSPEC) {
756  /* Pass the layout through for all orders but UNSPEC */
757  err = av_channel_layout_copy(&f->ch_layout, layout_requested);
758  if (err < 0)
759  return err;
760  return 0;
761  }
762 
763  /* Requested layout is of order UNSPEC */
764  if (!layouts_allowed) {
765  /* Use the default native layout for the requested amount of channels when the
766  encoder doesn't have a list of supported layouts */
767  av_channel_layout_default(&f->ch_layout, layout_requested->nb_channels);
768  return 0;
769  }
770  /* Encoder has a list of supported layouts. Pick the first layout in it with the
771  same amount of channels as the requested layout */
772  for (i = 0; layouts_allowed[i].nb_channels; i++) {
773  if (layouts_allowed[i].nb_channels == layout_requested->nb_channels)
774  break;
775  }
776  if (layouts_allowed[i].nb_channels) {
777  /* Use it if one is found */
778  err = av_channel_layout_copy(&f->ch_layout, &layouts_allowed[i]);
779  if (err < 0)
780  return err;
781  return 0;
782  }
783  /* If no layout for the amount of channels requested was found, use the default
784  native layout for it. */
785  av_channel_layout_default(&f->ch_layout, layout_requested->nb_channels);
786 
787  return 0;
788 }
789 
/* Bind a filtergraph output to an encoder: copies the requested output
 * properties (or the encoder's supported-value lists when unset), sets up
 * logging and fps-conversion state for video, and connects the scheduler
 * edge to the encoder. Returns 0 on success, a negative AVERROR on failure.
 * NOTE(review): the rendered listing dropped line 854 (an additional video
 * property assignment between the color-range setup and the fps block) —
 * confirm against the upstream file. */
790 int ofilter_bind_enc(OutputFilter *ofilter, unsigned sched_idx_enc,
791  const OutputFilterOptions *opts)
792 {
793  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
794  FilterGraph *fg = ofilter->graph;
795  FilterGraphPriv *fgp = fgp_from_fg(fg);
796  int ret;
797 
 /* an output may only be bound once, and only to a matching encoder type */
798  av_assert0(!ofilter->bound);
799  av_assert0(!opts->enc ||
800  ofilter->type == opts->enc->type);
801 
802  ofilter->bound = 1;
803  av_freep(&ofilter->linklabel);
804 
805  ofp->flags = opts->flags;
806  ofp->ts_offset = opts->ts_offset;
807  ofp->enc_timebase = opts->output_tb;
808 
809  ofp->trim_start_us = opts->trim_start_us;
810  ofp->trim_duration_us = opts->trim_duration_us;
811 
812  ofp->name = av_strdup(opts->name);
 /* NOTE(review): av_strdup() failure is an allocation failure —
  * AVERROR(ENOMEM) would describe it better than EINVAL; confirm upstream */
813  if (!ofp->name)
814  return AVERROR(EINVAL);
815 
816  ret = av_dict_copy(&ofp->sws_opts, opts->sws_opts, 0);
817  if (ret < 0)
818  return ret;
819 
820  ret = av_dict_copy(&ofp->swr_opts, opts->swr_opts, 0);
821  if (ret < 0)
822  return ret;
823 
824  if (opts->flags & OFILTER_FLAG_AUDIO_24BIT)
825  av_dict_set(&ofp->swr_opts, "output_sample_bits", "24", 0);
826 
827  if (fgp->is_simple) {
828  // for simple filtergraph there is just one output,
829  // so use only graph-level information for logging
830  ofp->log_parent = NULL;
831  av_strlcpy(ofp->log_name, fgp->log_name, sizeof(ofp->log_name));
832  } else
833  av_strlcatf(ofp->log_name, sizeof(ofp->log_name), "->%s", ofp->name);
834 
 /* fixed values requested by the caller win; otherwise remember the
  * encoder's supported-value lists for later negotiation */
835  switch (ofilter->type) {
836  case AVMEDIA_TYPE_VIDEO:
837  ofp->width = opts->width;
838  ofp->height = opts->height;
839  if (opts->format != AV_PIX_FMT_NONE) {
840  ofp->format = opts->format;
841  } else
842  ofp->formats = opts->formats;
843 
844  if (opts->color_space != AVCOL_SPC_UNSPECIFIED)
845  ofp->color_space = opts->color_space;
846  else
847  ofp->color_spaces = opts->color_spaces;
848 
849  if (opts->color_range != AVCOL_RANGE_UNSPECIFIED)
850  ofp->color_range = opts->color_range;
851  else
852  ofp->color_ranges = opts->color_ranges;
853 
855 
856  ofp->fps.last_frame = av_frame_alloc();
857  if (!ofp->fps.last_frame)
858  return AVERROR(ENOMEM);
859 
860  ofp->fps.vsync_method = opts->vsync_method;
861  ofp->fps.framerate = opts->frame_rate;
862  ofp->fps.framerate_max = opts->max_frame_rate;
863  ofp->fps.framerate_supported = opts->frame_rates;
864 
865  // reduce frame rate for mpeg4 to be within the spec limits
866  if (opts->enc && opts->enc->id == AV_CODEC_ID_MPEG4)
867  ofp->fps.framerate_clip = 65535;
868 
869  ofp->fps.dup_warning = 1000;
870 
871  break;
872  case AVMEDIA_TYPE_AUDIO:
873  if (opts->format != AV_SAMPLE_FMT_NONE) {
874  ofp->format = opts->format;
875  } else {
876  ofp->formats = opts->formats;
877  }
878  if (opts->sample_rate) {
879  ofp->sample_rate = opts->sample_rate;
880  } else
881  ofp->sample_rates = opts->sample_rates;
882  if (opts->ch_layout.nb_channels) {
883  int ret = set_channel_layout(ofp, opts->ch_layouts, &opts->ch_layout);
884  if (ret < 0)
885  return ret;
886  } else {
887  ofp->ch_layouts = opts->ch_layouts;
888  }
889  break;
890  }
891 
892  ret = sch_connect(fgp->sch, SCH_FILTER_OUT(fgp->sch_idx, ofp->index),
893  SCH_ENC(sched_idx_enc));
894  if (ret < 0)
895  return ret;
896 
897  return 0;
898 }
899 
/* Bind a filtergraph output directly to another filtergraph's input
 * (graph-to-graph connection); only naming/logging state is set up here.
 * Returns 0 on success, a negative AVERROR on failure.
 * NOTE(review): the rendered listing dropped line 900 — presumably the head
 * of the signature (static, taking the OutputFilter and the destination
 * InputFilterPriv) — confirm against the upstream file. */
901  const OutputFilterOptions *opts)
902 {
903  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
904 
 /* an output may only be bound once, and types must match */
905  av_assert0(!ofilter->bound);
906  av_assert0(ofilter->type == ifp->type);
907 
908  ofilter->bound = 1;
909  av_freep(&ofilter->linklabel);
910 
911  ofp->name = av_strdup(opts->name);
 /* NOTE(review): av_strdup() failure is an allocation failure —
  * AVERROR(ENOMEM) would describe it better than EINVAL; confirm upstream */
912  if (!ofp->name)
913  return AVERROR(EINVAL);
914 
915  av_strlcatf(ofp->log_name, sizeof(ofp->log_name), "->%s", ofp->name);
916 
917  return 0;
918 }
919 
/* Bind a filtergraph input to the out_idx-th output of another filtergraph
 * and connect the scheduler edge between the two graphs. Returns 0 on
 * success, a negative AVERROR on failure.
 * NOTE(review): the rendered listing dropped lines 922, 924 and 934 —
 * presumably the fgp declaration, the OutputFilterOptions local 'opts', and
 * the media-type string for this graph's input in the error message —
 * confirm against the upstream file. */
920 static int ifilter_bind_fg(InputFilterPriv *ifp, FilterGraph *fg_src, int out_idx)
921 {
923  OutputFilter *ofilter_src = fg_src->outputs[out_idx];
925  char name[32];
926  int ret;
927 
 /* an input may only be bound once */
928  av_assert0(!ifp->bound);
929  ifp->bound = 1;
930 
931  if (ifp->type != ofilter_src->type) {
932  av_log(fgp, AV_LOG_ERROR, "Tried to connect %s output to %s input\n",
933  av_get_media_type_string(ofilter_src->type),
935  return AVERROR(EINVAL);
936  }
937 
938  ifp->type_src = ifp->type;
939 
940  memset(&opts, 0, sizeof(opts));
941 
 /* synthesized name identifying this graph input, e.g. "fg:1:0" */
942  snprintf(name, sizeof(name), "fg:%d:%d", fgp->fg.index, ifp->index);
943  opts.name = name;
944 
945  ret = ofilter_bind_ifilter(ofilter_src, ifp, &opts);
946  if (ret < 0)
947  return ret;
948 
949  ret = sch_connect(fgp->sch, SCH_FILTER_OUT(fg_src->index, out_idx),
950  SCH_FILTER_IN(fgp->sch_idx, ifp->index));
951  if (ret < 0)
952  return ret;
953 
954  return 0;
955 }
956 
/* Allocate a new input filter, append it to fg->inputs and return the public
 * InputFilter view (NULL on allocation failure).
 * NOTE(review): the rendered listing dropped lines 957, 975-976 and 978 —
 * presumably the function signature (taking FilterGraph *fg), additional
 * field defaults, and the frame-queue allocation that the check below
 * tests — confirm against the upstream file. */
958 {
959  InputFilterPriv *ifp;
960  InputFilter *ifilter;
961 
962  ifp = allocate_array_elem(&fg->inputs, sizeof(*ifp), &fg->nb_inputs);
963  if (!ifp)
964  return NULL;
965 
966  ifilter = &ifp->ifilter;
967  ifilter->graph = fg;
968 
969  ifp->frame = av_frame_alloc();
970  if (!ifp->frame)
971  return NULL;
972 
973  ifp->index = fg->nb_inputs - 1;
 /* -1 marks "format not negotiated yet" */
974  ifp->format = -1;
977 
979  if (!ifp->frame_queue)
980  return NULL;
981 
982  return ifilter;
983 }
984 
/* Free a filtergraph and everything it owns: all input filters (draining
 * their queued frames), all output filters, the graph description and the
 * private scratch frames. *pfg is set to NULL.
 * NOTE(review): the rendered listing dropped lines 1009 and 1028 —
 * presumably per-input and per-output cleanup calls (e.g. channel-layout
 * uninit) — confirm against the upstream file. */
985 void fg_free(FilterGraph **pfg)
986 {
987  FilterGraph *fg = *pfg;
988  FilterGraphPriv *fgp;
989 
990  if (!fg)
991  return;
992  fgp = fgp_from_fg(fg);
993 
994  for (int j = 0; j < fg->nb_inputs; j++) {
995  InputFilter *ifilter = fg->inputs[j];
996  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
997 
 /* drain and free any frames still queued for this input */
998  if (ifp->frame_queue) {
999  AVFrame *frame;
1000  while (av_fifo_read(ifp->frame_queue, &frame, 1) >= 0)
1001  av_frame_free(&frame);
1002  av_fifo_freep2(&ifp->frame_queue);
1003  }
1004  av_frame_free(&ifp->sub2video.frame);
1005 
1006  av_frame_free(&ifp->frame);
1007  av_frame_free(&ifp->opts.fallback);
1008 
1010  av_freep(&ifp->linklabel);
1011  av_freep(&ifp->opts.name);
1012  av_freep(&ifilter->name);
1013  av_freep(&fg->inputs[j]);
1014  }
1015  av_freep(&fg->inputs);
1016  for (int j = 0; j < fg->nb_outputs; j++) {
1017  OutputFilter *ofilter = fg->outputs[j];
1018  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
1019 
1020  av_frame_free(&ofp->fps.last_frame);
1021  av_dict_free(&ofp->sws_opts);
1022  av_dict_free(&ofp->swr_opts);
1023 
1024  av_freep(&ofilter->linklabel);
1025  av_freep(&ofilter->name);
1026  av_freep(&ofilter->apad);
1027  av_freep(&ofp->name);
1029  av_freep(&fg->outputs[j]);
1030  }
1031  av_freep(&fg->outputs);
1032  av_freep(&fgp->graph_desc);
1033  av_freep(&fgp->nb_threads);
1034 
1035  av_frame_free(&fgp->frame);
1036  av_frame_free(&fgp->frame_enc);
1037 
1038  av_freep(pfg);
1039 }
1040 
1041 static const char *fg_item_name(void *obj)
1042 {
1043  const FilterGraphPriv *fgp = obj;
1044 
1045  return fgp->log_name;
1046 }
1047 
/* AVClass used for logging from FilterGraph(Priv); messages are tagged with
 * the graph's log_name via fg_item_name(). */
1048 static const AVClass fg_class = {
1049  .class_name = "FilterGraph",
1050  .version = LIBAVUTIL_VERSION_INT,
1051  .item_name = fg_item_name,
1052  .category = AV_CLASS_CATEGORY_FILTER,
1053 };
1054 
/* Create a FilterGraph from a textual description. Ownership of graph_desc
 * is taken (it is freed on failure). A throwaway AVFilterGraph is parsed
 * only to enumerate the description's unlinked inputs/outputs, which are
 * turned into InputFilter/OutputFilter entries; the graph is then discarded
 * and the filtergraph is registered with the scheduler. When pfg is NULL the
 * new graph is instead appended to the global filtergraphs list. Returns 0
 * on success, a negative AVERROR on failure.
 * NOTE(review): the rendered listing dropped lines 1060, 1075, 1087, 1105
 * and 1182-1183 — presumably the inputs/outputs AVFilterInOut declarations,
 * the dynarray append to the global list, a graph_desc-related assignment,
 * the hw-device argument to graph_parse(), and the AVFilterInOut cleanup at
 * 'fail' — confirm against the upstream file. */
1055 int fg_create(FilterGraph **pfg, char *graph_desc, Scheduler *sch)
1056 {
1057  FilterGraphPriv *fgp;
1058  FilterGraph *fg;
1059 
1061  AVFilterGraph *graph;
1062  int ret = 0;
1063 
1064  fgp = av_mallocz(sizeof(*fgp));
1065  if (!fgp) {
1066  av_freep(&graph_desc);
1067  return AVERROR(ENOMEM);
1068  }
1069  fg = &fgp->fg;
1070 
 /* caller-owned graph vs. registration in the global filtergraphs list */
1071  if (pfg) {
1072  *pfg = fg;
1073  fg->index = -1;
1074  } else {
1076  if (ret < 0) {
1077  av_freep(&graph_desc);
1078  av_freep(&fgp);
1079  return ret;
1080  }
1081 
1082  fg->index = nb_filtergraphs - 1;
1083  }
1084 
1085  fg->class = &fg_class;
1086  fgp->graph_desc = graph_desc;
1088  fgp->sch = sch;
1089 
1090  snprintf(fgp->log_name, sizeof(fgp->log_name), "fc#%d", fg->index);
1091 
1092  fgp->frame = av_frame_alloc();
1093  fgp->frame_enc = av_frame_alloc();
1094  if (!fgp->frame || !fgp->frame_enc)
1095  return AVERROR(ENOMEM);
1096 
1097  /* this graph is only used for determining the kinds of inputs
1098  * and outputs we have, and is discarded on exit from this function */
1099  graph = avfilter_graph_alloc();
1100  if (!graph)
 /* NOTE(review): stray second ';' below is harmless but should be dropped */
1101  return AVERROR(ENOMEM);;
1102  graph->nb_threads = 1;
1103 
1104  ret = graph_parse(fg, graph, fgp->graph_desc, &inputs, &outputs,
1106  if (ret < 0)
1107  goto fail;
1108 
 /* a graph with source filters (or apad) produces data on its own */
1109  for (unsigned i = 0; i < graph->nb_filters; i++) {
1110  const AVFilter *f = graph->filters[i]->filter;
1111  if ((!avfilter_filter_pad_count(f, 0) &&
1112  !(f->flags & AVFILTER_FLAG_DYNAMIC_INPUTS)) ||
1113  !strcmp(f->name, "apad")) {
1114  fgp->have_sources = 1;
1115  break;
1116  }
1117  }
1118 
 /* create an InputFilter for every unlinked input pad */
1119  for (AVFilterInOut *cur = inputs; cur; cur = cur->next) {
1120  InputFilter *const ifilter = ifilter_alloc(fg);
1121  InputFilterPriv *ifp;
1122 
1123  if (!ifilter) {
1124  ret = AVERROR(ENOMEM);
1125  goto fail;
1126  }
1127 
1128  ifp = ifp_from_ifilter(ifilter);
1129  ifp->linklabel = cur->name;
1130  cur->name = NULL;
1131 
1132  ifp->type = avfilter_pad_get_type(cur->filter_ctx->input_pads,
1133  cur->pad_idx);
1134 
1135  if (ifp->type != AVMEDIA_TYPE_VIDEO && ifp->type != AVMEDIA_TYPE_AUDIO) {
1136  av_log(fg, AV_LOG_FATAL, "Only video and audio filters supported "
1137  "currently.\n");
1138  ret = AVERROR(ENOSYS);
1139  goto fail;
1140  }
1141 
1142  ifilter->name = describe_filter_link(fg, cur, 1);
1143  if (!ifilter->name) {
1144  ret = AVERROR(ENOMEM);
1145  goto fail;
1146  }
1147  }
1148 
 /* create an OutputFilter for every unlinked output pad */
1149  for (AVFilterInOut *cur = outputs; cur; cur = cur->next) {
1150  const enum AVMediaType type = avfilter_pad_get_type(cur->filter_ctx->output_pads,
1151  cur->pad_idx);
1152  OutputFilter *const ofilter = ofilter_alloc(fg, type);
1153 
1154  if (!ofilter) {
1155  ret = AVERROR(ENOMEM);
1156  goto fail;
1157  }
1158 
1159  ofilter->linklabel = cur->name;
1160  cur->name = NULL;
1161 
1162  ofilter->name = describe_filter_link(fg, cur, 0);
1163  if (!ofilter->name) {
1164  ret = AVERROR(ENOMEM);
1165  goto fail;
1166  }
1167  }
1168 
1169  if (!fg->nb_outputs) {
1170  av_log(fg, AV_LOG_FATAL, "A filtergraph has zero outputs, this is not supported\n");
1171  ret = AVERROR(ENOSYS);
1172  goto fail;
1173  }
1174 
1175  ret = sch_add_filtergraph(sch, fg->nb_inputs, fg->nb_outputs,
1176  filter_thread, fgp);
1177  if (ret < 0)
1178  goto fail;
1179  fgp->sch_idx = ret;
1180 
1181 fail:
1184  avfilter_graph_free(&graph);
1185 
1186  if (ret < 0)
1187  return ret;
1188 
1189  return 0;
1190 }
1191 
1193  InputStream *ist,
1194  char *graph_desc,
1195  Scheduler *sch, unsigned sched_idx_enc,
1196  const OutputFilterOptions *opts)
1197 {
1198  const enum AVMediaType type = ist->par->codec_type;
1199  FilterGraph *fg;
1200  FilterGraphPriv *fgp;
1201  int ret;
1202 
1203  ret = fg_create(pfg, graph_desc, sch);
1204  if (ret < 0)
1205  return ret;
1206  fg = *pfg;
1207  fgp = fgp_from_fg(fg);
1208 
1209  fgp->is_simple = 1;
1210 
1211  snprintf(fgp->log_name, sizeof(fgp->log_name), "%cf%s",
1212  av_get_media_type_string(type)[0], opts->name);
1213 
1214  if (fg->nb_inputs != 1 || fg->nb_outputs != 1) {
1215  av_log(fg, AV_LOG_ERROR, "Simple filtergraph '%s' was expected "
1216  "to have exactly 1 input and 1 output. "
1217  "However, it had %d input(s) and %d output(s). Please adjust, "
1218  "or use a complex filtergraph (-filter_complex) instead.\n",
1219  graph_desc, fg->nb_inputs, fg->nb_outputs);
1220  return AVERROR(EINVAL);
1221  }
1222  if (fg->outputs[0]->type != type) {
1223  av_log(fg, AV_LOG_ERROR, "Filtergraph has a %s output, cannot connect "
1224  "it to %s output stream\n",
1227  return AVERROR(EINVAL);
1228  }
1229 
1230  ret = ifilter_bind_ist(fg->inputs[0], ist, opts->vs);
1231  if (ret < 0)
1232  return ret;
1233 
1234  ret = ofilter_bind_enc(fg->outputs[0], sched_idx_enc, opts);
1235  if (ret < 0)
1236  return ret;
1237 
1238  if (opts->nb_threads) {
1239  av_freep(&fgp->nb_threads);
1240  fgp->nb_threads = av_strdup(opts->nb_threads);
1241  if (!fgp->nb_threads)
1242  return AVERROR(ENOMEM);
1243  }
1244 
1245  return 0;
1246 }
1247 
/* Bind one input of a complex filtergraph to its data source: a standalone
 * decoder ("dec:N" link label), another filtergraph's labeled output, an
 * explicitly specified demuxer stream (file-index link label), or — for
 * unlabeled pads — the first unused input stream of the matching media type.
 *
 * NOTE(review): the signature line is missing from this extract; from the call
 * in bind_inputs() this is fg_complex_bind_input(FilterGraph *fg,
 * InputFilter *ifilter) — confirm against upstream. The declarations of the
 * stream specifier `ss` and view specifier `vs` are also missing from the
 * extract; they are referenced below. */
{
    FilterGraphPriv *fgp = fgp_from_fg(fg);
    InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
    InputStream *ist = NULL;
    enum AVMediaType type = ifp->type;
    const char *spec;
    char *p;
    int i, ret;

    if (ifp->linklabel && !strncmp(ifp->linklabel, "dec:", 4)) {
        // bind to a standalone decoder
        int dec_idx;

        dec_idx = strtol(ifp->linklabel + 4, &p, 0);
        if (dec_idx < 0 || dec_idx >= nb_decoders) {
            av_log(fg, AV_LOG_ERROR, "Invalid decoder index %d in filtergraph description %s\n",
                   dec_idx, fgp->graph_desc);
            return AVERROR(EINVAL);
        }

        // an optional view specifier may follow the decoder index after ':'
        if (type == AVMEDIA_TYPE_VIDEO) {
            spec = *p == ':' ? p + 1 : p;
            ret = view_specifier_parse(&spec, &vs);
            if (ret < 0)
                return ret;
        }

        ret = ifilter_bind_dec(ifp, decoders[dec_idx], &vs);
        if (ret < 0)
            av_log(fg, AV_LOG_ERROR, "Error binding a decoder to filtergraph input %s\n",
                   ifilter->name);
        return ret;
    } else if (ifp->linklabel) {
        AVFormatContext *s;
        AVStream *st = NULL;
        int file_idx;

        // try finding an unbound filtergraph output with this label
        for (int i = 0; i < nb_filtergraphs; i++) {
            FilterGraph *fg_src = filtergraphs[i];

            if (fg == fg_src)
                continue;

            for (int j = 0; j < fg_src->nb_outputs; j++) {
                OutputFilter *ofilter = fg_src->outputs[j];

                if (!ofilter->bound && ofilter->linklabel &&
                    !strcmp(ofilter->linklabel, ifp->linklabel)) {
                    av_log(fg, AV_LOG_VERBOSE,
                           "Binding input with label '%s' to filtergraph output %d:%d\n",
                           ifp->linklabel, i, j);

                    ret = ifilter_bind_fg(ifp, fg_src, j);
                    if (ret < 0)
                        av_log(fg, AV_LOG_ERROR, "Error binding filtergraph input %s\n",
                               ifp->linklabel);
                    return ret;
                }
            }
        }

        // bind to an explicitly specified demuxer stream
        file_idx = strtol(ifp->linklabel, &p, 0);
        if (file_idx < 0 || file_idx >= nb_input_files) {
            av_log(fg, AV_LOG_FATAL, "Invalid file index %d in filtergraph description %s.\n",
                   file_idx, fgp->graph_desc);
            return AVERROR(EINVAL);
        }
        s = input_files[file_idx]->ctx;

        // the rest of the label (after ':') is a stream specifier
        ret = stream_specifier_parse(&ss, *p == ':' ? p + 1 : p, 1, fg);
        if (ret < 0) {
            av_log(fg, AV_LOG_ERROR, "Invalid stream specifier: %s\n", p);
            return ret;
        }

        if (type == AVMEDIA_TYPE_VIDEO) {
            spec = ss.remainder ? ss.remainder : "";
            ret = view_specifier_parse(&spec, &vs);
            if (ret < 0) {
                /* NOTE(review): a line is missing from the extract here —
                 * presumably stream_specifier_uninit(&ss); confirm. */
                return ret;
            }
        }

        // pick the first stream matching the specifier and the media type;
        // subtitle streams are accepted for video inputs (sub2video)
        for (i = 0; i < s->nb_streams; i++) {
            enum AVMediaType stream_type = s->streams[i]->codecpar->codec_type;
            if (stream_type != type &&
                !(stream_type == AVMEDIA_TYPE_SUBTITLE &&
                  type == AVMEDIA_TYPE_VIDEO /* sub2video hack */))
                continue;
            if (stream_specifier_match(&ss, s, s->streams[i], fg)) {
                st = s->streams[i];
                break;
            }
        }
        if (!st) {
            av_log(fg, AV_LOG_FATAL, "Stream specifier '%s' in filtergraph description %s "
                   "matches no streams.\n", p, fgp->graph_desc);
            return AVERROR(EINVAL);
        }
        ist = input_files[file_idx]->streams[st->index];

        av_log(fg, AV_LOG_VERBOSE,
               "Binding input with label '%s' to input stream %d:%d\n",
               ifp->linklabel, ist->file->index, ist->index);
    } else {
        // no label: take the first unused input stream of the right type
        ist = ist_find_unused(type);
        if (!ist) {
            av_log(fg, AV_LOG_FATAL,
                   "Cannot find an unused %s input stream to feed the "
                   "unlabeled input pad %s.\n",
                   av_get_media_type_string(type), ifilter->name);
            return AVERROR(EINVAL);
        }

        av_log(fg, AV_LOG_VERBOSE,
               "Binding unlabeled input %d to input stream %d:%d\n",
               ifp->index, ist->file->index, ist->index);
    }
    av_assert0(ist);

    ret = ifilter_bind_ist(ifilter, ist, &vs);
    if (ret < 0) {
        av_log(fg, AV_LOG_ERROR,
               "Error binding an input stream to complex filtergraph input %s.\n",
               ifilter->name);
        return ret;
    }

    return 0;
}
1385 
1386 static int bind_inputs(FilterGraph *fg)
1387 {
1388  // bind filtergraph inputs to input streams or other filtergraphs
1389  for (int i = 0; i < fg->nb_inputs; i++) {
1391  int ret;
1392 
1393  if (ifp->bound)
1394  continue;
1395 
1396  ret = fg_complex_bind_input(fg, &ifp->ifilter);
1397  if (ret < 0)
1398  return ret;
1399  }
1400 
1401  return 0;
1402 }
1403 
1405 {
1406  int ret;
1407 
1408  for (int i = 0; i < nb_filtergraphs; i++) {
1410  if (ret < 0)
1411  return ret;
1412  }
1413 
1414  // check that all outputs were bound
1415  for (int i = 0; i < nb_filtergraphs; i++) {
1416  FilterGraph *fg = filtergraphs[i];
1417 
1418  for (int j = 0; j < fg->nb_outputs; j++) {
1419  OutputFilter *output = fg->outputs[j];
1420  if (!output->bound) {
1421  av_log(fg, AV_LOG_FATAL,
1422  "Filter '%s' has output %d (%s) unconnected\n",
1423  output->name, j,
1424  output->linklabel ? (const char *)output->linklabel : "unlabeled");
1425  return AVERROR(EINVAL);
1426  }
1427  }
1428  }
1429 
1430  return 0;
1431 }
1432 
1433 static int insert_trim(void *logctx, int64_t start_time, int64_t duration,
1434  AVFilterContext **last_filter, int *pad_idx,
1435  const char *filter_name)
1436 {
1437  AVFilterGraph *graph = (*last_filter)->graph;
1439  const AVFilter *trim;
1440  enum AVMediaType type = avfilter_pad_get_type((*last_filter)->output_pads, *pad_idx);
1441  const char *name = (type == AVMEDIA_TYPE_VIDEO) ? "trim" : "atrim";
1442  int ret = 0;
1443 
1444  if (duration == INT64_MAX && start_time == AV_NOPTS_VALUE)
1445  return 0;
1446 
1447  trim = avfilter_get_by_name(name);
1448  if (!trim) {
1449  av_log(logctx, AV_LOG_ERROR, "%s filter not present, cannot limit "
1450  "recording time.\n", name);
1451  return AVERROR_FILTER_NOT_FOUND;
1452  }
1453 
1454  ctx = avfilter_graph_alloc_filter(graph, trim, filter_name);
1455  if (!ctx)
1456  return AVERROR(ENOMEM);
1457 
1458  if (duration != INT64_MAX) {
1459  ret = av_opt_set_int(ctx, "durationi", duration,
1461  }
1462  if (ret >= 0 && start_time != AV_NOPTS_VALUE) {
1463  ret = av_opt_set_int(ctx, "starti", start_time,
1465  }
1466  if (ret < 0) {
1467  av_log(ctx, AV_LOG_ERROR, "Error configuring the %s filter", name);
1468  return ret;
1469  }
1470 
1472  if (ret < 0)
1473  return ret;
1474 
1475  ret = avfilter_link(*last_filter, *pad_idx, ctx, 0);
1476  if (ret < 0)
1477  return ret;
1478 
1479  *last_filter = ctx;
1480  *pad_idx = 0;
1481  return 0;
1482 }
1483 
1484 static int insert_filter(AVFilterContext **last_filter, int *pad_idx,
1485  const char *filter_name, const char *args)
1486 {
1487  AVFilterGraph *graph = (*last_filter)->graph;
1488  const AVFilter *filter = avfilter_get_by_name(filter_name);
1490  int ret;
1491 
1492  if (!filter)
1493  return AVERROR_BUG;
1494 
1496  filter,
1497  filter_name, args, NULL, graph);
1498  if (ret < 0)
1499  return ret;
1500 
1501  ret = avfilter_link(*last_filter, *pad_idx, ctx, 0);
1502  if (ret < 0)
1503  return ret;
1504 
1505  *last_filter = ctx;
1506  *pad_idx = 0;
1507  return 0;
1508 }
1509 
/* Configure a video output of the filtergraph: create the buffersink, insert
 * an optional output scaler, pin the negotiated pixel formats / color
 * properties, insert output trim, and link everything to the sink.
 *
 * NOTE(review): the first signature line is missing from this extract;
 * from the dispatcher this is configure_output_video_filter(FilterGraphPriv
 * *fgp, AVFilterGraph *graph, OutputFilter *ofilter, AVFilterInOut *out) —
 * confirm. Several call-head/declaration lines are missing below and are
 * marked in place. */
                                          OutputFilter *ofilter, AVFilterInOut *out)
{
    OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
    AVFilterContext *last_filter = out->filter_ctx;
    AVBPrint bprint;
    int pad_idx = out->pad_idx;
    int ret;
    char name[255];

    snprintf(name, sizeof(name), "out_%s", ofp->name);
    /* NOTE(review): missing line here — presumably
     * ret = avfilter_graph_create_filter(&ofp->filter, ... — confirm. */
                                       avfilter_get_by_name("buffersink"),
                                       name, NULL, NULL, graph);

    if (ret < 0)
        return ret;

    // optional scaler when a fixed output size was requested with autoscale
    if ((ofp->width || ofp->height) && (ofp->flags & OFILTER_FLAG_AUTOSCALE)) {
        char args[255];
        const AVDictionaryEntry *e = NULL;

        snprintf(args, sizeof(args), "%d:%d",
                 ofp->width, ofp->height);

        // forward the per-output sws options to the scale filter
        while ((e = av_dict_iterate(ofp->sws_opts, e))) {
            av_strlcatf(args, sizeof(args), ":%s=%s", e->key, e->value);
        }

        snprintf(name, sizeof(name), "scaler_out_%s", ofp->name);
        /* NOTE(review): missing call head here — presumably
         * if ((ret = avfilter_graph_create_filter(&filter, avfilter_get_by_name("scale"), */
                                                name, args, NULL, graph)) < 0)
            return ret;
        if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
            return ret;

        last_filter = filter;
        pad_idx = 0;
    }

    /* NOTE(review): missing line here — presumably the start of an
     * av_assert0(...) over the next continuation line, plus av_bprint_init(). */
                ofp->format != AV_PIX_FMT_NONE || !ofp->formats);
    // collect the allowed pixel formats / color spaces / ranges into a
    // format-filter argument string
    choose_pix_fmts(ofp, &bprint);
    choose_color_spaces(ofp, &bprint);
    choose_color_ranges(ofp, &bprint);
    if (!av_bprint_is_complete(&bprint))
        return AVERROR(ENOMEM);

    if (bprint.len) {

        /* NOTE(review): missing call head here — presumably
         * ret = avfilter_graph_create_filter(&filter, */
                                           avfilter_get_by_name("format"),
                                           "format", bprint.str, NULL, graph);
        av_bprint_finalize(&bprint, NULL);
        if (ret < 0)
            return ret;
        if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
            return ret;

        last_filter = filter;
        pad_idx = 0;
    }

    // apply output-side trim (start/duration) just before the sink
    snprintf(name, sizeof(name), "trim_out_%s", ofp->name);
    ret = insert_trim(fgp, ofp->trim_start_us, ofp->trim_duration_us,
                      &last_filter, &pad_idx, name);
    if (ret < 0)
        return ret;


    if ((ret = avfilter_link(last_filter, pad_idx, ofp->filter, 0)) < 0)
        return ret;

    return 0;
}
1588 
/* Configure an audio output of the filtergraph: create the abuffersink,
 * constrain sample formats/rates/channel layouts via aformat, optionally
 * insert apad, apply output trim, and link to the sink.
 *
 * NOTE(review): the first signature line is missing from this extract;
 * from the dispatcher this is configure_output_audio_filter(FilterGraphPriv
 * *fgp, AVFilterGraph *graph, OutputFilter *ofilter, AVFilterInOut *out) —
 * confirm. Missing lines below are marked in place. */
                                          OutputFilter *ofilter, AVFilterInOut *out)
{
    OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
    AVFilterContext *last_filter = out->filter_ctx;
    int pad_idx = out->pad_idx;
    AVBPrint args;
    char name[255];
    int ret;

    snprintf(name, sizeof(name), "out_%s", ofp->name);
    /* NOTE(review): missing call head here — presumably
     * ret = avfilter_graph_create_filter(&ofp->filter, */
                                       avfilter_get_by_name("abuffersink"),
                                       name, NULL, NULL, graph);
    if (ret < 0)
        return ret;

/* Helper: create filter_name=arg, link it after last_filter and advance
 * last_filter/pad_idx; jumps to fail on error. */
#define AUTO_INSERT_FILTER(opt_name, filter_name, arg) do {                 \
    AVFilterContext *filt_ctx;                                              \
                                                                            \
    av_log(ofilter, AV_LOG_INFO, opt_name " is forwarded to lavfi "         \
           "similarly to -af " filter_name "=%s.\n", arg);                  \
                                                                            \
    ret = avfilter_graph_create_filter(&filt_ctx,                           \
                                       avfilter_get_by_name(filter_name),   \
                                       filter_name, arg, NULL, graph);      \
    if (ret < 0)                                                            \
        goto fail;                                                          \
                                                                            \
    ret = avfilter_link(last_filter, pad_idx, filt_ctx, 0);                 \
    if (ret < 0)                                                            \
        goto fail;                                                          \
                                                                            \
    last_filter = filt_ctx;                                                 \
    pad_idx = 0;                                                            \
} while (0)
    /* NOTE(review): missing line here — presumably
     * av_bprint_init(&args, 0, AV_BPRINT_SIZE_UNLIMITED); — confirm. */

    // collect allowed sample formats/rates/layouts into an aformat argument
    choose_sample_fmts(ofp, &args);
    choose_sample_rates(ofp, &args);
    choose_channel_layouts(ofp, &args);
    if (!av_bprint_is_complete(&args)) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }
    if (args.len) {

        snprintf(name, sizeof(name), "format_out_%s", ofp->name);
        /* NOTE(review): missing call head here — presumably
         * ret = avfilter_graph_create_filter(&format, */
                                           avfilter_get_by_name("aformat"),
                                           name, args.str, NULL, graph);
        if (ret < 0)
            goto fail;

        ret = avfilter_link(last_filter, pad_idx, format, 0);
        if (ret < 0)
            goto fail;

        last_filter = format;
        pad_idx = 0;
    }

    if (ofilter->apad) {
        AUTO_INSERT_FILTER("-apad", "apad", ofilter->apad);
        // apad keeps producing data, so the graph now has a source of its own
        fgp->have_sources = 1;
    }

    snprintf(name, sizeof(name), "trim for output %s", ofp->name);
    ret = insert_trim(fgp, ofp->trim_start_us, ofp->trim_duration_us,
                      &last_filter, &pad_idx, name);
    if (ret < 0)
        goto fail;

    if ((ret = avfilter_link(last_filter, pad_idx, ofp->filter, 0)) < 0)
        goto fail;
fail:
    av_bprint_finalize(&args, NULL);

    return ret;
}
1670 
1672  OutputFilter *ofilter, AVFilterInOut *out)
1673 {
1674  switch (ofilter->type) {
1675  case AVMEDIA_TYPE_VIDEO: return configure_output_video_filter(fgp, graph, ofilter, out);
1676  case AVMEDIA_TYPE_AUDIO: return configure_output_audio_filter(fgp, graph, ofilter, out);
1677  default: av_assert0(0); return 0;
1678  }
1679 }
1680 
1682 {
1683  ifp->sub2video.last_pts = INT64_MIN;
1684  ifp->sub2video.end_pts = INT64_MIN;
1685 
1686  /* sub2video structure has been (re-)initialized.
1687  Mark it as such so that the system will be
1688  initialized with the first received heartbeat. */
1689  ifp->sub2video.initialize = 1;
1690 }
1691 
/* Configure a video input of the filtergraph: create and parametrize the
 * buffer source, insert optional crop/autorotate filters and input trim,
 * then link to the graph's input pad.
 *
 * NOTE(review): the first signature line is missing from this extract;
 * from the dispatcher this is configure_input_video_filter(FilterGraph *fg,
 * AVFilterGraph *graph, InputFilter *ifilter, AVFilterInOut *in) — confirm.
 * Missing lines below are marked in place. */
                                         InputFilter *ifilter, AVFilterInOut *in)
{
    InputFilterPriv *ifp = ifp_from_ifilter(ifilter);

    AVFilterContext *last_filter;
    const AVFilter *buffer_filt = avfilter_get_by_name("buffer");
    const AVPixFmtDescriptor *desc;
    char name[255];
    int ret, pad_idx = 0;
    /* NOTE(review): missing line here — presumably
     * AVBufferSrcParameters *par = av_buffersrc_parameters_alloc(); — confirm. */
    if (!par)
        return AVERROR(ENOMEM);

    // subtitle inputs feed the sub2video path; reset its state
    if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE)
        sub2video_prepare(ifp);

    snprintf(name, sizeof(name), "graph %d input from stream %s", fg->index,
             ifp->opts.name);

    ifp->filter = avfilter_graph_alloc_filter(graph, buffer_filt, name);
    if (!ifp->filter) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    // describe the incoming frames to the buffer source
    par->format              = ifp->format;
    par->time_base           = ifp->time_base;
    par->frame_rate          = ifp->opts.framerate;
    par->width               = ifp->width;
    par->height              = ifp->height;
    par->sample_aspect_ratio = ifp->sample_aspect_ratio.den > 0 ?
                               ifp->sample_aspect_ratio : (AVRational){ 0, 1 };
    par->color_space         = ifp->color_space;
    par->color_range         = ifp->color_range;
    par->hw_frames_ctx       = ifp->hw_frames_ctx;
    ret = av_buffersrc_parameters_set(ifp->filter, par);
    if (ret < 0)
        goto fail;
    av_freep(&par);

    ret = avfilter_init_dict(ifp->filter, NULL);
    if (ret < 0)
        goto fail;

    last_filter = ifp->filter;

    /* NOTE(review): missing line here — presumably
     * desc = av_pix_fmt_desc_get(ifp->format); — confirm. */
    av_assert0(desc);

    // user-requested edge cropping
    if ((ifp->opts.flags & IFILTER_FLAG_CROP)) {
        char crop_buf[64];
        snprintf(crop_buf, sizeof(crop_buf), "w=iw-%u-%u:h=ih-%u-%u:x=%u:y=%u",
                 ifp->opts.crop_left, ifp->opts.crop_right,
                 ifp->opts.crop_top, ifp->opts.crop_bottom,
                 ifp->opts.crop_left, ifp->opts.crop_top);
        ret = insert_filter(&last_filter, &pad_idx, "crop", crop_buf);
        if (ret < 0)
            return ret;
    }

    // TODO: insert hwaccel enabled filters like transpose_vaapi into the graph
    ifp->displaymatrix_applied = 0;
    // apply the stream display matrix in software (skipped for hw formats)
    if ((ifp->opts.flags & IFILTER_FLAG_AUTOROTATE) &&
        !(desc->flags & AV_PIX_FMT_FLAG_HWACCEL)) {
        int32_t *displaymatrix = ifp->displaymatrix;
        double theta;

        theta = get_rotation(displaymatrix);

        if (fabs(theta - 90) < 1.0) {
            ret = insert_filter(&last_filter, &pad_idx, "transpose",
                                displaymatrix[3] > 0 ? "cclock_flip" : "clock");
        } else if (fabs(theta - 180) < 1.0) {
            if (displaymatrix[0] < 0) {
                ret = insert_filter(&last_filter, &pad_idx, "hflip", NULL);
                if (ret < 0)
                    return ret;
            }
            if (displaymatrix[4] < 0) {
                ret = insert_filter(&last_filter, &pad_idx, "vflip", NULL);
            }
        } else if (fabs(theta - 270) < 1.0) {
            ret = insert_filter(&last_filter, &pad_idx, "transpose",
                                displaymatrix[3] < 0 ? "clock_flip" : "cclock");
        } else if (fabs(theta) > 1.0) {
            char rotate_buf[64];
            snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
            ret = insert_filter(&last_filter, &pad_idx, "rotate", rotate_buf);
        } else if (fabs(theta) < 1.0) {
            // no rotation, but a vertical flip may still be encoded
            if (displaymatrix && displaymatrix[4] < 0) {
                ret = insert_filter(&last_filter, &pad_idx, "vflip", NULL);
            }
        }
        if (ret < 0)
            return ret;

        ifp->displaymatrix_applied = 1;
    }

    // apply input-side trim before handing frames to the user graph
    snprintf(name, sizeof(name), "trim_in_%s", ifp->opts.name);
    ret = insert_trim(fg, ifp->opts.trim_start_us, ifp->opts.trim_end_us,
                      &last_filter, &pad_idx, name);
    if (ret < 0)
        return ret;

    if ((ret = avfilter_link(last_filter, 0, in->filter_ctx, in->pad_idx)) < 0)
        return ret;
    return 0;
fail:
    av_freep(&par);

    return ret;
}
1806 
/* Configure an audio input of the filtergraph: create the abuffer source
 * described by the input's sample parameters, insert input trim and link to
 * the graph's input pad.
 *
 * NOTE(review): the first signature line is missing from this extract;
 * from the dispatcher this is configure_input_audio_filter(FilterGraph *fg,
 * AVFilterGraph *graph, InputFilter *ifilter, AVFilterInOut *in) — confirm.
 * Missing lines below are marked in place. */
                                         InputFilter *ifilter, AVFilterInOut *in)
{
    InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
    AVFilterContext *last_filter;
    const AVFilter *abuffer_filt = avfilter_get_by_name("abuffer");
    AVBPrint args;
    char name[255];
    int ret, pad_idx = 0;

    /* NOTE(review): missing line here — presumably
     * av_bprint_init(&args, 0, AV_BPRINT_SIZE_AUTOMATIC); — confirm. */
    av_bprintf(&args, "time_base=%d/%d:sample_rate=%d:sample_fmt=%s",
               ifp->time_base.num, ifp->time_base.den,
               ifp->sample_rate,
    /* NOTE(review): missing continuation here — presumably
     * av_get_sample_fmt_name(ifp->format)); — confirm. */
    // describe the channel layout when known, else just the channel count
    if (av_channel_layout_check(&ifp->ch_layout) &&
        av_bprintf(&args, ":channel_layout=");
        /* NOTE(review): missing line here — presumably the layout condition and
         * av_channel_layout_describe_bprint(&ifp->ch_layout, &args); — confirm. */
    } else
        av_bprintf(&args, ":channels=%d", ifp->ch_layout.nb_channels);
    snprintf(name, sizeof(name), "graph_%d_in_%s", fg->index, ifp->opts.name);

    if ((ret = avfilter_graph_create_filter(&ifp->filter, abuffer_filt,
                                            name, args.str, NULL,
                                            graph)) < 0)
        return ret;
    last_filter = ifp->filter;

    // apply input-side trim before handing frames to the user graph
    snprintf(name, sizeof(name), "trim for input stream %s", ifp->opts.name);
    ret = insert_trim(fg, ifp->opts.trim_start_us, ifp->opts.trim_end_us,
                      &last_filter, &pad_idx, name);
    if (ret < 0)
        return ret;

    if ((ret = avfilter_link(last_filter, 0, in->filter_ctx, in->pad_idx)) < 0)
        return ret;

    return 0;
}
1847 
1849  InputFilter *ifilter, AVFilterInOut *in)
1850 {
1851  switch (ifp_from_ifilter(ifilter)->type) {
1852  case AVMEDIA_TYPE_VIDEO: return configure_input_video_filter(fg, graph, ifilter, in);
1853  case AVMEDIA_TYPE_AUDIO: return configure_input_audio_filter(fg, graph, ifilter, in);
1854  default: av_assert0(0); return 0;
1855  }
1856 }
1857 
1859 {
1860  for (int i = 0; i < fg->nb_outputs; i++)
1862  for (int i = 0; i < fg->nb_inputs; i++)
1863  ifp_from_ifilter(fg->inputs[i])->filter = NULL;
1864  avfilter_graph_free(&fgt->graph);
1865 }
1866 
1868 {
1869  return f->nb_inputs == 0 &&
1870  (!strcmp(f->filter->name, "buffer") ||
1871  !strcmp(f->filter->name, "abuffer"));
1872 }
1873 
1874 static int graph_is_meta(AVFilterGraph *graph)
1875 {
1876  for (unsigned i = 0; i < graph->nb_filters; i++) {
1877  const AVFilterContext *f = graph->filters[i];
1878 
1879  /* in addition to filters flagged as meta, also
1880  * disregard sinks and buffersources (but not other sources,
1881  * since they introduce data we are not aware of)
1882  */
1883  if (!((f->filter->flags & AVFILTER_FLAG_METADATA_ONLY) ||
1884  f->nb_outputs == 0 ||
1886  return 0;
1887  }
1888  return 1;
1889 }
1890 
1891 static int sub2video_frame(InputFilter *ifilter, AVFrame *frame, int buffer);
1892 
1894 {
1895  FilterGraphPriv *fgp = fgp_from_fg(fg);
1896  AVBufferRef *hw_device;
1897  AVFilterInOut *inputs, *outputs, *cur;
1898  int ret, i, simple = filtergraph_is_simple(fg);
1899  int have_input_eof = 0;
1900  const char *graph_desc = fgp->graph_desc;
1901 
1902  cleanup_filtergraph(fg, fgt);
1903  fgt->graph = avfilter_graph_alloc();
1904  if (!fgt->graph)
1905  return AVERROR(ENOMEM);
1906 
1907  if (simple) {
1908  OutputFilterPriv *ofp = ofp_from_ofilter(fg->outputs[0]);
1909 
1910  if (filter_nbthreads) {
1911  ret = av_opt_set(fgt->graph, "threads", filter_nbthreads, 0);
1912  if (ret < 0)
1913  goto fail;
1914  } else if (fgp->nb_threads) {
1915  ret = av_opt_set(fgt->graph, "threads", fgp->nb_threads, 0);
1916  if (ret < 0)
1917  return ret;
1918  }
1919 
1920  if (av_dict_count(ofp->sws_opts)) {
1922  &fgt->graph->scale_sws_opts,
1923  '=', ':');
1924  if (ret < 0)
1925  goto fail;
1926  }
1927 
1928  if (av_dict_count(ofp->swr_opts)) {
1929  char *args;
1930  ret = av_dict_get_string(ofp->swr_opts, &args, '=', ':');
1931  if (ret < 0)
1932  goto fail;
1933  av_opt_set(fgt->graph, "aresample_swr_opts", args, 0);
1934  av_free(args);
1935  }
1936  } else {
1938  }
1939 
1940  hw_device = hw_device_for_filter();
1941 
1942  ret = graph_parse(fg, fgt->graph, graph_desc, &inputs, &outputs, hw_device);
1943  if (ret < 0)
1944  goto fail;
1945 
1946  for (cur = inputs, i = 0; cur; cur = cur->next, i++)
1947  if ((ret = configure_input_filter(fg, fgt->graph, fg->inputs[i], cur)) < 0) {
1950  goto fail;
1951  }
1953 
1954  for (cur = outputs, i = 0; cur; cur = cur->next, i++) {
1955  ret = configure_output_filter(fgp, fgt->graph, fg->outputs[i], cur);
1956  if (ret < 0) {
1958  goto fail;
1959  }
1960  }
1962 
1963  if (fgp->disable_conversions)
1965  if ((ret = avfilter_graph_config(fgt->graph, NULL)) < 0)
1966  goto fail;
1967 
1968  fgp->is_meta = graph_is_meta(fgt->graph);
1969 
1970  /* limit the lists of allowed formats to the ones selected, to
1971  * make sure they stay the same if the filtergraph is reconfigured later */
1972  for (int i = 0; i < fg->nb_outputs; i++) {
1973  OutputFilter *ofilter = fg->outputs[i];
1974  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
1975  AVFilterContext *sink = ofp->filter;
1976 
1977  ofp->format = av_buffersink_get_format(sink);
1978 
1979  ofp->width = av_buffersink_get_w(sink);
1980  ofp->height = av_buffersink_get_h(sink);
1983 
1984  // If the timing parameters are not locked yet, get the tentative values
1985  // here but don't lock them. They will only be used if no output frames
1986  // are ever produced.
1987  if (!ofp->tb_out_locked) {
1989  if (ofp->fps.framerate.num <= 0 && ofp->fps.framerate.den <= 0 &&
1990  fr.num > 0 && fr.den > 0)
1991  ofp->fps.framerate = fr;
1992  ofp->tb_out = av_buffersink_get_time_base(sink);
1993  }
1995 
1998  ret = av_buffersink_get_ch_layout(sink, &ofp->ch_layout);
1999  if (ret < 0)
2000  goto fail;
2001  }
2002 
2003  for (int i = 0; i < fg->nb_inputs; i++) {
2005  AVFrame *tmp;
2006  while (av_fifo_read(ifp->frame_queue, &tmp, 1) >= 0) {
2007  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE) {
2008  sub2video_frame(&ifp->ifilter, tmp, !fgt->graph);
2009  } else {
2011  }
2012  av_frame_free(&tmp);
2013  if (ret < 0)
2014  goto fail;
2015  }
2016  }
2017 
2018  /* send the EOFs for the finished inputs */
2019  for (int i = 0; i < fg->nb_inputs; i++) {
2021  if (fgt->eof_in[i]) {
2023  if (ret < 0)
2024  goto fail;
2025  have_input_eof = 1;
2026  }
2027  }
2028 
2029  if (have_input_eof) {
2030  // make sure the EOF propagates to the end of the graph
2032  if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
2033  goto fail;
2034  }
2035 
2036  return 0;
2037 fail:
2038  cleanup_filtergraph(fg, fgt);
2039  return ret;
2040 }
2041 
2043 {
2044  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2045  AVFrameSideData *sd;
2046  int ret;
2047 
2048  ret = av_buffer_replace(&ifp->hw_frames_ctx, frame->hw_frames_ctx);
2049  if (ret < 0)
2050  return ret;
2051 
2052  ifp->time_base = (ifp->type == AVMEDIA_TYPE_AUDIO) ? (AVRational){ 1, frame->sample_rate } :
2053  (ifp->opts.flags & IFILTER_FLAG_CFR) ? av_inv_q(ifp->opts.framerate) :
2054  frame->time_base;
2055 
2056  ifp->format = frame->format;
2057 
2058  ifp->width = frame->width;
2059  ifp->height = frame->height;
2060  ifp->sample_aspect_ratio = frame->sample_aspect_ratio;
2061  ifp->color_space = frame->colorspace;
2062  ifp->color_range = frame->color_range;
2063 
2064  ifp->sample_rate = frame->sample_rate;
2065  ret = av_channel_layout_copy(&ifp->ch_layout, &frame->ch_layout);
2066  if (ret < 0)
2067  return ret;
2068 
2070  if (sd)
2071  memcpy(ifp->displaymatrix, sd->data, sizeof(ifp->displaymatrix));
2072  ifp->displaymatrix_present = !!sd;
2073 
2074  return 0;
2075 }
2076 
2078 {
2079  const FilterGraphPriv *fgp = cfgp_from_cfg(fg);
2080  return fgp->is_simple;
2081 }
2082 
2083 static void send_command(FilterGraph *fg, AVFilterGraph *graph,
2084  double time, const char *target,
2085  const char *command, const char *arg, int all_filters)
2086 {
2087  int ret;
2088 
2089  if (!graph)
2090  return;
2091 
2092  if (time < 0) {
2093  char response[4096];
2094  ret = avfilter_graph_send_command(graph, target, command, arg,
2095  response, sizeof(response),
2096  all_filters ? 0 : AVFILTER_CMD_FLAG_ONE);
2097  fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s",
2098  fg->index, ret, response);
2099  } else if (!all_filters) {
2100  fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
2101  } else {
2102  ret = avfilter_graph_queue_command(graph, target, command, arg, 0, time);
2103  if (ret < 0)
2104  fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
2105  }
2106 }
2107 
2108 static int choose_input(const FilterGraph *fg, const FilterGraphThread *fgt)
2109 {
2110  int nb_requests, nb_requests_max = -1;
2111  int best_input = -1;
2112 
2113  for (int i = 0; i < fg->nb_inputs; i++) {
2114  InputFilter *ifilter = fg->inputs[i];
2115  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2116 
2117  if (fgt->eof_in[i])
2118  continue;
2119 
2120  nb_requests = av_buffersrc_get_nb_failed_requests(ifp->filter);
2121  if (nb_requests > nb_requests_max) {
2122  nb_requests_max = nb_requests;
2123  best_input = i;
2124  }
2125  }
2126 
2127  av_assert0(best_input >= 0);
2128 
2129  return best_input;
2130 }
2131 
2133 {
2134  OutputFilter *ofilter = &ofp->ofilter;
2135  FPSConvContext *fps = &ofp->fps;
2136  AVRational tb = (AVRational){ 0, 0 };
2137  AVRational fr;
2138  const FrameData *fd;
2139 
2140  fd = frame_data_c(frame);
2141 
2142  // apply -enc_time_base
2143  if (ofp->enc_timebase.num == ENC_TIME_BASE_DEMUX &&
2144  (fd->dec.tb.num <= 0 || fd->dec.tb.den <= 0)) {
2145  av_log(ofp, AV_LOG_ERROR,
2146  "Demuxing timebase not available - cannot use it for encoding\n");
2147  return AVERROR(EINVAL);
2148  }
2149 
2150  switch (ofp->enc_timebase.num) {
2151  case 0: break;
2152  case ENC_TIME_BASE_DEMUX: tb = fd->dec.tb; break;
2153  case ENC_TIME_BASE_FILTER: tb = frame->time_base; break;
2154  default: tb = ofp->enc_timebase; break;
2155  }
2156 
2157  if (ofilter->type == AVMEDIA_TYPE_AUDIO) {
2158  tb = tb.num ? tb : (AVRational){ 1, frame->sample_rate };
2159  goto finish;
2160  }
2161 
2162  fr = fps->framerate;
2163  if (!fr.num) {
2165  if (fr_sink.num > 0 && fr_sink.den > 0)
2166  fr = fr_sink;
2167  }
2168 
2169  if (fps->vsync_method == VSYNC_CFR || fps->vsync_method == VSYNC_VSCFR) {
2170  if (!fr.num && !fps->framerate_max.num) {
2171  fr = (AVRational){25, 1};
2172  av_log(ofp, AV_LOG_WARNING,
2173  "No information "
2174  "about the input framerate is available. Falling "
2175  "back to a default value of 25fps. Use the -r option "
2176  "if you want a different framerate.\n");
2177  }
2178 
2179  if (fps->framerate_max.num &&
2180  (av_q2d(fr) > av_q2d(fps->framerate_max) ||
2181  !fr.den))
2182  fr = fps->framerate_max;
2183  }
2184 
2185  if (fr.num > 0) {
2186  if (fps->framerate_supported) {
2187  int idx = av_find_nearest_q_idx(fr, fps->framerate_supported);
2188  fr = fps->framerate_supported[idx];
2189  }
2190  if (fps->framerate_clip) {
2191  av_reduce(&fr.num, &fr.den,
2192  fr.num, fr.den, fps->framerate_clip);
2193  }
2194  }
2195 
2196  if (!(tb.num > 0 && tb.den > 0))
2197  tb = av_inv_q(fr);
2198  if (!(tb.num > 0 && tb.den > 0))
2199  tb = frame->time_base;
2200 
2201  fps->framerate = fr;
2202 finish:
2203  ofp->tb_out = tb;
2204  ofp->tb_out_locked = 1;
2205 
2206  return 0;
2207 }
2208 
2209 static double adjust_frame_pts_to_encoder_tb(void *logctx, AVFrame *frame,
2210  AVRational tb_dst, int64_t start_time)
2211 {
2212  double float_pts = AV_NOPTS_VALUE; // this is identical to frame.pts but with higher precision
2213 
2214  AVRational tb = tb_dst;
2215  AVRational filter_tb = frame->time_base;
2216  const int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
2217 
2218  if (frame->pts == AV_NOPTS_VALUE)
2219  goto early_exit;
2220 
2221  tb.den <<= extra_bits;
2222  float_pts = av_rescale_q(frame->pts, filter_tb, tb) -
2224  float_pts /= 1 << extra_bits;
2225  // when float_pts is not exactly an integer,
2226  // avoid exact midpoints to reduce the chance of rounding differences, this
2227  // can be removed in case the fps code is changed to work with integers
2228  if (float_pts != llrint(float_pts))
2229  float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
2230 
2231  frame->pts = av_rescale_q(frame->pts, filter_tb, tb_dst) -
2233  frame->time_base = tb_dst;
2234 
2235 early_exit:
2236 
2237  if (debug_ts) {
2238  av_log(logctx, AV_LOG_INFO,
2239  "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
2240  frame ? av_ts2str(frame->pts) : "NULL",
2241  av_ts2timestr(frame->pts, &tb_dst),
2242  float_pts, tb_dst.num, tb_dst.den);
2243  }
2244 
2245  return float_pts;
2246 }
2247 
/* Convert frame timestamps to the encoder timebase and decide how many times
 * should this (and possibly previous) frame be repeated in order to conform to
 * desired target framerate (if any).
 */
/* NOTE(review): the first signature line is missing from this extract;
 * upstream this is video_sync_process(OutputFilterPriv *ofp, AVFrame *frame,
 * int64_t *nb_frames, int64_t *nb_frames_prev) — confirm. */
                              int64_t *nb_frames, int64_t *nb_frames_prev)
{
    OutputFilter *ofilter = &ofp->ofilter;
    FPSConvContext *fps = &ofp->fps;
    double delta0, delta, sync_ipts, duration;

    if (!frame) {
        // flush: estimate how often to repeat the last frame from the
        // median of recent duplication counts
        *nb_frames_prev = *nb_frames = mid_pred(fps->frames_prev_hist[0],
                                                fps->frames_prev_hist[1],
                                                fps->frames_prev_hist[2]);

        if (!*nb_frames && fps->last_dropped) {
            atomic_fetch_add(&ofilter->nb_frames_drop, 1);
            fps->last_dropped++;
        }

        goto finish;
    }

    duration = frame->duration * av_q2d(frame->time_base) / av_q2d(ofp->tb_out);

    sync_ipts = adjust_frame_pts_to_encoder_tb(ofilter->graph, frame,
                                               ofp->tb_out, ofp->ts_offset);
    /* delta0 is the "drift" between the input frame and
     * where it would fall in the output. */
    delta0 = sync_ipts - ofp->next_pts;
    delta  = delta0 + duration;

    // tracks the number of times the PREVIOUS frame should be duplicated,
    // mostly for variable framerate (VFR)
    *nb_frames_prev = 0;
    /* by default, we output a single frame */
    *nb_frames = 1;

    /* NOTE(review): lines missing here — presumably the condition also excludes
     * VSYNC_PASSTHROUGH and opens #if FFMPEG_OPT_VSYNC_DROP — confirm. */
    if (delta0 < 0 &&
        delta > 0 &&
        && fps->vsync_method != VSYNC_DROP
#endif
        ) {
        // frame is slightly late: clip it to the expected position
        if (delta0 < -0.6) {
            av_log(ofp, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
        } else
            av_log(ofp, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
        sync_ipts = ofp->next_pts;
        duration += delta0;
        delta0 = 0;
    }

    switch (fps->vsync_method) {
    case VSYNC_VSCFR:
        // skip initial duplication instead of padding from ts 0
        if (fps->frame_number == 0 && delta0 >= 0.5) {
            av_log(ofp, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
            delta = duration;
            delta0 = 0;
            ofp->next_pts = llrint(sync_ipts);
        }
        /* fallthrough - VSCFR behaves like CFR after the initial adjustment */
    case VSYNC_CFR:
        // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
        if (frame_drop_threshold && delta < frame_drop_threshold && fps->frame_number) {
            *nb_frames = 0;
        } else if (delta < -1.1)
            *nb_frames = 0;
        else if (delta > 1.1) {
            *nb_frames = llrintf(delta);
            if (delta0 > 1.1)
                *nb_frames_prev = llrintf(delta0 - 0.6);
        }
        frame->duration = 1;
        break;
    case VSYNC_VFR:
        if (delta <= -0.6)
            *nb_frames = 0;
        else if (delta > 0.6)
            ofp->next_pts = llrint(sync_ipts);
        frame->duration = llrint(duration);
        break;
#if FFMPEG_OPT_VSYNC_DROP
    case VSYNC_DROP:
#endif
    case VSYNC_PASSTHROUGH:
        ofp->next_pts = llrint(sync_ipts);
        frame->duration = llrint(duration);
        break;
    default:
        av_assert0(0);
    }

finish:
    // shift the duplication history and record the new value
    memmove(fps->frames_prev_hist + 1,
            fps->frames_prev_hist,
            sizeof(fps->frames_prev_hist[0]) * (FF_ARRAY_ELEMS(fps->frames_prev_hist) - 1));
    fps->frames_prev_hist[0] = *nb_frames_prev;

    if (*nb_frames_prev == 0 && fps->last_dropped) {
        atomic_fetch_add(&ofilter->nb_frames_drop, 1);
        av_log(ofp, AV_LOG_VERBOSE,
               "*** dropping frame %"PRId64" at ts %"PRId64"\n",
               fps->frame_number, fps->last_frame->pts);
    }
    if (*nb_frames > (*nb_frames_prev && fps->last_dropped) + (*nb_frames > *nb_frames_prev)) {
        uint64_t nb_frames_dup;
        // refuse absurd duplication counts (badly broken timestamps)
        if (*nb_frames > dts_error_threshold * 30) {
            av_log(ofp, AV_LOG_ERROR, "%"PRId64" frame duplication too large, skipping\n", *nb_frames - 1);
            atomic_fetch_add(&ofilter->nb_frames_drop, 1);
            *nb_frames = 0;
            return;
        }
        nb_frames_dup = atomic_fetch_add(&ofilter->nb_frames_dup,
                                         *nb_frames - (*nb_frames_prev && fps->last_dropped) - (*nb_frames > *nb_frames_prev));
        av_log(ofp, AV_LOG_VERBOSE, "*** %"PRId64" dup!\n", *nb_frames - 1);
        if (nb_frames_dup > fps->dup_warning) {
            av_log(ofp, AV_LOG_WARNING, "More than %"PRIu64" frames duplicated\n", fps->dup_warning);
            fps->dup_warning *= 10;
        }
    }

    fps->last_dropped = *nb_frames == *nb_frames_prev && frame;
    fps->dropped_keyframe |= fps->last_dropped && (frame->flags & AV_FRAME_FLAG_KEY);
}
2374 
2376 {
2378  int ret;
2379 
2380  // we are finished and no frames were ever seen at this output,
2381  // at least initialize the encoder with a dummy frame
2382  if (!fgt->got_frame) {
2383  AVFrame *frame = fgt->frame;
2384  FrameData *fd;
2385 
2386  frame->time_base = ofp->tb_out;
2387  frame->format = ofp->format;
2388 
2389  frame->width = ofp->width;
2390  frame->height = ofp->height;
2391  frame->sample_aspect_ratio = ofp->sample_aspect_ratio;
2392 
2393  frame->sample_rate = ofp->sample_rate;
2394  if (ofp->ch_layout.nb_channels) {
2395  ret = av_channel_layout_copy(&frame->ch_layout, &ofp->ch_layout);
2396  if (ret < 0)
2397  return ret;
2398  }
2399 
2400  fd = frame_data(frame);
2401  if (!fd)
2402  return AVERROR(ENOMEM);
2403 
2404  fd->frame_rate_filter = ofp->fps.framerate;
2405 
2406  av_assert0(!frame->buf[0]);
2407 
2408  av_log(ofp, AV_LOG_WARNING,
2409  "No filtered frames for output stream, trying to "
2410  "initialize anyway.\n");
2411 
2412  ret = sch_filter_send(fgp->sch, fgp->sch_idx, ofp->index, frame);
2413  if (ret < 0) {
2415  return ret;
2416  }
2417  }
2418 
2419  fgt->eof_out[ofp->index] = 1;
2420 
2421  ret = sch_filter_send(fgp->sch, fgp->sch_idx, ofp->index, NULL);
2422  return (ret == AVERROR_EOF) ? 0 : ret;
2423 }
2424 
/* fg_output_frame(): send one filtered frame (or NULL for EOF) to the
 * consumers of a single filtergraph output, applying video framerate
 * conversion (duplication/dropping) via video_sync_process().
 * NOTE(review): the first signature line (static int fg_output_frame(
 * OutputFilterPriv *ofp, FilterGraphThread *fgt,) and doc lines 2428/2461
 * are missing from this extraction. */
2426  AVFrame *frame)
2427 {
2429  AVFrame *frame_prev = ofp->fps.last_frame;
2430  enum AVMediaType type = ofp->ofilter.type;
2431 
/* nb_frames: how many frames to emit for this input frame (0 = drop,
 * >1 = duplicate); starts as 1 for a real frame, 0 for EOF. */
2432  int64_t nb_frames = !!frame, nb_frames_prev = 0;
2433 
2434  if (type == AVMEDIA_TYPE_VIDEO && (frame || fgt->got_frame))
2435  video_sync_process(ofp, frame, &nb_frames, &nb_frames_prev);
2436 
2437  for (int64_t i = 0; i < nb_frames; i++) {
2438  AVFrame *frame_out;
2439  int ret;
2440 
2441  if (type == AVMEDIA_TYPE_VIDEO) {
/* Duplicates of the previous frame come first, then the current one. */
2442  AVFrame *frame_in = (i < nb_frames_prev && frame_prev->buf[0]) ?
2443  frame_prev : frame;
2444  if (!frame_in)
2445  break;
2446 
2447  frame_out = fgp->frame_enc;
2448  ret = av_frame_ref(frame_out, frame_in);
2449  if (ret < 0)
2450  return ret;
2451 
2452  frame_out->pts = ofp->next_pts;
2453 
/* If a keyframe was dropped by vsync, force the next emitted frame
 * to be a keyframe so seekability is preserved. */
2454  if (ofp->fps.dropped_keyframe) {
2455  frame_out->flags |= AV_FRAME_FLAG_KEY;
2456  ofp->fps.dropped_keyframe = 0;
2457  }
2458  } else {
/* Audio: rescale pts into the output timebase.
 * NOTE(review): doc line 2461 (the ofp->ts_offset subtrahend of this
 * expression) is missing from this extraction. */
2459  frame->pts = (frame->pts == AV_NOPTS_VALUE) ? ofp->next_pts :
2460  av_rescale_q(frame->pts, frame->time_base, ofp->tb_out) -
2462 
2463  frame->time_base = ofp->tb_out;
2464  frame->duration = av_rescale_q(frame->nb_samples,
2465  (AVRational){ 1, frame->sample_rate },
2466  ofp->tb_out);
2467 
2468  ofp->next_pts = frame->pts + frame->duration;
2469 
2470  frame_out = frame;
2471  }
2472 
2473  // send the frame to consumers
2474  ret = sch_filter_send(fgp->sch, fgp->sch_idx, ofp->index, frame_out);
2475  if (ret < 0) {
2476  av_frame_unref(frame_out);
2477 
/* Consumer rejected the frame: count this output as done exactly once. */
2478  if (!fgt->eof_out[ofp->index]) {
2479  fgt->eof_out[ofp->index] = 1;
2480  fgp->nb_outputs_done++;
2481  }
2482 
2483  return ret == AVERROR_EOF ? 0 : ret;
2484  }
2485 
2486  if (type == AVMEDIA_TYPE_VIDEO) {
2487  ofp->fps.frame_number++;
2488  ofp->next_pts++;
2489 
/* Only the first emitted copy keeps the keyframe flag. */
2490  if (i == nb_frames_prev && frame)
2491  frame->flags &= ~AV_FRAME_FLAG_KEY;
2492  }
2493 
2494  fgt->got_frame = 1;
2495  }
2496 
/* Remember the current frame for possible future duplication. */
2497  if (frame && frame_prev) {
2498  av_frame_unref(frame_prev);
2499  av_frame_move_ref(frame_prev, frame);
2500  }
2501 
2502  if (!frame)
2503  return close_output(ofp, fgt);
2504 
2505  return 0;
2506 }
2507 
/* fg_output_step(): pull one frame from a buffersink and forward it via
 * fg_output_frame(). Returns 0 when a frame was processed, 1 when no more
 * frames are currently available (EAGAIN/EOF), negative AVERROR on error.
 * NOTE(review): the signature line and several interior lines (2511, the
 * av_buffersink_get_frame_flags() call at 2516-2517, 2531, 2535, 2547,
 * 2554, 2558, 2567, 2576) are missing from this extraction; `ret` below is
 * the buffersink result. */
2509  AVFrame *frame)
2510 {
2512  AVFilterContext *filter = ofp->filter;
2513  FrameData *fd;
2514  int ret;
2515 
/* First EOF on this output: flush fg_output_frame() with NULL. */
2518  if (ret == AVERROR_EOF && !fgt->eof_out[ofp->index]) {
2519  ret = fg_output_frame(ofp, fgt, NULL);
2520  return (ret < 0) ? ret : 1;
2521  } else if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
2522  return 1;
2523  } else if (ret < 0) {
2524  av_log(ofp, AV_LOG_WARNING,
2525  "Error in retrieving a frame from the filtergraph: %s\n",
2526  av_err2str(ret));
2527  return ret;
2528  }
2529 
/* Output already closed: drop the frame silently. */
2530  if (fgt->eof_out[ofp->index]) {
2532  return 0;
2533  }
2534 
2536 
2537  if (debug_ts)
2538  av_log(ofp, AV_LOG_INFO, "filter_raw -> pts:%s pts_time:%s time_base:%d/%d\n",
2539  av_ts2str(frame->pts), av_ts2timestr(frame->pts, &frame->time_base),
2540  frame->time_base.num, frame->time_base.den);
2541 
2542  // Choose the output timebase the first time we get a frame.
2543  if (!ofp->tb_out_locked) {
2544  ret = choose_out_timebase(ofp, frame);
2545  if (ret < 0) {
2546  av_log(ofp, AV_LOG_ERROR, "Could not choose an output time base\n");
2548  return ret;
2549  }
2550  }
2551 
2552  fd = frame_data(frame);
2553  if (!fd) {
2555  return AVERROR(ENOMEM);
2556  }
2557 
2559 
2560  // only use bits_per_raw_sample passed through from the decoder
2561  // if the filtergraph did not touch the frame data
2562  if (!fgp->is_meta)
2563  fd->bits_per_raw_sample = 0;
2564 
2565  if (ofp->ofilter.type == AVMEDIA_TYPE_VIDEO) {
/* Derive a duration from the sink frame rate when the filter chain did
 * not set one (fr comes from the missing line 2567). */
2566  if (!frame->duration) {
2568  if (fr.num > 0 && fr.den > 0)
2569  frame->duration = av_rescale_q(1, av_inv_q(fr), frame->time_base);
2570  }
2571 
2572  fd->frame_rate_filter = ofp->fps.framerate;
2573  }
2574 
2575  ret = fg_output_frame(ofp, fgt, frame);
2577  if (ret < 0)
2578  return ret;
2579 
2580  return 0;
2581 }
2582 
2583 /* retrieve all frames available at filtergraph outputs
2584  * and send them to consumers */
/* NOTE(review): the signature line (static int read_frames(FilterGraph *fg,
 * FilterGraphThread *fgt,) upstream) and the avfilter_graph_request_oldest()
 * call before line 2612 are missing from this extraction.
 * Returns 0 normally, AVERROR_EOF once every output is finished, or another
 * negative AVERROR on failure. Also selects fgt->next_in, the input the
 * scheduler should feed next. */
2586  AVFrame *frame)
2587 {
2588  FilterGraphPriv *fgp = fgp_from_fg(fg);
2589  int did_step = 0;
2590 
2591  // graph not configured, just select the input to request
2592  if (!fgt->graph) {
2593  for (int i = 0; i < fg->nb_inputs; i++) {
/* ifp comes from the ifp_from_ifilter() line missing at 2594. */
2595  if (ifp->format < 0 && !fgt->eof_in[i]) {
2596  fgt->next_in = i;
2597  return 0;
2598  }
2599  }
2600 
2601  // This state - graph is not configured, but all inputs are either
2602  // initialized or EOF - should be unreachable because sending EOF to a
2603  // filter without even a fallback format should fail
2604  av_assert0(0);
2605  return AVERROR_BUG;
2606  }
2607 
2608  while (fgp->nb_outputs_done < fg->nb_outputs) {
2609  int ret;
2610 
/* ret is the result of requesting a frame from the graph (call at the
 * missing line 2611). EAGAIN means the graph needs more input. */
2612  if (ret == AVERROR(EAGAIN)) {
2613  fgt->next_in = choose_input(fg, fgt);
2614  break;
2615  } else if (ret < 0) {
2616  if (ret == AVERROR_EOF)
2617  av_log(fg, AV_LOG_VERBOSE, "Filtergraph returned EOF, finishing\n");
2618  else
2619  av_log(fg, AV_LOG_ERROR,
2620  "Error requesting a frame from the filtergraph: %s\n",
2621  av_err2str(ret));
2622  return ret;
2623  }
2624  fgt->next_in = fg->nb_inputs;
2625 
2626  // return after one iteration, so that scheduler can rate-control us
2627  if (did_step && fgp->have_sources)
2628  return 0;
2629 
2630  /* Reap all buffers present in the buffer sinks */
2631  for (int i = 0; i < fg->nb_outputs; i++) {
/* ofp comes from the ofp_from_ofilter() line missing at 2632. */
2633 
2634  ret = 0;
2635  while (!ret) {
2636  ret = fg_output_step(ofp, fgt, frame);
2637  if (ret < 0)
2638  return ret;
2639  }
2640  }
2641  did_step = 1;
2642  }
2643 
2644  return (fgp->nb_outputs_done == fg->nb_outputs) ? AVERROR_EOF : 0;
2645 }
2646 
/* sub2video_heartbeat(): keep the sub2video stream alive while no new
 * subtitle arrives, by re-pushing or refreshing the current subpicture at
 * time `pts` (expressed in timebase `tb`).
 * NOTE(review): the signature line (static void sub2video_heartbeat(
 * InputFilter *ifilter, int64_t pts, AVRational tb) upstream) is missing
 * from this extraction. */
2648 {
2649  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2650  int64_t pts2;
2651 
2652  /* subtitles seem to be usually muxed ahead of other streams;
2653  if not, subtracting a larger time here is necessary */
2654  pts2 = av_rescale_q(pts, tb, ifp->time_base) - 1;
2655 
2656  /* do not send the heartbeat frame if the subtitle is already ahead */
2657  if (pts2 <= ifp->sub2video.last_pts)
2658  return;
2659 
2660  if (pts2 >= ifp->sub2video.end_pts || ifp->sub2video.initialize)
2661  /* if we have hit the end of the current displayed subpicture,
2662  or if we need to initialize the system, update the
2663  overlayed subpicture and its start/end times */
2664  sub2video_update(ifp, pts2 + 1, NULL);
2665  else
2666  sub2video_push_ref(ifp, pts2);
2667 }
2668 
/* sub2video_frame(): feed a decoded subtitle (rendered as video) into the
 * filtergraph. `frame` carries an AVSubtitle in buf[0]->data; a frame with
 * no buf[0] is a heartbeat, NULL frame means EOF. When `buffer` is set the
 * graph is not configured yet, so the frame is queued for later.
 * Returns 0 on success or a negative AVERROR. */
2669 static int sub2video_frame(InputFilter *ifilter, AVFrame *frame, int buffer)
2670 {
2671  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2672  int ret;
2673 
2674  if (buffer) {
2675  AVFrame *tmp;
2676 
2677  if (!frame)
2678  return 0;
2679 
2680  tmp = av_frame_alloc();
2681  if (!tmp)
2682  return AVERROR(ENOMEM);
2683 
/* NOTE(review): the av_frame_move_ref(tmp, frame) line at doc line 2684
 * is missing from this extraction. */
2685 
2686  ret = av_fifo_write(ifp->frame_queue, &tmp, 1);
2687  if (ret < 0) {
2688  av_frame_free(&tmp);
2689  return ret;
2690  }
2691 
2692  return 0;
2693  }
2694 
2695  // heartbeat frame
2696  if (frame && !frame->buf[0]) {
2697  sub2video_heartbeat(ifilter, frame->pts, frame->time_base);
2698  return 0;
2699  }
2700 
/* EOF: end the current subpicture and close the buffer source. */
2701  if (!frame) {
2702  if (ifp->sub2video.end_pts < INT64_MAX)
2703  sub2video_update(ifp, INT64_MAX, NULL);
2704 
2705  return av_buffersrc_add_frame(ifp->filter, NULL);
2706  }
2707 
2708  ifp->width = frame->width ? frame->width : ifp->width;
2709  ifp->height = frame->height ? frame->height : ifp->height;
2710 
2711  sub2video_update(ifp, INT64_MIN, (const AVSubtitle*)frame->buf[0]->data);
2712 
2713  return 0;
2714 }
2715 
/* send_eof(): propagate end-of-stream on one filtergraph input. If the
 * buffersrc exists, close it at the rescaled `pts`; otherwise fill in the
 * fallback stream parameters and, if that completes the input set, configure
 * the whole graph. Idempotent per input. Returns 0 or a negative AVERROR.
 * NOTE(review): several interior lines are missing from this extraction
 * (2729: rounding flags; 2731: the av_buffersrc_close() call; 2741; 2746). */
2716 static int send_eof(FilterGraphThread *fgt, InputFilter *ifilter,
2717  int64_t pts, AVRational tb)
2718 {
2719  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2720  int ret;
2721 
2722  if (fgt->eof_in[ifp->index])
2723  return 0;
2724 
2725  fgt->eof_in[ifp->index] = 1;
2726 
2727  if (ifp->filter) {
2728  pts = av_rescale_q_rnd(pts, tb, ifp->time_base,
2730 
/* ret comes from closing the buffersrc (missing line 2731). */
2732  if (ret < 0)
2733  return ret;
2734  } else {
2735  if (ifp->format < 0) {
2736  // the filtergraph was never configured, use the fallback parameters
2737  ifp->format = ifp->opts.fallback->format;
2738  ifp->sample_rate = ifp->opts.fallback->sample_rate;
2739  ifp->width = ifp->opts.fallback->width;
2740  ifp->height = ifp->opts.fallback->height;
2742  ifp->color_space = ifp->opts.fallback->colorspace;
2743  ifp->color_range = ifp->opts.fallback->color_range;
2744  ifp->time_base = ifp->opts.fallback->time_base;
2745 
2747  &ifp->opts.fallback->ch_layout);
2748  if (ret < 0)
2749  return ret;
2750 
/* With the fallback filled in, the graph may now be configurable. */
2751  if (ifilter_has_all_input_formats(ifilter->graph)) {
2752  ret = configure_filtergraph(ifilter->graph, fgt);
2753  if (ret < 0) {
2754  av_log(ifilter->graph, AV_LOG_ERROR, "Error initializing filters!\n");
2755  return ret;
2756  }
2757  }
2758  }
2759 
2760  if (ifp->format < 0) {
2761  av_log(ifilter->graph, AV_LOG_ERROR,
2762  "Cannot determine format of input %s after EOF\n",
2763  ifp->opts.name);
2764  return AVERROR_INVALIDDATA;
2765  }
2766  }
2767 
2768  return 0;
2769 }
2770 
/* Bitmask flags describing why a filtergraph needs reinitialization when
 * input parameters change mid-stream (used by send_frame() below).
 * NOTE(review): the `enum ... {` opening line at doc line 2771 is missing
 * from this extraction. */
2772  VIDEO_CHANGED = (1 << 0),
2773  AUDIO_CHANGED = (1 << 1),
2774  MATRIX_CHANGED = (1 << 2),
2775  HWACCEL_CHANGED = (1 << 3)
2776 };
2777 
/**
 * Map a NULL string to the literal "unknown".
 *
 * Convenience helper for log messages built from format/colorspace name
 * lookups that may return NULL.
 *
 * @param str string to check, may be NULL
 * @return str itself if non-NULL, otherwise the static string "unknown"
 */
static const char *unknown_if_null(const char *str)
{
    return str ? str : "unknown";
}
2782 
/* send_frame(): push one decoded frame into a filtergraph input, detecting
 * mid-stream parameter changes (format, geometry, layout, display matrix,
 * hwaccel) and reconfiguring the graph when needed and allowed.
 * NOTE(review): the first signature line (static int send_frame(FilterGraph
 * *fg, FilterGraphThread *fgt,) upstream) and several interior lines (2809,
 * 2824, 2837, 2853, 2889, 2894, 2896-2897, 2899) are missing from this
 * extraction. Returns 0 or a negative AVERROR. */
2784  InputFilter *ifilter, AVFrame *frame)
2785 {
2786  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2787  FrameData *fd;
2788  AVFrameSideData *sd;
2789  int need_reinit = 0, ret;
2790 
2791  /* determine if the parameters for this input changed */
2792  switch (ifp->type) {
2793  case AVMEDIA_TYPE_AUDIO:
2794  if (ifp->format != frame->format ||
2795  ifp->sample_rate != frame->sample_rate ||
2796  av_channel_layout_compare(&ifp->ch_layout, &frame->ch_layout))
2797  need_reinit |= AUDIO_CHANGED;
2798  break;
2799  case AVMEDIA_TYPE_VIDEO:
2800  if (ifp->format != frame->format ||
2801  ifp->width != frame->width ||
2802  ifp->height != frame->height ||
2803  ifp->color_space != frame->colorspace ||
2804  ifp->color_range != frame->color_range)
2805  need_reinit |= VIDEO_CHANGED;
2806  break;
2807  }
2808 
/* `sd` is presumably assigned from av_frame_get_side_data() on the missing
 * line 2809 — compare the frame's display matrix with the stored one. */
2810  if (!ifp->displaymatrix_present ||
2811  memcmp(sd->data, ifp->displaymatrix, sizeof(ifp->displaymatrix)))
2812  need_reinit |= MATRIX_CHANGED;
2813  } else if (ifp->displaymatrix_present)
2814  need_reinit |= MATRIX_CHANGED;
2815 
/* Reinit on parameter change only when the input allows it. */
2816  if (!(ifp->opts.flags & IFILTER_FLAG_REINIT) && fgt->graph)
2817  need_reinit = 0;
2818 
2819  if (!!ifp->hw_frames_ctx != !!frame->hw_frames_ctx ||
2820  (ifp->hw_frames_ctx && ifp->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2821  need_reinit |= HWACCEL_CHANGED;
2822 
2823  if (need_reinit) {
2825  if (ret < 0)
2826  return ret;
2827  }
2828 
2829  /* (re)init the graph if possible, otherwise buffer the frame and return */
2830  if (need_reinit || !fgt->graph) {
2831  AVFrame *tmp = av_frame_alloc();
2832 
2833  if (!tmp)
2834  return AVERROR(ENOMEM);
2835 
/* Not all inputs known yet: queue the frame for later configuration.
 * (The av_frame_move_ref(tmp, frame) at line 2837 is missing here.) */
2836  if (!ifilter_has_all_input_formats(fg)) {
2838 
2839  ret = av_fifo_write(ifp->frame_queue, &tmp, 1);
2840  if (ret < 0)
2841  av_frame_free(&tmp);
2842 
2843  return ret;
2844  }
2845 
/* Drain the old graph before reconfiguring so no frames are lost. */
2846  ret = fgt->graph ? read_frames(fg, fgt, tmp) : 0;
2847  av_frame_free(&tmp);
2848  if (ret < 0)
2849  return ret;
2850 
2851  if (fgt->graph) {
2852  AVBPrint reason;
/* av_bprint_init(&reason, ...) at line 2853 is missing here. */
2854  if (need_reinit & AUDIO_CHANGED) {
2855  const char *sample_format_name = av_get_sample_fmt_name(frame->format);
2856  av_bprintf(&reason, "audio parameters changed to %d Hz, ", frame->sample_rate);
2857  av_channel_layout_describe_bprint(&frame->ch_layout, &reason);
2858  av_bprintf(&reason, ", %s, ", unknown_if_null(sample_format_name));
2859  }
2860  if (need_reinit & VIDEO_CHANGED) {
2861  const char *pixel_format_name = av_get_pix_fmt_name(frame->format);
2862  const char *color_space_name = av_color_space_name(frame->colorspace);
2863  const char *color_range_name = av_color_range_name(frame->color_range);
2864  av_bprintf(&reason, "video parameters changed to %s(%s, %s), %dx%d, ",
2865  unknown_if_null(pixel_format_name), unknown_if_null(color_range_name),
2866  unknown_if_null(color_space_name), frame->width, frame->height);
2867  }
2868  if (need_reinit & MATRIX_CHANGED)
2869  av_bprintf(&reason, "display matrix changed, ");
2870  if (need_reinit & HWACCEL_CHANGED)
2871  av_bprintf(&reason, "hwaccel changed, ");
2872  if (reason.len > 1)
2873  reason.str[reason.len - 2] = '\0'; // remove last comma
2874  av_log(fg, AV_LOG_INFO, "Reconfiguring filter graph%s%s\n", reason.len ? " because " : "", reason.str);
2875  }
2876 
2877  ret = configure_filtergraph(fg, fgt);
2878  if (ret < 0) {
2879  av_log(fg, AV_LOG_ERROR, "Error reinitializing filters!\n");
2880  return ret;
2881  }
2882  }
2883 
/* Normalize timestamps into the input's internal timebase. */
2884  frame->pts = av_rescale_q(frame->pts, frame->time_base, ifp->time_base);
2885  frame->duration = av_rescale_q(frame->duration, frame->time_base, ifp->time_base);
2886  frame->time_base = ifp->time_base;
2887 
2888  if (ifp->displaymatrix_applied)
2890 
2891  fd = frame_data(frame);
2892  if (!fd)
2893  return AVERROR(ENOMEM);
2895 
/* ret below is the buffersrc submission result (call at the missing
 * lines 2896-2897). */
2898  if (ret < 0) {
2900  if (ret != AVERROR_EOF)
2901  av_log(fg, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2902  return ret;
2903  }
2904 
2905  return 0;
2906 }
2907 
/* fg_thread_set_name(): give the filtering thread a short descriptive name —
 * "<type>f<output-name>" for a simple graph, "fc<index>" for a complex one.
 * NOTE(review): the media-type-character argument line (2914) and the
 * ff_thread_setname(name) call (2920) are missing from this extraction. */
2908 static void fg_thread_set_name(const FilterGraph *fg)
2909 {
2910  char name[16];
2911  if (filtergraph_is_simple(fg)) {
2912  OutputFilterPriv *ofp = ofp_from_ofilter(fg->outputs[0]);
2913  snprintf(name, sizeof(name), "%cf%s",
2915  ofp->name);
2916  } else {
2917  snprintf(name, sizeof(name), "fc%d", fg->index);
2918  }
2919 
2921 }
2922 
/* fg_thread_uninit(): release all per-thread filtering state — queued output
 * frames, the scratch frame, EOF flag arrays and the filter graph — then
 * zero the struct so it can be reused or safely re-freed.
 * NOTE(review): the signature line (static void fg_thread_uninit(
 * FilterGraphThread *fgt) upstream) and the av_fifo_freep2() call at doc
 * line 2929 are missing from this extraction. */
2924 {
2925  if (fgt->frame_queue_out) {
2926  AVFrame *frame;
2927  while (av_fifo_read(fgt->frame_queue_out, &frame, 1) >= 0)
2928  av_frame_free(&frame);
2930  }
2931 
2932  av_frame_free(&fgt->frame);
2933  av_freep(&fgt->eof_in);
2934  av_freep(&fgt->eof_out);
2935 
2936  avfilter_graph_free(&fgt->graph);
2937 
2938  memset(fgt, 0, sizeof(*fgt));
2939 }
2940 
/* fg_thread_init(): allocate the per-thread filtering state: a scratch
 * frame, per-input and per-output EOF flag arrays, and the output frame
 * queue. On any failure everything is released via fg_thread_uninit() and
 * AVERROR(ENOMEM) is returned.
 * NOTE(review): the frame_queue_out allocation line (doc line 2957, an
 * av_fifo_alloc2() call upstream) is missing from this extraction. */
2941 static int fg_thread_init(FilterGraphThread *fgt, const FilterGraph *fg)
2942 {
2943  memset(fgt, 0, sizeof(*fgt));
2944 
2945  fgt->frame = av_frame_alloc();
2946  if (!fgt->frame)
2947  goto fail;
2948 
2949  fgt->eof_in = av_calloc(fg->nb_inputs, sizeof(*fgt->eof_in));
2950  if (!fgt->eof_in)
2951  goto fail;
2952 
2953  fgt->eof_out = av_calloc(fg->nb_outputs, sizeof(*fgt->eof_out));
2954  if (!fgt->eof_out)
2955  goto fail;
2956 
2958  if (!fgt->frame_queue_out)
2959  goto fail;
2960 
2961  return 0;
2962 
2963 fail:
2964  fg_thread_uninit(fgt);
2965  return AVERROR(ENOMEM);
2966 }
2967 
/* filter_thread(): main loop of a filtergraph worker. Receives input frames,
 * EOFs and control messages from the scheduler, feeds them into the graph,
 * and forwards filtered frames to consumers until all inputs and outputs
 * are drained. Returns 0 on normal termination or a negative AVERROR.
 * NOTE(review): a few lines are missing from this extraction (2983: the
 * ifilter_has_all_input_formats() condition; 3038: presumably timestamp
 * setup before send_eof()). */
2968 static int filter_thread(void *arg)
2969 {
2970  FilterGraphPriv *fgp = arg;
2971  FilterGraph *fg = &fgp->fg;
2972 
2973  FilterGraphThread fgt;
2974  int ret = 0, input_status = 0;
2975 
2976  ret = fg_thread_init(&fgt, fg);
2977  if (ret < 0)
2978  goto finish;
2979 
2980  fg_thread_set_name(fg);
2981 
2982  // if we have all input parameters the graph can now be configured
2984  ret = configure_filtergraph(fg, &fgt);
2985  if (ret < 0) {
2986  av_log(fg, AV_LOG_ERROR, "Error configuring filter graph: %s\n",
2987  av_err2str(ret));
2988  goto finish;
2989  }
2990  }
2991 
2992  while (1) {
2993  InputFilter *ifilter;
2994  InputFilterPriv *ifp;
2995  enum FrameOpaque o;
2996  unsigned input_idx = fgt.next_in;
2997 
2998  input_status = sch_filter_receive(fgp->sch, fgp->sch_idx,
2999  &input_idx, fgt.frame);
3000  if (input_status == AVERROR_EOF) {
3001  av_log(fg, AV_LOG_VERBOSE, "Filtering thread received EOF\n");
3002  break;
3003  } else if (input_status == AVERROR(EAGAIN)) {
3004  // should only happen when we didn't request any input
3005  av_assert0(input_idx == fg->nb_inputs);
3006  goto read_frames;
3007  }
3008  av_assert0(input_status >= 0);
3009 
3010  o = (intptr_t)fgt.frame->opaque;
3011 
/* NOTE(review): the assignment below duplicates the one above; upstream
 * has it only once — likely an artifact of this extraction. Harmless. */
3012  o = (intptr_t)fgt.frame->opaque;
3013 
3014  // message on the control stream
3015  if (input_idx == fg->nb_inputs) {
3016  FilterCommand *fc;
3017 
3018  av_assert0(o == FRAME_OPAQUE_SEND_COMMAND && fgt.frame->buf[0]);
3019 
3020  fc = (FilterCommand*)fgt.frame->buf[0]->data;
3021  send_command(fg, fgt.graph, fc->time, fc->target, fc->command, fc->arg,
3022  fc->all_filters);
3023  av_frame_unref(fgt.frame);
3024  continue;
3025  }
3026 
3027  // we received an input frame or EOF
3028  ifilter = fg->inputs[input_idx];
3029  ifp = ifp_from_ifilter(ifilter);
3030 
3031  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE) {
3032  int hb_frame = input_status >= 0 && o == FRAME_OPAQUE_SUB_HEARTBEAT;
3033  ret = sub2video_frame(ifilter, (fgt.frame->buf[0] || hb_frame) ? fgt.frame : NULL,
3034  !fgt.graph);
3035  } else if (fgt.frame->buf[0]) {
3036  ret = send_frame(fg, &fgt, ifilter, fgt.frame);
3037  } else {
/* Empty frame on an input == EOF marker carrying the final pts. */
3039  ret = send_eof(&fgt, ifilter, fgt.frame->pts, fgt.frame->time_base);
3040  }
3041  av_frame_unref(fgt.frame);
3042  if (ret == AVERROR_EOF) {
3043  av_log(fg, AV_LOG_VERBOSE, "Input %u no longer accepts new data\n",
3044  input_idx);
3045  sch_filter_receive_finish(fgp->sch, fgp->sch_idx, input_idx);
3046  continue;
3047  }
3048  if (ret < 0)
3049  goto finish;
3050 
3051 read_frames:
3052  // retrieve all newly available frames
3053  ret = read_frames(fg, &fgt, fgt.frame);
3054  if (ret == AVERROR_EOF) {
3055  av_log(fg, AV_LOG_VERBOSE, "All consumers returned EOF\n");
3056  break;
3057  } else if (ret < 0) {
3058  av_log(fg, AV_LOG_ERROR, "Error sending frames to consumers: %s\n",
3059  av_err2str(ret));
3060  goto finish;
3061  }
3062  }
3063 
/* Flush every output that has not yet seen EOF. */
3064  for (unsigned i = 0; i < fg->nb_outputs; i++) {
3066 
3067  if (fgt.eof_out[i] || !fgt.graph)
3068  continue;
3069 
3070  ret = fg_output_frame(ofp, &fgt, NULL);
3071  if (ret < 0)
3072  goto finish;
3073  }
3074 
3075 finish:
3076  // EOF is normal termination
3077  if (ret == AVERROR_EOF)
3078  ret = 0;
3079 
3080  fg_thread_uninit(&fgt);
3081 
3082  return ret;
3083 }
3084 
3085 void fg_send_command(FilterGraph *fg, double time, const char *target,
3086  const char *command, const char *arg, int all_filters)
3087 {
3088  FilterGraphPriv *fgp = fgp_from_fg(fg);
3089  AVBufferRef *buf;
3090  FilterCommand *fc;
3091 
3092  fc = av_mallocz(sizeof(*fc));
3093  if (!fc)
3094  return;
3095 
3096  buf = av_buffer_create((uint8_t*)fc, sizeof(*fc), filter_command_free, NULL, 0);
3097  if (!buf) {
3098  av_freep(&fc);
3099  return;
3100  }
3101 
3102  fc->target = av_strdup(target);
3103  fc->command = av_strdup(command);
3104  fc->arg = av_strdup(arg);
3105  if (!fc->target || !fc->command || !fc->arg) {
3106  av_buffer_unref(&buf);
3107  return;
3108  }
3109 
3110  fc->time = time;
3111  fc->all_filters = all_filters;
3112 
3113  fgp->frame->buf[0] = buf;
3114  fgp->frame->opaque = (void*)(intptr_t)FRAME_OPAQUE_SEND_COMMAND;
3115 
3116  sch_filter_command(fgp->sch, fgp->sch_idx, fgp->frame);
3117 }
AV_OPT_SEARCH_CHILDREN
#define AV_OPT_SEARCH_CHILDREN
Search in possible children of the given object first.
Definition: opt.h:605
SCH_FILTER_OUT
#define SCH_FILTER_OUT(filter, output)
Definition: ffmpeg_sched.h:129
AVSubtitle
Definition: avcodec.h:2238
formats
formats
Definition: signature.h:47
AVBufferSrcParameters::color_space
enum AVColorSpace color_space
Video only, the YUV colorspace and range.
Definition: buffersrc.h:121
configure_input_filter
static int configure_input_filter(FilterGraph *fg, AVFilterGraph *graph, InputFilter *ifilter, AVFilterInOut *in)
Definition: ffmpeg_filter.c:1848
FilterGraphThread::next_in
unsigned next_in
Definition: ffmpeg_filter.c:95
AVMEDIA_TYPE_SUBTITLE
@ AVMEDIA_TYPE_SUBTITLE
Definition: avutil.h:204
av_gettime_relative
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
Definition: time.c:56
AVFILTER_CMD_FLAG_ONE
#define AVFILTER_CMD_FLAG_ONE
Stop once a filter understood the command (for target=all for example), fast filters are favored auto...
Definition: avfilter.h:661
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:215
AVFrame::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: frame.h:668
AV_BPRINT_SIZE_UNLIMITED
#define AV_BPRINT_SIZE_UNLIMITED
av_buffersink_get_ch_layout
int av_buffersink_get_ch_layout(const AVFilterContext *ctx, AVChannelLayout *out)
Definition: buffersink.c:353
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
av_buffersink_get_sample_aspect_ratio
AVRational av_buffersink_get_sample_aspect_ratio(const AVFilterContext *ctx)
name
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option name
Definition: writing_filters.txt:88
avfilter_filter_pad_count
unsigned avfilter_filter_pad_count(const AVFilter *filter, int is_output)
Get the number of elements in an AVFilter's inputs or outputs array.
Definition: avfilter.c:629
extra_bits
#define extra_bits(eb)
Definition: intrax8.c:120
OutputFilter::graph
struct FilterGraph * graph
Definition: ffmpeg.h:357
av_clip
#define av_clip
Definition: common.h:100
InputFilterPriv::type
enum AVMediaType type
Definition: ffmpeg_filter.c:121
sch_filter_send
int sch_filter_send(Scheduler *sch, unsigned fg_idx, unsigned out_idx, AVFrame *frame)
Called by filtergraph tasks to send a filtered frame or EOF to consumers.
Definition: ffmpeg_sched.c:2468
OutputFilter::class
const AVClass * class
Definition: ffmpeg.h:355
view_specifier_parse
int view_specifier_parse(const char **pspec, ViewSpecifier *vs)
Definition: ffmpeg_opt.c:241
VSYNC_VFR
@ VSYNC_VFR
Definition: ffmpeg.h:69
OutputFilterPriv::filter
AVFilterContext * filter
Definition: ffmpeg_filter.c:198
av_bprint_is_complete
static int av_bprint_is_complete(const AVBPrint *buf)
Test if the print buffer is complete (not truncated).
Definition: bprint.h:218
r
const char * r
Definition: vf_curves.c:127
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
nb_input_files
int nb_input_files
Definition: ffmpeg.c:105
AVSubtitle::rects
AVSubtitleRect ** rects
Definition: avcodec.h:2243
opt.h
choose_input
static int choose_input(const FilterGraph *fg, const FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:2108
get_rotation
double get_rotation(const int32_t *displaymatrix)
Definition: cmdutils.c:1477
FilterGraphPriv::frame
AVFrame * frame
Definition: ffmpeg_filter.c:65
read_binary
static int read_binary(void *logctx, const char *path, uint8_t **data, int *len)
Definition: ffmpeg_filter.c:429
FilterGraphPriv::sch
Scheduler * sch
Definition: ffmpeg_filter.c:69
AVCodecParameters::codec_type
enum AVMediaType codec_type
General type of the encoded data.
Definition: codec_par.h:51
FilterGraphThread::got_frame
int got_frame
Definition: ffmpeg_filter.c:97
AVFilterGraph::nb_threads
int nb_threads
Maximum number of threads used by filters in this graph.
Definition: avfilter.h:807
InputFilterPriv::ch_layout
AVChannelLayout ch_layout
Definition: ffmpeg_filter.c:138
avfilter_pad_get_name
const char * avfilter_pad_get_name(const AVFilterPad *pads, int pad_idx)
Get the name of an AVFilterPad.
Definition: avfilter.c:982
FrameData
Definition: ffmpeg.h:655
send_command
static void send_command(FilterGraph *fg, AVFilterGraph *graph, double time, const char *target, const char *command, const char *arg, int all_filters)
Definition: ffmpeg_filter.c:2083
InputFilterPriv::last_pts
int64_t last_pts
Definition: ffmpeg_filter.c:153
avfilter_graph_segment_create_filters
int avfilter_graph_segment_create_filters(AVFilterGraphSegment *seg, int flags)
Create filters specified in a graph segment.
Definition: graphparser.c:516
InputFilterOptions::crop_right
unsigned crop_right
Definition: ffmpeg.h:282
OutputFilter::apad
char * apad
Definition: ffmpeg.h:365
out
FILE * out
Definition: movenc.c:55
av_frame_get_buffer
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:304
av_bprint_init
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:69
av_frame_get_side_data
AVFrameSideData * av_frame_get_side_data(const AVFrame *frame, enum AVFrameSideDataType type)
Definition: frame.c:963
InputFilterPriv::filter
AVFilterContext * filter
Definition: ffmpeg_filter.c:111
atomic_fetch_add
#define atomic_fetch_add(object, operand)
Definition: stdatomic.h:137
sample_fmts
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:948
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:3170
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:90
FilterGraph::inputs
InputFilter ** inputs
Definition: ffmpeg.h:377
av_buffersink_get_frame_flags
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
Definition: buffersink.c:142
InputFilterOptions::crop_bottom
unsigned crop_bottom
Definition: ffmpeg.h:280
av_dict_count
int av_dict_count(const AVDictionary *m)
Get number of entries in dictionary.
Definition: dict.c:39
ifilter_parameters_from_frame
static int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame)
Definition: ffmpeg_filter.c:2042
stream_specifier_parse
int stream_specifier_parse(StreamSpecifier *ss, const char *spec, int allow_remainder, void *logctx)
Parse a stream specifier string into a form suitable for matching.
Definition: cmdutils.c:1009
ofilter_class
static const AVClass ofilter_class
Definition: ffmpeg_filter.c:633
HWACCEL_CHANGED
@ HWACCEL_CHANGED
Definition: ffmpeg_filter.c:2775
frame_drop_threshold
float frame_drop_threshold
Definition: ffmpeg_opt.c:62
AV_TIME_BASE_Q
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:264
ist_filter_add
int ist_filter_add(InputStream *ist, InputFilter *ifilter, int is_simple, const ViewSpecifier *vs, InputFilterOptions *opts, SchedulerNode *src)
Definition: ffmpeg_demux.c:977
InputFilterPriv::time_base
AVRational time_base
Definition: ffmpeg_filter.c:140
int64_t
long long int64_t
Definition: coverity.c:34
output
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce output
Definition: filter_design.txt:225
configure_output_filter
static int configure_output_filter(FilterGraphPriv *fgp, AVFilterGraph *graph, OutputFilter *ofilter, AVFilterInOut *out)
Definition: ffmpeg_filter.c:1671
FilterCommand::arg
char * arg
Definition: ffmpeg_filter.c:247
AVSubtitleRect
Definition: avcodec.h:2211
av_asprintf
char * av_asprintf(const char *fmt,...)
Definition: avstring.c:115
AVSubtitle::num_rects
unsigned num_rects
Definition: avcodec.h:2242
dec_filter_add
int dec_filter_add(Decoder *dec, InputFilter *ifilter, InputFilterOptions *opts, const ViewSpecifier *vs, SchedulerNode *src)
Definition: ffmpeg_dec.c:1748
fg_free
void fg_free(FilterGraph **pfg)
Definition: ffmpeg_filter.c:985
FPSConvContext::frames_prev_hist
int64_t frames_prev_hist[3]
Definition: ffmpeg_filter.c:173
AV_CODEC_ID_MPEG4
@ AV_CODEC_ID_MPEG4
Definition: codec_id.h:64
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:162
AVFrame::opaque
void * opaque
Frame owner's private data.
Definition: frame.h:537
AVFrame::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: frame.h:679
InputFile::index
int index
Definition: ffmpeg.h:471
sample_rates
static const int sample_rates[]
Definition: dcaenc.h:34
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:389
tmp
static uint8_t tmp[11]
Definition: aes_ctr.c:28
AVFilterInOut::next
struct AVFilterInOut * next
next input/output in the list, NULL if this is the last
Definition: avfilter.h:941
pixdesc.h
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:501
AVFrame::width
int width
Definition: frame.h:461
FilterGraphPriv::log_name
char log_name[32]
Definition: ffmpeg_filter.c:48
w
uint8_t w
Definition: llviddspenc.c:38
FilterGraphPriv::have_sources
int have_sources
Definition: ffmpeg_filter.c:55
StreamSpecifier
Definition: cmdutils.h:113
ofilter_bind_enc
int ofilter_bind_enc(OutputFilter *ofilter, unsigned sched_idx_enc, const OutputFilterOptions *opts)
Definition: ffmpeg_filter.c:790
AVOption
AVOption.
Definition: opt.h:429
fg_output_frame
static int fg_output_frame(OutputFilterPriv *ofp, FilterGraphThread *fgt, AVFrame *frame)
Definition: ffmpeg_filter.c:2425
av_buffersrc_add_frame
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
Definition: buffersrc.c:173
FilterGraph::index
int index
Definition: ffmpeg.h:375
InputFilterPriv::sample_rate
int sample_rate
Definition: ffmpeg_filter.c:137
data
const char data[16]
Definition: mxf.c:149
FPSConvContext::last_dropped
int last_dropped
Definition: ffmpeg_filter.c:177
OutputFilterPriv::ts_offset
int64_t ts_offset
Definition: ffmpeg_filter.c:232
cleanup_filtergraph
static void cleanup_filtergraph(FilterGraph *fg, FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:1858
ffmpeg.h
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:225
filter
void(* filter)(uint8_t *src, int stride, int qscale)
Definition: h263dsp.c:29
fc
#define fc(width, name, range_min, range_max)
Definition: cbs_av1.c:472
FilterGraph::nb_inputs
int nb_inputs
Definition: ffmpeg.h:378
VIDEO_CHANGED
@ VIDEO_CHANGED
Definition: ffmpeg_filter.c:2772
AV_FRAME_DATA_DISPLAYMATRIX
@ AV_FRAME_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: frame.h:85
ViewSpecifier
Definition: ffmpeg.h:128
AVDictionary
Definition: dict.c:34
AVFrame::flags
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:661
ofp_from_ofilter
static OutputFilterPriv * ofp_from_ofilter(OutputFilter *ofilter)
Definition: ffmpeg_filter.c:239
AVChannelLayout::order
enum AVChannelOrder order
Channel order used in this layout.
Definition: channel_layout.h:322
av_buffer_ref
AVBufferRef * av_buffer_ref(const AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:103
IFILTER_FLAG_AUTOROTATE
@ IFILTER_FLAG_AUTOROTATE
Definition: ffmpeg.h:260
AVChannelLayout::nb_channels
int nb_channels
Number of channels in this layout.
Definition: channel_layout.h:327
configure_output_audio_filter
static int configure_output_audio_filter(FilterGraphPriv *fgp, AVFilterGraph *graph, OutputFilter *ofilter, AVFilterInOut *out)
Definition: ffmpeg_filter.c:1589
AVFrame::buf
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:602
AVBufferSrcParameters::height
int height
Definition: buffersrc.h:87
avio_size
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:323
av_strlcatf
size_t av_strlcatf(char *dst, size_t size, const char *fmt,...)
Definition: avstring.c:103
fg_output_step
static int fg_output_step(OutputFilterPriv *ofp, FilterGraphThread *fgt, AVFrame *frame)
Definition: ffmpeg_filter.c:2508
FilterGraphPriv
Definition: ffmpeg_filter.c:44
av_channel_layout_describe_bprint
int av_channel_layout_describe_bprint(const AVChannelLayout *channel_layout, AVBPrint *bp)
bprint variant of av_channel_layout_describe().
Definition: channel_layout.c:597
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:30
FilterGraphThread::eof_in
uint8_t * eof_in
Definition: ffmpeg_filter.c:100
avfilter_graph_free
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
Definition: avfiltergraph.c:117
configure_filtergraph
static int configure_filtergraph(FilterGraph *fg, FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:1893
OutputFilterPriv::log_name
char log_name[32]
Definition: ffmpeg_filter.c:194
AUTO_INSERT_FILTER
#define AUTO_INSERT_FILTER(opt_name, filter_name, arg)
stream_specifier_uninit
void stream_specifier_uninit(StreamSpecifier *ss)
Definition: cmdutils.c:1000
InputStream
Definition: ffmpeg.h:434
filter_nbthreads
char * filter_nbthreads
Definition: ffmpeg_opt.c:76
debug_ts
int debug_ts
Definition: ffmpeg_opt.c:70
OutputFilterOptions
Definition: ffmpeg.h:300
InputFilterOptions::trim_start_us
int64_t trim_start_us
Definition: ffmpeg.h:267
InputFilterOptions::flags
unsigned flags
Definition: ffmpeg.h:288
avfilter_graph_create_filter
int avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt, const char *name, const char *args, void *opaque, AVFilterGraph *graph_ctx)
A convenience wrapper that allocates and initializes a filter in a single step.
Definition: avfiltergraph.c:138
avfilter_graph_alloc_filter
AVFilterContext * avfilter_graph_alloc_filter(AVFilterGraph *graph, const AVFilter *filter, const char *name)
Create a new filter instance in a filter graph.
Definition: avfiltergraph.c:165
AV_BUFFERSRC_FLAG_KEEP_REF
@ AV_BUFFERSRC_FLAG_KEEP_REF
Keep a reference to the frame.
Definition: buffersrc.h:53
finish
static void finish(void)
Definition: movenc.c:374
AV_OPT_TYPE_BINARY
@ AV_OPT_TYPE_BINARY
Underlying C type is a uint8_t* that is either NULL or points to an array allocated with the av_mallo...
Definition: opt.h:286
av_color_space_name
const char * av_color_space_name(enum AVColorSpace space)
Definition: pixdesc.c:3546
FRAME_OPAQUE_SUB_HEARTBEAT
@ FRAME_OPAQUE_SUB_HEARTBEAT
Definition: ffmpeg.h:88
OutputFilterPriv
Definition: ffmpeg_filter.c:188
fg_thread_uninit
static void fg_thread_uninit(FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:2923
filter_opt_apply
static int filter_opt_apply(void *logctx, AVFilterContext *f, const char *key, const char *val)
Definition: ffmpeg_filter.c:478
fail
#define fail()
Definition: checkasm.h:200
AVBufferSrcParameters::sample_aspect_ratio
AVRational sample_aspect_ratio
Video only, the sample (pixel) aspect ratio.
Definition: buffersrc.h:92
av_fifo_write
int av_fifo_write(AVFifo *f, const void *buf, size_t nb_elems)
Write data into a FIFO.
Definition: fifo.c:188
OutputFilterPriv::name
char * name
Definition: ffmpeg_filter.c:196
sub2video_push_ref
static void sub2video_push_ref(InputFilterPriv *ifp, int64_t pts)
Definition: ffmpeg_filter.c:317
avfilter_graph_alloc
AVFilterGraph * avfilter_graph_alloc(void)
Allocate a filter graph.
Definition: avfiltergraph.c:83
AV_PIX_FMT_FLAG_HWACCEL
#define AV_PIX_FMT_FLAG_HWACCEL
Pixel format is an HW accelerated format.
Definition: pixdesc.h:128
FFSIGN
#define FFSIGN(a)
Definition: common.h:75
samplefmt.h
AVERROR_OPTION_NOT_FOUND
#define AVERROR_OPTION_NOT_FOUND
Option not found.
Definition: error.h:63
avfilter_graph_segment_free
void avfilter_graph_segment_free(AVFilterGraphSegment **seg)
Free the provided AVFilterGraphSegment and everything associated with it.
Definition: graphparser.c:276
sub2video_get_blank_frame
static int sub2video_get_blank_frame(InputFilterPriv *ifp)
Definition: ffmpeg_filter.c:264
AV_BPRINT_SIZE_AUTOMATIC
#define AV_BPRINT_SIZE_AUTOMATIC
ifilter_has_all_input_formats
static int ifilter_has_all_input_formats(FilterGraph *fg)
Definition: ffmpeg_filter.c:603
val
static double val(void *priv, double ch)
Definition: aeval.c:77
OutputFilterPriv::index
int index
Definition: ffmpeg_filter.c:191
AVFrame::ch_layout
AVChannelLayout ch_layout
Channel layout of the audio data.
Definition: frame.h:790
SCH_ENC
#define SCH_ENC(encoder)
Definition: ffmpeg_sched.h:123
configure_input_video_filter
static int configure_input_video_filter(FilterGraph *fg, AVFilterGraph *graph, InputFilter *ifilter, AVFilterInOut *in)
Definition: ffmpeg_filter.c:1692
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
avfilter_graph_segment_parse
int avfilter_graph_segment_parse(AVFilterGraph *graph, const char *graph_str, int flags, AVFilterGraphSegment **seg)
Parse a textual filtergraph description into an intermediate form.
Definition: graphparser.c:460
pts
static int64_t pts
Definition: transcode_aac.c:644
av_opt_set
int av_opt_set(void *obj, const char *name, const char *val, int search_flags)
Definition: opt.c:835
graph_is_meta
static int graph_is_meta(AVFilterGraph *graph)
Definition: ffmpeg_filter.c:1874
FilterGraphThread::frame
AVFrame * frame
Definition: ffmpeg_filter.c:87
ss
#define ss(width, name, subs,...)
Definition: cbs_vp9.c:202
AVFILTER_FLAG_DYNAMIC_INPUTS
#define AVFILTER_FLAG_DYNAMIC_INPUTS
The number of the filter inputs is not determined just by AVFilter.inputs.
Definition: avfilter.h:141
FrameData::tb
AVRational tb
Definition: ffmpeg.h:665
OutputFilterPriv::sws_opts
AVDictionary * sws_opts
Definition: ffmpeg_filter.c:217
fgp_from_fg
static FilterGraphPriv * fgp_from_fg(FilterGraph *fg)
Definition: ffmpeg_filter.c:73
OutputFilterPriv::sample_rate
int sample_rate
Definition: ffmpeg_filter.c:203
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
InputFilterPriv::sub2video
struct InputFilterPriv::@8 sub2video
FPSConvContext::dropped_keyframe
int dropped_keyframe
Definition: ffmpeg_filter.c:178
AVRational::num
int num
Numerator.
Definition: rational.h:59
OutputFilter::bound
int bound
Definition: ffmpeg.h:362
LATENCY_PROBE_FILTER_PRE
@ LATENCY_PROBE_FILTER_PRE
Definition: ffmpeg.h:102
InputFilterOptions::trim_end_us
int64_t trim_end_us
Definition: ffmpeg.h:268
AVFilterPad
A filter pad used for either input or output.
Definition: filters.h:38
sch_add_filtergraph
int sch_add_filtergraph(Scheduler *sch, unsigned nb_inputs, unsigned nb_outputs, SchThreadFunc func, void *ctx)
Add a filtergraph to the scheduler.
Definition: ffmpeg_sched.c:829
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:150
sub2video_heartbeat
static void sub2video_heartbeat(InputFilter *ifilter, int64_t pts, AVRational tb)
Definition: ffmpeg_filter.c:2647
avfilter_inout_free
void avfilter_inout_free(AVFilterInOut **inout)
Free the supplied list of AVFilterInOut and set *inout to NULL.
Definition: graphparser.c:76
avassert.h
OutputFilterPriv::trim_start_us
int64_t trim_start_us
Definition: ffmpeg_filter.c:229
FrameData::frame_rate_filter
AVRational frame_rate_filter
Definition: ffmpeg.h:668
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:209
send_eof
static int send_eof(FilterGraphThread *fgt, InputFilter *ifilter, int64_t pts, AVRational tb)
Definition: ffmpeg_filter.c:2716
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
InputFilterPriv
Definition: ffmpeg_filter.c:104
av_fifo_read
int av_fifo_read(AVFifo *f, void *buf, size_t nb_elems)
Read data from a FIFO.
Definition: fifo.c:240
fg_complex_bind_input
static int fg_complex_bind_input(FilterGraph *fg, InputFilter *ifilter)
Definition: ffmpeg_filter.c:1248
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:640
duration
int64_t duration
Definition: movenc.c:65
av_buffersink_get_frame_rate
AVRational av_buffersink_get_frame_rate(const AVFilterContext *ctx)
Definition: buffersink.c:333
ifilter_alloc
static InputFilter * ifilter_alloc(FilterGraph *fg)
Definition: ffmpeg_filter.c:957
AVFilterChain::filters
AVFilterParams ** filters
Definition: avfilter.h:1117
filter_command_free
static void filter_command_free(void *opaque, uint8_t *data)
Definition: ffmpeg_filter.c:253
VSYNC_VSCFR
@ VSYNC_VSCFR
Definition: ffmpeg.h:70
llrintf
#define llrintf(x)
Definition: libm.h:399
s
#define s(width, name)
Definition: cbs_vp9.c:198
ifilter_bind_ist
static int ifilter_bind_ist(InputFilter *ifilter, InputStream *ist, const ViewSpecifier *vs)
Definition: ffmpeg_filter.c:666
FilterGraphPriv::frame_enc
AVFrame * frame_enc
Definition: ffmpeg_filter.c:67
InputFilterPriv::frame
AVFrame * frame
Definition: ffmpeg_filter.c:114
FilterGraph::outputs
OutputFilter ** outputs
Definition: ffmpeg.h:379
ofilter_item_name
static const char * ofilter_item_name(void *obj)
Definition: ffmpeg_filter.c:627
format
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample format(the sample packing is implied by the sample format) and sample rate. The lists are not just lists
AVDictionaryEntry::key
char * key
Definition: dict.h:90
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
VIEW_SPECIFIER_TYPE_NONE
@ VIEW_SPECIFIER_TYPE_NONE
Definition: ffmpeg.h:117
AV_CHANNEL_ORDER_UNSPEC
@ AV_CHANNEL_ORDER_UNSPEC
Only the channel count is specified, without any further information about the channel order.
Definition: channel_layout.h:119
ifilter_bind_dec
static int ifilter_bind_dec(InputFilterPriv *ifp, Decoder *dec, const ViewSpecifier *vs)
Definition: ffmpeg_filter.c:721
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
OutputFilter::linklabel
uint8_t * linklabel
Definition: ffmpeg.h:363
InputFilter
Definition: ffmpeg.h:349
FilterGraphPriv::nb_outputs_done
unsigned nb_outputs_done
Definition: ffmpeg_filter.c:58
av_buffersink_get_format
int av_buffersink_get_format(const AVFilterContext *ctx)
av_buffersink_get_time_base
AVRational av_buffersink_get_time_base(const AVFilterContext *ctx)
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:304
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:230
FrameData::dec
struct FrameData::@4 dec
ctx
AVFormatContext * ctx
Definition: movenc.c:49
OFILTER_FLAG_AUTOSCALE
@ OFILTER_FLAG_AUTOSCALE
Definition: ffmpeg.h:297
av_rescale_q
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
AVSubtitle::pts
int64_t pts
Same as packet pts, in AV_TIME_BASE.
Definition: avcodec.h:2244
fg_thread_init
static int fg_thread_init(FilterGraphThread *fgt, const FilterGraph *fg)
Definition: ffmpeg_filter.c:2941
InputFilterOptions::name
uint8_t * name
Definition: ffmpeg.h:270
InputFilterOptions::crop_top
unsigned crop_top
Definition: ffmpeg.h:279
InputFilter::graph
struct FilterGraph * graph
Definition: ffmpeg.h:350
av_get_sample_fmt_name
const char * av_get_sample_fmt_name(enum AVSampleFormat sample_fmt)
Return the name of sample_fmt, or NULL if sample_fmt is not recognized.
Definition: samplefmt.c:51
key
const char * key
Definition: hwcontext_opencl.c:189
color_range
color_range
Definition: vf_selectivecolor.c:43
AV_ROUND_NEAR_INF
@ AV_ROUND_NEAR_INF
Round to nearest and halfway cases away from zero.
Definition: mathematics.h:135
fsize
static int64_t fsize(FILE *f)
Definition: audiomatch.c:29
OutputFilterPriv::fps
FPSConvContext fps
Definition: ffmpeg_filter.c:234
fg_item_name
static const char * fg_item_name(void *obj)
Definition: ffmpeg_filter.c:1041
AV_ROUND_PASS_MINMAX
@ AV_ROUND_PASS_MINMAX
Flag telling rescaling functions to pass INT64_MIN/MAX through unchanged, avoiding special cases for ...
Definition: mathematics.h:159
command
static int command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
Definition: vf_drawtext.c:1195
arg
const char * arg
Definition: jacosubdec.c:67
OutputFilterPriv::ch_layouts
const AVChannelLayout * ch_layouts
Definition: ffmpeg_filter.c:223
OutputFilterPriv::width
int width
Definition: ffmpeg_filter.c:202
InputFilterOptions::crop_left
unsigned crop_left
Definition: ffmpeg.h:281
av_color_range_name
const char * av_color_range_name(enum AVColorRange range)
Definition: pixdesc.c:3486
AVFormatContext
Format I/O context.
Definition: avformat.h:1300
avfilter_get_by_name
const AVFilter * avfilter_get_by_name(const char *name)
Get a filter definition matching the given name.
Definition: allfilters.c:634
opts
AVDictionary * opts
Definition: movenc.c:51
AVStream::codecpar
AVCodecParameters * codecpar
Codec parameters associated with this stream.
Definition: avformat.h:771
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
FilterGraphPriv::nb_threads
char * nb_threads
Definition: ffmpeg_filter.c:62
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:75
OutputFilter::name
uint8_t * name
Definition: ffmpeg.h:358
fabs
static __device__ float fabs(float a)
Definition: cuda_runtime.h:182
avfilter_graph_config
int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx)
Check validity and configure all the links and formats in the graph.
Definition: avfiltergraph.c:1294
OutputFilterPriv::enc_timebase
AVRational enc_timebase
Definition: ffmpeg_filter.c:228
avfilter_graph_segment_apply
int avfilter_graph_segment_apply(AVFilterGraphSegment *seg, int flags, AVFilterInOut **inputs, AVFilterInOut **outputs)
Apply all filter/link descriptions from a graph segment to the associated filtergraph.
Definition: graphparser.c:882
InputFilterPriv::color_space
enum AVColorSpace color_space
Definition: ffmpeg_filter.c:134
NULL
#define NULL
Definition: coverity.c:32
av_opt_set_bin
int av_opt_set_bin(void *obj, const char *name, const uint8_t *val, int len, int search_flags)
Definition: opt.c:895
set_channel_layout
static int set_channel_layout(OutputFilterPriv *f, const AVChannelLayout *layouts_allowed, const AVChannelLayout *layout_requested)
Definition: ffmpeg_filter.c:750
OutputFilterPriv::ch_layout
AVChannelLayout ch_layout
Definition: ffmpeg_filter.c:204
AVFilterParams
Parameters describing a filter to be created in a filtergraph.
Definition: avfilter.h:1049
FPSConvContext::dup_warning
uint64_t dup_warning
Definition: ffmpeg_filter.c:175
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:139
InputStream::st
AVStream * st
Definition: ffmpeg.h:442
avfilter_graph_set_auto_convert
void avfilter_graph_set_auto_convert(AVFilterGraph *graph, unsigned flags)
Enable or disable automatic format conversion inside the graph.
Definition: avfiltergraph.c:160
InputFilterPriv::displaymatrix_present
int displaymatrix_present
Definition: ffmpeg_filter.c:146
Decoder
Definition: ffmpeg.h:420
AVFilterParams::filter
AVFilterContext * filter
The filter context.
Definition: avfilter.h:1060
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
OFILTER_FLAG_AUDIO_24BIT
@ OFILTER_FLAG_AUDIO_24BIT
Definition: ffmpeg.h:296
AVFilterChain::nb_filters
size_t nb_filters
Definition: avfilter.h:1118
fg_create_simple
int fg_create_simple(FilterGraph **pfg, InputStream *ist, char *graph_desc, Scheduler *sch, unsigned sched_idx_enc, const OutputFilterOptions *opts)
Definition: ffmpeg_filter.c:1192
AVFilterGraph::filters
AVFilterContext ** filters
Definition: avfilter.h:783
InputFilterPriv::linklabel
uint8_t * linklabel
Definition: ffmpeg_filter.c:118
ofilter_bind_ifilter
static int ofilter_bind_ifilter(OutputFilter *ofilter, InputFilterPriv *ifp, const OutputFilterOptions *opts)
Definition: ffmpeg_filter.c:900
OutputFilterPriv::sample_aspect_ratio
AVRational sample_aspect_ratio
Definition: ffmpeg_filter.c:215
ofilter_alloc
static OutputFilter * ofilter_alloc(FilterGraph *fg, enum AVMediaType type)
Definition: ffmpeg_filter.c:641
close_output
static int close_output(OutputFilterPriv *ofp, FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:2375
FilterGraphThread::frame_queue_out
AVFifo * frame_queue_out
Definition: ffmpeg_filter.c:92
mathops.h
FilterGraphPriv::sch_idx
unsigned sch_idx
Definition: ffmpeg_filter.c:70
FrameData::wallclock
int64_t wallclock[LATENCY_PROBE_NB]
Definition: ffmpeg.h:672
avfilter_graph_request_oldest
int avfilter_graph_request_oldest(AVFilterGraph *graph)
Request a frame on the oldest sink link.
Definition: avfiltergraph.c:1425
time.h
AVFilterGraphSegment::chains
AVFilterChain ** chains
A list of filter chain contained in this segment.
Definition: avfilter.h:1141
stream_specifier_match
unsigned stream_specifier_match(const StreamSpecifier *ss, const AVFormatContext *s, const AVStream *st, void *logctx)
Definition: cmdutils.c:1224
AVFilterGraph
Definition: avfilter.h:781
inputs
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
Definition: filter_design.txt:243
InputFilterOptions
Definition: ffmpeg.h:266
InputFilterPriv::sample_aspect_ratio
AVRational sample_aspect_ratio
Definition: ffmpeg_filter.c:133
AVCOL_RANGE_UNSPECIFIED
@ AVCOL_RANGE_UNSPECIFIED
Definition: pixfmt.h:683
FilterGraph::nb_outputs
int nb_outputs
Definition: ffmpeg.h:380
OutputFilterPriv::formats
const int * formats
Definition: ffmpeg_filter.c:222
av_opt_set_int
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
Definition: opt.c:880
InputStream::par
AVCodecParameters * par
Codec parameters - to be used by the decoding/streamcopy code.
Definition: ffmpeg.h:450
av_buffer_create
AVBufferRef * av_buffer_create(uint8_t *data, size_t size, void(*free)(void *opaque, uint8_t *data), void *opaque, int flags)
Create an AVBuffer from an existing array.
Definition: buffer.c:55
input_files
InputFile ** input_files
Definition: ffmpeg.c:104
AV_CLASS_CATEGORY_FILTER
@ AV_CLASS_CATEGORY_FILTER
Definition: log.h:36
Scheduler
Definition: ffmpeg_sched.c:275
FilterGraphPriv::fg
FilterGraph fg
Definition: ffmpeg_filter.c:45
OutputFilterPriv::ofilter
OutputFilter ofilter
Definition: ffmpeg_filter.c:189
FilterGraph
Definition: ffmpeg.h:373
AVFilterGraphSegment
A parsed representation of a filtergraph segment.
Definition: avfilter.h:1130
file_read
char * file_read(const char *filename)
Definition: cmdutils.c:1495
ENC_TIME_BASE_DEMUX
@ ENC_TIME_BASE_DEMUX
Definition: ffmpeg.h:77
InputFilterOptions::sub2video_width
int sub2video_width
Definition: ffmpeg.h:284
AVBufferSrcParameters::frame_rate
AVRational frame_rate
Video only, the frame rate of the input video.
Definition: buffersrc.h:100
AVFilterInOut::pad_idx
int pad_idx
index of the filt_ctx pad to use for linking
Definition: avfilter.h:938
av_buffersrc_close
int av_buffersrc_close(AVFilterContext *ctx, int64_t pts, unsigned flags)
Close the buffer source after EOF.
Definition: buffersrc.c:277
AVFilterGraph::scale_sws_opts
char * scale_sws_opts
sws options to use for the auto-inserted scale filters
Definition: avfilter.h:786
filtergraph_is_simple
int filtergraph_is_simple(const FilterGraph *fg)
Definition: ffmpeg_filter.c:2077
VideoSyncMethod
VideoSyncMethod
Definition: ffmpeg.h:65
av_opt_find
const AVOption * av_opt_find(void *obj, const char *name, const char *unit, int opt_flags, int search_flags)
Look for an option in an object.
Definition: opt.c:1991
IFILTER_FLAG_REINIT
@ IFILTER_FLAG_REINIT
Definition: ffmpeg.h:261
f
f
Definition: af_crystalizer.c:122
AVIOContext
Bytestream IO Context.
Definition: avio.h:160
av_ts2timestr
#define av_ts2timestr(ts, tb)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:83
filter_thread
static int filter_thread(void *arg)
Definition: ffmpeg_filter.c:2968
AVMediaType
AVMediaType
Definition: avutil.h:199
InputFilterPriv::hw_frames_ctx
AVBufferRef * hw_frames_ctx
Definition: ffmpeg_filter.c:144
AVFifo
Definition: fifo.c:35
FRAME_OPAQUE_SEND_COMMAND
@ FRAME_OPAQUE_SEND_COMMAND
Definition: ffmpeg.h:90
FilterGraphThread
Definition: ffmpeg_filter.c:84
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:400
av_bprint_finalize
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:240
InputFilterPriv::displaymatrix
int32_t displaymatrix[9]
Definition: ffmpeg_filter.c:148
AVChannelLayout
An AVChannelLayout holds information about the channel layout of audio data.
Definition: channel_layout.h:317
OutputFilterPriv::color_ranges
enum AVColorRange * color_ranges
Definition: ffmpeg_filter.c:226
FilterGraphThread::graph
AVFilterGraph * graph
Definition: ffmpeg_filter.c:85
av_buffersrc_parameters_alloc
AVBufferSrcParameters * av_buffersrc_parameters_alloc(void)
Allocate a new AVBufferSrcParameters instance.
Definition: buffersrc.c:104
AVFilterInOut::filter_ctx
AVFilterContext * filter_ctx
filter context associated to this input/output
Definition: avfilter.h:935
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:83
av_err2str
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:122
AVFrame::sample_rate
int sample_rate
Sample rate of the audio data.
Definition: frame.h:588
OutputFilterPriv::tb_out_locked
int tb_out_locked
Definition: ffmpeg_filter.c:213
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
avfilter_link
int avfilter_link(AVFilterContext *src, unsigned srcpad, AVFilterContext *dst, unsigned dstpad)
Link two filters together.
Definition: avfilter.c:149
AVBufferSrcParameters::hw_frames_ctx
AVBufferRef * hw_frames_ctx
Video with a hwaccel pixel format only.
Definition: buffersrc.h:106
start_time
static int64_t start_time
Definition: ffplay.c:326
AVFILTER_FLAG_HWDEVICE
#define AVFILTER_FLAG_HWDEVICE
The filter can create hardware frames using AVFilterContext.hw_device_ctx.
Definition: avfilter.h:173
InputFilterPriv::color_range
enum AVColorRange color_range
Definition: ffmpeg_filter.c:135
AV_SAMPLE_FMT_NONE
@ AV_SAMPLE_FMT_NONE
Definition: samplefmt.h:56
MATRIX_CHANGED
@ MATRIX_CHANGED
Definition: ffmpeg_filter.c:2774
FilterCommand::time
double time
Definition: ffmpeg_filter.c:249
InputFilterPriv::initialize
unsigned int initialize
Definition: ffmpeg_filter.c:157
InputFilterPriv::displaymatrix_applied
int displaymatrix_applied
Definition: ffmpeg_filter.c:147
avfilter_graph_queue_command
int avfilter_graph_queue_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, int flags, double ts)
Queue a command for one or more filter instances.
Definition: avfiltergraph.c:1342
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
AVFrame::time_base
AVRational time_base
Time base for the timestamps in this frame.
Definition: frame.h:516
AVFrameSideData::data
uint8_t * data
Definition: frame.h:267
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:476
FilterGraphPriv::disable_conversions
int disable_conversions
Definition: ffmpeg_filter.c:56
frame_data
FrameData * frame_data(AVFrame *frame)
Get our axiliary frame data attached to the frame, allocating it if needed.
Definition: ffmpeg.c:453
AVSubtitle::end_display_time
uint32_t end_display_time
Definition: avcodec.h:2241
FilterGraphThread::eof_out
uint8_t * eof_out
Definition: ffmpeg_filter.c:101
FilterGraphPriv::graph_desc
const char * graph_desc
Definition: ffmpeg_filter.c:60
allocate_array_elem
void * allocate_array_elem(void *ptr, size_t elem_size, int *nb_elems)
Atomically add a new element to an array of pointers, i.e.
Definition: cmdutils.c:1467
FPSConvContext::vsync_method
enum VideoSyncMethod vsync_method
Definition: ffmpeg_filter.c:180
av_frame_remove_side_data
void av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type)
Remove and free all side data instances of the given type.
Definition: frame.c:1029
InputFilterPriv::width
int width
Definition: ffmpeg_filter.c:132
AVBufferSrcParameters::time_base
AVRational time_base
The timebase to be used for the timestamps on the input frames.
Definition: buffersrc.h:82
AV_PIX_FMT_RGB32
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:475
filter_is_buffersrc
static int filter_is_buffersrc(const AVFilterContext *f)
Definition: ffmpeg_filter.c:1867
fg_finalise_bindings
int fg_finalise_bindings(void)
Definition: ffmpeg_filter.c:1404
AUDIO_CHANGED
@ AUDIO_CHANGED
Definition: ffmpeg_filter.c:2773
sch_filter_receive
int sch_filter_receive(Scheduler *sch, unsigned fg_idx, unsigned *in_idx, AVFrame *frame)
Called by filtergraph tasks to obtain frames for filtering.
Definition: ffmpeg_sched.c:2403
av_dict_free
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
Definition: dict.c:223
unknown_if_null
static const char * unknown_if_null(const char *str)
Definition: ffmpeg_filter.c:2778
InputFilterOptions::sub2video_height
int sub2video_height
Definition: ffmpeg.h:285
decoders
Decoder ** decoders
Definition: ffmpeg.c:113
OutputFilterPriv::log_parent
void * log_parent
Definition: ffmpeg_filter.c:193
nb_decoders
int nb_decoders
Definition: ffmpeg.c:114
OutputFilter::type
enum AVMediaType type
Definition: ffmpeg.h:367
read_frames
static int read_frames(FilterGraph *fg, FilterGraphThread *fgt, AVFrame *frame)
Definition: ffmpeg_filter.c:2585
av_channel_layout_compare
int av_channel_layout_compare(const AVChannelLayout *chl, const AVChannelLayout *chl1)
Check whether two channel layouts are semantically the same, i.e.
Definition: channel_layout.c:807
SUBTITLE_BITMAP
@ SUBTITLE_BITMAP
A bitmap, pict will be set.
Definition: avcodec.h:2194
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:220
send_frame
static int send_frame(FilterGraph *fg, FilterGraphThread *fgt, InputFilter *ifilter, AVFrame *frame)
Definition: ffmpeg_filter.c:2783
avfilter_init_str
int avfilter_init_str(AVFilterContext *filter, const char *args)
Initialize a filter with the supplied parameters.
Definition: avfilter.c:954
buffersink.h
AVFILTER_AUTO_CONVERT_NONE
@ AVFILTER_AUTO_CONVERT_NONE
all automatic conversions disabled
Definition: avfilter.h:903
av_channel_layout_default
void av_channel_layout_default(AVChannelLayout *ch_layout, int nb_channels)
Get the default channel layout for a given number of channels.
Definition: channel_layout.c:837
av_find_nearest_q_idx
int av_find_nearest_q_idx(AVRational q, const AVRational *q_list)
Find the value in a list of rationals nearest a given reference rational.
Definition: rational.c:140
OutputFilterPriv::color_range
enum AVColorRange color_range
Definition: ffmpeg_filter.c:206
av_buffersink_get_w
int av_buffersink_get_w(const AVFilterContext *ctx)
FilterCommand::all_filters
int all_filters
Definition: ffmpeg_filter.c:250
FPSConvContext::framerate_clip
int framerate_clip
Definition: ffmpeg_filter.c:185
bprint.h
FPSConvContext::frame_number
int64_t frame_number
Definition: ffmpeg_filter.c:169
av_buffersrc_parameters_set
int av_buffersrc_parameters_set(AVFilterContext *ctx, AVBufferSrcParameters *param)
Initialize the buffersrc or abuffersrc filter with the provided parameters.
Definition: buffersrc.c:117
graph_opts_apply
static int graph_opts_apply(void *logctx, AVFilterGraphSegment *seg)
Definition: ffmpeg_filter.c:534
FPSConvContext
Definition: ffmpeg_filter.c:166
lrintf
#define lrintf(x)
Definition: libm_mips.h:72
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
AVBufferSrcParameters::width
int width
Video only, the display dimensions of the input frames.
Definition: buffersrc.h:87
InputFilterPriv::index
int index
Definition: ffmpeg_filter.c:109
FrameData::bits_per_raw_sample
int bits_per_raw_sample
Definition: ffmpeg.h:670
fg_send_command
void fg_send_command(FilterGraph *fg, double time, const char *target, const char *command, const char *arg, int all_filters)
Definition: ffmpeg_filter.c:3085
FilterGraphPriv::is_simple
int is_simple
Definition: ffmpeg_filter.c:50
InputFilterOptions::fallback
AVFrame * fallback
Definition: ffmpeg.h:290
av_buffersrc_add_frame_flags
int attribute_align_arg av_buffersrc_add_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Add a frame to the buffer source.
Definition: buffersrc.c:192
VSYNC_CFR
@ VSYNC_CFR
Definition: ffmpeg.h:68
src2
const pixel * src2
Definition: h264pred_template.c:422
configure_input_audio_filter
static int configure_input_audio_filter(FilterGraph *fg, AVFilterGraph *graph, InputFilter *ifilter, AVFilterInOut *in)
Definition: ffmpeg_filter.c:1807
AVColorSpace
AVColorSpace
YUV colorspace type.
Definition: pixfmt.h:640
FPSConvContext::framerate_max
AVRational framerate_max
Definition: ffmpeg_filter.c:183
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:56
AVSampleFormat
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:55
delta
float delta
Definition: vorbis_enc_data.h:430
FRAME_OPAQUE_EOF
@ FRAME_OPAQUE_EOF
Definition: ffmpeg.h:89
InputFile::ctx
AVFormatContext * ctx
Definition: ffmpeg.h:473
av_frame_move_ref
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:649
cfgp_from_cfg
static const FilterGraphPriv * cfgp_from_cfg(const FilterGraph *fg)
Definition: ffmpeg_filter.c:78
graph_parse
static int graph_parse(void *logctx, AVFilterGraph *graph, const char *desc, AVFilterInOut **inputs, AVFilterInOut **outputs, AVBufferRef *hw_device)
Definition: ffmpeg_filter.c:558
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:622
InputFilterPriv::eof
int eof
Definition: ffmpeg_filter.c:126
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:256
AV_BUFFERSRC_FLAG_PUSH
@ AV_BUFFERSRC_FLAG_PUSH
Immediately push the frame to the output.
Definition: buffersrc.h:46
av_inv_q
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
av_buffer_replace
int av_buffer_replace(AVBufferRef **pdst, const AVBufferRef *src)
Ensure dst refers to the same data as src.
Definition: buffer.c:233
len
int len
Definition: vorbis_enc_data.h:426
SchedulerNode
Definition: ffmpeg_sched.h:103
AVCOL_SPC_UNSPECIFIED
@ AVCOL_SPC_UNSPECIFIED
Definition: pixfmt.h:643
filtergraphs
FilterGraph ** filtergraphs
Definition: ffmpeg.c:110
int_cb
const AVIOInterruptCB int_cb
Definition: ffmpeg.c:307
OutputFilterPriv::color_space
enum AVColorSpace color_space
Definition: ffmpeg_filter.c:205
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
outputs
static const AVFilterPad outputs[]
Definition: af_aap.c:310
sch_connect
int sch_connect(Scheduler *sch, SchedulerNode src, SchedulerNode dst)
Definition: ffmpeg_sched.c:927
FFMPEG_OPT_VSYNC_DROP
#define FFMPEG_OPT_VSYNC_DROP
Definition: ffmpeg.h:59
av_buffersink_get_h
int av_buffersink_get_h(const AVFilterContext *ctx)
sch_filter_command
int sch_filter_command(Scheduler *sch, unsigned fg_idx, AVFrame *frame)
Definition: ffmpeg_sched.c:2513
AVFilter
Filter definition.
Definition: avfilter.h:201
video_sync_process
static void video_sync_process(OutputFilterPriv *ofp, AVFrame *frame, int64_t *nb_frames, int64_t *nb_frames_prev)
Definition: ffmpeg_filter.c:2252
ifp_from_ifilter
static InputFilterPriv * ifp_from_ifilter(InputFilter *ifilter)
Definition: ffmpeg_filter.c:161
fg_create
int fg_create(FilterGraph **pfg, char *graph_desc, Scheduler *sch)
Create a new filtergraph in the global filtergraph list.
Definition: ffmpeg_filter.c:1055
mid_pred
#define mid_pred
Definition: mathops.h:96
AV_BUFFERSINK_FLAG_NO_REQUEST
#define AV_BUFFERSINK_FLAG_NO_REQUEST
Tell av_buffersink_get_buffer_ref() not to request a frame from its input.
Definition: buffersink.h:90
ret
ret
Definition: filter_design.txt:187
AVStream
Stream structure.
Definition: avformat.h:748
AV_LOG_FATAL
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:203
pixfmt.h
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:80
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
InputFilter::name
uint8_t * name
Definition: ffmpeg.h:351
VSYNC_DROP
@ VSYNC_DROP
Definition: ffmpeg.h:72
av_channel_layout_check
int av_channel_layout_check(const AVChannelLayout *channel_layout)
Check whether a channel layout is valid, i.e.
Definition: channel_layout.c:781
FPSConvContext::last_frame
AVFrame * last_frame
Definition: ffmpeg_filter.c:167
InputFile::streams
InputStream ** streams
Definition: ffmpeg.h:487
insert_filter
static int insert_filter(AVFilterContext **last_filter, int *pad_idx, const char *filter_name, const char *args)
Definition: ffmpeg_filter.c:1484
AVFilterParams::opts
AVDictionary * opts
Options to be applied to the filter.
Definition: avfilter.h:1101
OutputFilterPriv::next_pts
int64_t next_pts
Definition: ffmpeg_filter.c:233
av_bprintf
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:99
ReinitReason
ReinitReason
Definition: ffmpeg_filter.c:2771
av_fifo_alloc2
AVFifo * av_fifo_alloc2(size_t nb_elems, size_t elem_size, unsigned int flags)
Allocate and initialize an AVFifo with a given element size.
Definition: fifo.c:47
AVOption::type
enum AVOptionType type
Definition: opt.h:445
AVFrame::sample_aspect_ratio
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:496
avfilter_pad_get_type
enum AVMediaType avfilter_pad_get_type(const AVFilterPad *pads, int pad_idx)
Get the type of an AVFilterPad.
Definition: avfilter.c:987
av_dynarray_add_nofree
int av_dynarray_add_nofree(void *tab_ptr, int *nb_ptr, void *elem)
Add an element to a dynamic array.
Definition: mem.c:315
AVBufferSrcParameters::color_range
enum AVColorRange color_range
Definition: buffersrc.h:122
FrameOpaque
FrameOpaque
Definition: ffmpeg.h:87
OutputFilterPriv::swr_opts
AVDictionary * swr_opts
Definition: ffmpeg_filter.c:218
av_get_media_type_string
const char * av_get_media_type_string(enum AVMediaType media_type)
Return a string describing the media_type enum, NULL if media_type is unknown.
Definition: utils.c:28
AVFrame::height
int height
Definition: frame.h:461
AVStream::index
int index
stream index in AVFormatContext
Definition: avformat.h:754
IFILTER_FLAG_CROP
@ IFILTER_FLAG_CROP
Definition: ffmpeg.h:263
DEF_CHOOSE_FORMAT
#define DEF_CHOOSE_FORMAT(name, type, var, supported_list, none, printf_format, get_name)
Definition: ffmpeg_filter.c:374
channel_layout.h
AVBufferSrcParameters
This structure contains the parameters describing the frames that will be passed to this filter.
Definition: buffersrc.h:73
av_buffersink_get_sample_rate
int av_buffersink_get_sample_rate(const AVFilterContext *ctx)
AVBufferSrcParameters::format
int format
video: the pixel format, value corresponds to enum AVPixelFormat audio: the sample format,...
Definition: buffersrc.h:78
describe_filter_link
static char * describe_filter_link(FilterGraph *fg, AVFilterInOut *inout, int in)
Definition: ffmpeg_filter.c:615
buffer
the frame and frame reference mechanism is intended to as much as expensive copies of that data while still allowing the filters to produce correct results The data is stored in buffers represented by AVFrame structures Several references can point to the same frame buffer
Definition: filter_design.txt:49
InputFilterPriv::bound
int bound
Definition: ffmpeg_filter.c:127
avfilter_init_dict
int avfilter_init_dict(AVFilterContext *ctx, AVDictionary **options)
Initialize a filter with the supplied dictionary of options.
Definition: avfilter.c:913
AVRational::den
int den
Denominator.
Definition: rational.h:60
InputStream::file
struct InputFile * file
Definition: ffmpeg.h:438
AVFilterChain
A filterchain is a list of filter specifications.
Definition: avfilter.h:1116
InputFilterPriv::frame_queue
AVFifo * frame_queue
Definition: ffmpeg_filter.c:142
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
avfilter.h
InputFilterPriv::type_src
enum AVMediaType type_src
Definition: ffmpeg_filter.c:124
av_channel_layout_uninit
void av_channel_layout_uninit(AVChannelLayout *channel_layout)
Free any allocated data in the channel layout and reset the channel count to 0.
Definition: channel_layout.c:440
FilterGraphPriv::is_meta
int is_meta
Definition: ffmpeg_filter.c:53
insert_trim
static int insert_trim(void *logctx, int64_t start_time, int64_t duration, AVFilterContext **last_filter, int *pad_idx, const char *filter_name)
Definition: ffmpeg_filter.c:1433
IFILTER_FLAG_CFR
@ IFILTER_FLAG_CFR
Definition: ffmpeg.h:262
AVFILTER_FLAG_METADATA_ONLY
#define AVFILTER_FLAG_METADATA_ONLY
The filter is a "metadata" filter - it does not modify the frame data in any way.
Definition: avfilter.h:168
avio_read
int avio_read(AVIOContext *s, unsigned char *buf, int size)
Read size bytes from AVIOContext into buf.
Definition: aviobuf.c:612
ifilter_bind_fg
static int ifilter_bind_fg(InputFilterPriv *ifp, FilterGraph *fg_src, int out_idx)
Definition: ffmpeg_filter.c:920
choose_out_timebase
static int choose_out_timebase(OutputFilterPriv *ofp, AVFrame *frame)
Definition: ffmpeg_filter.c:2132
OutputFilterPriv::flags
unsigned flags
Definition: ffmpeg_filter.c:236
OutputFilterPriv::sample_rates
const int * sample_rates
Definition: ffmpeg_filter.c:224
AVERROR_FILTER_NOT_FOUND
#define AVERROR_FILTER_NOT_FOUND
Filter not found.
Definition: error.h:60
sub2video_copy_rect
static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h, AVSubtitleRect *r)
Definition: ffmpeg_filter.c:286
AVFilterGraphSegment::nb_chains
size_t nb_chains
Definition: avfilter.h:1142
AVFilterContext
An instance of a filter.
Definition: avfilter.h:457
FilterGraph::class
const AVClass * class
Definition: ffmpeg.h:374
av_channel_layout_copy
int av_channel_layout_copy(AVChannelLayout *dst, const AVChannelLayout *src)
Make a copy of a channel layout.
Definition: channel_layout.c:447
OutputFilter
Definition: ffmpeg.h:354
sub2video_frame
static int sub2video_frame(InputFilter *ifilter, AVFrame *frame, int buffer)
Definition: ffmpeg_filter.c:2669
InputFilterPriv::ifilter
InputFilter ifilter
Definition: ffmpeg_filter.c:105
AVIO_FLAG_READ
#define AVIO_FLAG_READ
read-only
Definition: avio.h:617
av_strdup
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:272
desc
const char * desc
Definition: libsvtav1.c:79
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
configure_output_video_filter
static int configure_output_video_filter(FilterGraphPriv *fgp, AVFilterGraph *graph, OutputFilter *ofilter, AVFilterInOut *out)
Definition: ffmpeg_filter.c:1510
ViewSpecifier::type
enum ViewSpecifierType type
Definition: ffmpeg.h:129
av_buffersrc_get_nb_failed_requests
unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src)
Get the number of failed requests.
Definition: buffersrc.c:319
OutputFilterPriv::color_spaces
enum AVColorSpace * color_spaces
Definition: ffmpeg_filter.c:225
mem.h
AVBufferRef
A reference to a data buffer.
Definition: buffer.h:82
avio_open2
int avio_open2(AVIOContext **s, const char *filename, int flags, const AVIOInterruptCB *int_cb, AVDictionary **options)
Create and initialize a AVIOContext for accessing the resource indicated by url.
Definition: avio.c:491
av_buffersink_get_colorspace
enum AVColorSpace av_buffersink_get_colorspace(const AVFilterContext *ctx)
adjust_frame_pts_to_encoder_tb
static double adjust_frame_pts_to_encoder_tb(void *logctx, AVFrame *frame, AVRational tb_dst, int64_t start_time)
Definition: ffmpeg_filter.c:2209
OutputFilter::nb_frames_drop
atomic_uint_least64_t nb_frames_drop
Definition: ffmpeg.h:370
auto_conversion_filters
int auto_conversion_filters
Definition: ffmpeg_opt.c:79
llrint
#define llrint(x)
Definition: libm.h:394
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:265
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
InputStream::index
int index
Definition: ffmpeg.h:440
sch_filter_receive_finish
void sch_filter_receive_finish(Scheduler *sch, unsigned fg_idx, unsigned in_idx)
Called by filter tasks to signal that a filter input will no longer accept input.
Definition: ffmpeg_sched.c:2447
av_free
#define av_free(p)
Definition: tableprint_vlc.h:33
AVDictionaryEntry
Definition: dict.h:89
ENC_TIME_BASE_FILTER
@ ENC_TIME_BASE_FILTER
Definition: ffmpeg.h:78
FilterCommand::target
char * target
Definition: ffmpeg_filter.c:245
fg_class
static const AVClass fg_class
Definition: ffmpeg_filter.c:1048
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
av_dict_set
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:88
av_dict_get_string
int av_dict_get_string(const AVDictionary *m, char **buffer, const char key_val_sep, const char pairs_sep)
Get dictionary entries as a string.
Definition: dict.c:250
OFILTER_FLAG_DISABLE_CONVERT
@ OFILTER_FLAG_DISABLE_CONVERT
Definition: ffmpeg.h:294
av_dict_copy
int av_dict_copy(AVDictionary **dst, const AVDictionary *src, int flags)
Copy entries from one AVDictionary struct into another.
Definition: dict.c:237
Decoder::type
enum AVMediaType type
Definition: ffmpeg.h:423
InputFilterPriv::format
int format
Definition: ffmpeg_filter.c:130
InputFilterPriv::end_pts
int64_t end_pts
marks if sub2video_update should force an initialization
Definition: ffmpeg_filter.c:154
nb_filtergraphs
int nb_filtergraphs
Definition: ffmpeg.c:111
int32_t
int32_t
Definition: audioconvert.c:56
sub2video_update
static void sub2video_update(InputFilterPriv *ifp, int64_t heartbeat_pts, const AVSubtitle *sub)
Definition: ffmpeg_filter.c:333
timestamp.h
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
avio_close
int avio_close(AVIOContext *s)
Close the resource accessed by the AVIOContext s and free it.
Definition: avio.c:616
OutputFilterPriv::format
int format
Definition: ffmpeg_filter.c:201
av_strlcpy
size_t av_strlcpy(char *dst, const char *src, size_t size)
Copy the string src to dst, but no more than size - 1 bytes, and null-terminate dst.
Definition: avstring.c:85
avfilter_graph_send_command
int avfilter_graph_send_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, char *res, int res_len, int flags)
Send a command to one or more filter instances.
Definition: avfiltergraph.c:1312
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
InputFilterPriv::opts
InputFilterOptions opts
Definition: ffmpeg_filter.c:107
dts_error_threshold
float dts_error_threshold
Definition: ffmpeg_opt.c:57
OutputFilterPriv::trim_duration_us
int64_t trim_duration_us
Definition: ffmpeg_filter.c:230
av_fifo_freep2
void av_fifo_freep2(AVFifo **f)
Free an AVFifo and reset pointer to NULL.
Definition: fifo.c:286
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
av_ts2str
#define av_ts2str(ts)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:54
h
h
Definition: vp9dsp_template.c:2070
av_bprint_chars
void av_bprint_chars(AVBPrint *buf, char c, unsigned n)
Append char c n times to a print buffer.
Definition: bprint.c:145
hw_device_for_filter
AVBufferRef * hw_device_for_filter(void)
Get a hardware device to be used with this filtergraph.
Definition: ffmpeg_hw.c:298
AVDictionaryEntry::value
char * value
Definition: dict.h:91
bind_inputs
static int bind_inputs(FilterGraph *fg)
Definition: ffmpeg_filter.c:1386
AVFilterGraph::nb_filters
unsigned nb_filters
Definition: avfilter.h:784
avstring.h
AVFilterContext::filter
const AVFilter * filter
the AVFilter of which this is an instance
Definition: avfilter.h:460
AVColorRange
AVColorRange
Visual content value range.
Definition: pixfmt.h:682
frame_data_c
const FrameData * frame_data_c(AVFrame *frame)
Definition: ffmpeg.c:459
OutputFilterPriv::tb_out
AVRational tb_out
Definition: ffmpeg_filter.c:210
AVFilterInOut
A linked-list of the inputs/outputs of the filter chain.
Definition: avfilter.h:930
VSYNC_PASSTHROUGH
@ VSYNC_PASSTHROUGH
Definition: ffmpeg.h:67
OutputFilterPriv::height
int height
Definition: ffmpeg_filter.c:202
snprintf
#define snprintf
Definition: snprintf.h:34
SCH_FILTER_IN
#define SCH_FILTER_IN(filter, input)
Definition: ffmpeg_sched.h:126
FPSConvContext::framerate
AVRational framerate
Definition: ffmpeg_filter.c:182
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
buffersrc.h
fg_thread_set_name
static void fg_thread_set_name(const FilterGraph *fg)
Definition: ffmpeg_filter.c:2908
ist_find_unused
InputStream * ist_find_unused(enum AVMediaType type)
Find an unused input stream of given type.
Definition: ffmpeg_demux.c:157
sub2video_prepare
static void sub2video_prepare(InputFilterPriv *ifp)
Definition: ffmpeg_filter.c:1681
av_rescale_q_rnd
int64_t av_rescale_q_rnd(int64_t a, AVRational bq, AVRational cq, enum AVRounding rnd)
Rescale a 64-bit integer by 2 rational numbers with specified rounding.
Definition: mathematics.c:134
av_dict_iterate
const AVDictionaryEntry * av_dict_iterate(const AVDictionary *m, const AVDictionaryEntry *prev)
Iterate over a dictionary.
Definition: dict.c:44
AVSubtitle::start_display_time
uint32_t start_display_time
Definition: avcodec.h:2240
FilterCommand::command
char * command
Definition: ffmpeg_filter.c:246
src
#define src
Definition: vp8dsp.c:248
FilterCommand
Definition: ffmpeg_filter.c:244
AV_FIFO_FLAG_AUTO_GROW
#define AV_FIFO_FLAG_AUTO_GROW
Automatically resize the FIFO on writes, so that the data fits.
Definition: fifo.h:63
InputFilterPriv::height
int height
Definition: ffmpeg_filter.c:132
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:3090
OutputFilter::nb_frames_dup
atomic_uint_least64_t nb_frames_dup
Definition: ffmpeg.h:369
filter_complex_nbthreads
int filter_complex_nbthreads
Definition: ffmpeg_opt.c:77
InputFilterOptions::framerate
AVRational framerate
Definition: ffmpeg.h:277
av_buffersink_get_color_range
enum AVColorRange av_buffersink_get_color_range(const AVFilterContext *ctx)
ff_thread_setname
static int ff_thread_setname(const char *name)
Definition: thread.h:216
LATENCY_PROBE_FILTER_POST
@ LATENCY_PROBE_FILTER_POST
Definition: ffmpeg.h:103
FPSConvContext::framerate_supported
const AVRational * framerate_supported
Definition: ffmpeg_filter.c:184