FFmpeg
ffmpeg_filter.c
Go to the documentation of this file.
1 /*
2  * ffmpeg filter configuration
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <stdint.h>
22 
23 #include "ffmpeg.h"
24 
25 #include "libavfilter/avfilter.h"
26 #include "libavfilter/buffersink.h"
27 #include "libavfilter/buffersrc.h"
28 
29 #include "libavutil/avassert.h"
30 #include "libavutil/avstring.h"
31 #include "libavutil/bprint.h"
33 #include "libavutil/mem.h"
34 #include "libavutil/opt.h"
35 #include "libavutil/pixdesc.h"
36 #include "libavutil/pixfmt.h"
37 #include "libavutil/samplefmt.h"
38 #include "libavutil/time.h"
39 #include "libavutil/timestamp.h"
40 
41 // FIXME private header, used for mid_pred()
42 #include "libavcodec/mathops.h"
43 
44 typedef struct FilterGraphPriv {
46 
47  // name used for logging
48  char log_name[32];
49 
50  int is_simple;
51  // true when the filtergraph contains only meta filters
52  // that do not modify the frame data
53  int is_meta;
54  // source filters are present in the graph
57 
58  unsigned nb_outputs_done;
59 
60  const char *graph_desc;
61 
62  char *nb_threads;
63 
64  // frame for temporarily holding output from the filtergraph
66  // frame for sending output to the encoder
68 
70  unsigned sch_idx;
72 
74 {
75  return (FilterGraphPriv*)fg;
76 }
77 
// Const-correct downcast from the public FilterGraph to its private context.
// Valid only because FilterGraphPriv embeds the FilterGraph as its first
// member (struct layout partially elided in this view — confirm).
static const FilterGraphPriv *cfgp_from_cfg(const FilterGraph *fg)
{
    return (const FilterGraphPriv*)fg;
}
82 
83 // data that is local to the filter thread and not visible outside of it
84 typedef struct FilterGraphThread {
86 
88 
89  // Temporary buffer for output frames, since on filtergraph reset
90  // we cannot send them to encoders immediately.
91  // The output index is stored in frame opaque.
93 
94  // index of the next input to request from the scheduler
95  unsigned next_in;
96  // set to 1 after at least one frame passed through this output
97  int got_frame;
98 
99  // EOF status of each input/output, as received by the thread
100  uint8_t *eof_in;
101  uint8_t *eof_out;
103 
104 typedef struct InputFilterPriv {
106 
108 
109  int index;
110 
112 
113  // used to hold submitted input
115 
116  /* for filters that are not yet bound to an input stream,
117  * this stores the input linklabel, if any */
118  uint8_t *linklabel;
119 
120  // filter data type
122  // source data type: AVMEDIA_TYPE_SUBTITLE for sub2video,
123  // same as type otherwise
125 
126  int eof;
127  int bound;
128 
129  // parameters configured for this input
130  int format;
131 
132  int width, height;
136 
139 
141 
143 
145 
149 
150  struct {
151  AVFrame *frame;
152 
153  int64_t last_pts;
154  int64_t end_pts;
155 
156  ///< marks if sub2video_update should force an initialization
157  unsigned int initialize;
158  } sub2video;
160 
162 {
163  return (InputFilterPriv*)ifilter;
164 }
165 
166 typedef struct FPSConvContext {
168  /* number of frames emitted by the video-encoding sync code */
169  int64_t frame_number;
170  /* history of nb_frames_prev, i.e. the number of times the
171  * previous frame was duplicated by vsync code in recent
172  * do_video_out() calls */
173  int64_t frames_prev_hist[3];
174 
175  uint64_t dup_warning;
176 
179 
181 
187 
188 typedef struct OutputFilterPriv {
190 
191  int index;
192 
193  void *log_parent;
194  char log_name[32];
195 
196  char *name;
197 
199 
200  /* desired output stream properties */
201  int format;
202  int width, height;
205 
206  // time base in which the output is sent to our downstream
207  // does not need to match the filtersink's timebase
209  // at least one frame with the above timebase was sent
210  // to our downstream, so it cannot change anymore
212 
214 
217 
218  // those are only set if no format is specified and the encoder gives us multiple options
219  // They point directly to the relevant lists of the encoder.
220  const int *formats;
222  const int *sample_rates;
223 
225  int64_t trim_start_us;
227  // offset for output timestamps, in AV_TIME_BASE_Q
228  int64_t ts_offset;
229  int64_t next_pts;
231 
232  unsigned flags;
234 
236 {
237  return (OutputFilterPriv*)ofilter;
238 }
239 
240 typedef struct FilterCommand {
241  char *target;
242  char *command;
243  char *arg;
244 
245  double time;
247 } FilterCommand;
248 
// AVBufferRef free callback for buffers carrying a FilterCommand: releases
// the owned strings, then the FilterCommand itself. `opaque` is unused.
static void filter_command_free(void *opaque, uint8_t *data)
{
    // NOTE(review): the cast of `data` to FilterCommand* (`fc`) was elided in
    // this extract — confirm against the original source.

    av_freep(&fc->target);
    av_freep(&fc->command);
    av_freep(&fc->arg);

    av_free(data);
}
259 
261 {
262  AVFrame *frame = ifp->sub2video.frame;
263  int ret;
264 
266 
267  frame->width = ifp->width;
268  frame->height = ifp->height;
269  frame->format = ifp->format;
270  frame->colorspace = ifp->color_space;
271  frame->color_range = ifp->color_range;
272 
274  if (ret < 0)
275  return ret;
276 
277  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
278 
279  return 0;
280 }
281 
282 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
283  AVSubtitleRect *r)
284 {
285  uint32_t *pal, *dst2;
286  uint8_t *src, *src2;
287  int x, y;
288 
289  if (r->type != SUBTITLE_BITMAP) {
290  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
291  return;
292  }
293  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
294  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
295  r->x, r->y, r->w, r->h, w, h
296  );
297  return;
298  }
299 
300  dst += r->y * dst_linesize + r->x * 4;
301  src = r->data[0];
302  pal = (uint32_t *)r->data[1];
303  for (y = 0; y < r->h; y++) {
304  dst2 = (uint32_t *)dst;
305  src2 = src;
306  for (x = 0; x < r->w; x++)
307  *(dst2++) = pal[*(src2++)];
308  dst += dst_linesize;
309  src += r->linesize[0];
310  }
311 }
312 
313 static void sub2video_push_ref(InputFilterPriv *ifp, int64_t pts)
314 {
315  AVFrame *frame = ifp->sub2video.frame;
316  int ret;
317 
318  av_assert1(frame->data[0]);
319  ifp->sub2video.last_pts = frame->pts = pts;
323  if (ret != AVERROR_EOF && ret < 0)
324  av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n",
325  av_err2str(ret));
326 }
327 
328 static void sub2video_update(InputFilterPriv *ifp, int64_t heartbeat_pts,
329  const AVSubtitle *sub)
330 {
331  AVFrame *frame = ifp->sub2video.frame;
332  int8_t *dst;
333  int dst_linesize;
334  int num_rects;
335  int64_t pts, end_pts;
336 
337  if (sub) {
338  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
339  AV_TIME_BASE_Q, ifp->time_base);
340  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
341  AV_TIME_BASE_Q, ifp->time_base);
342  num_rects = sub->num_rects;
343  } else {
344  /* If we are initializing the system, utilize current heartbeat
345  PTS as the start time, and show until the following subpicture
346  is received. Otherwise, utilize the previous subpicture's end time
347  as the fall-back value. */
348  pts = ifp->sub2video.initialize ?
349  heartbeat_pts : ifp->sub2video.end_pts;
350  end_pts = INT64_MAX;
351  num_rects = 0;
352  }
353  if (sub2video_get_blank_frame(ifp) < 0) {
355  "Impossible to get a blank canvas.\n");
356  return;
357  }
358  dst = frame->data [0];
359  dst_linesize = frame->linesize[0];
360  for (int i = 0; i < num_rects; i++)
361  sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
362  sub2video_push_ref(ifp, pts);
363  ifp->sub2video.end_pts = end_pts;
364  ifp->sub2video.initialize = 0;
365 }
366 
367 /* *dst may return be set to NULL (no pixel format found), a static string or a
368  * string backed by the bprint. Nothing has been written to the AVBPrint in case
369  * NULL is returned. The AVBPrint provided should be clean. */
370 static int choose_pix_fmts(OutputFilter *ofilter, AVBPrint *bprint,
371  const char **dst)
372 {
373  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
374 
375  *dst = NULL;
376 
378  *dst = ofp->format == AV_PIX_FMT_NONE ? NULL :
380  } else if (ofp->formats) {
381  const enum AVPixelFormat *p = ofp->formats;
382 
383  for (; *p != AV_PIX_FMT_NONE; p++) {
384  const char *name = av_get_pix_fmt_name(*p);
385  av_bprintf(bprint, "%s%c", name, p[1] == AV_PIX_FMT_NONE ? '\0' : '|');
386  }
387  if (!av_bprint_is_complete(bprint))
388  return AVERROR(ENOMEM);
389 
390  *dst = bprint->str;
391  }
392 
393  return 0;
394 }
395 
/* Define a function for appending a list of allowed formats
 * to an AVBPrint. If nonempty, the list will have a header.
 *
 * The generated choose_<name>() emits "name=" followed by either the single
 * configured value (when ofp->var != none) or a '|'-separated list of all
 * supported values, and terminates the option with ':'. Nothing is written
 * when neither a value nor a supported list is present. The --bprint->len
 * trick strips the trailing '|' left by the list loop. */
#define DEF_CHOOSE_FORMAT(name, type, var, supported_list, none, printf_format, get_name) \
static void choose_ ## name (OutputFilterPriv *ofp, AVBPrint *bprint) \
{ \
    if (ofp->var == none && !ofp->supported_list) \
        return; \
    av_bprintf(bprint, #name "="); \
    if (ofp->var != none) { \
        av_bprintf(bprint, printf_format, get_name(ofp->var)); \
    } else { \
        const type *p; \
        \
        for (p = ofp->supported_list; *p != none; p++) { \
            av_bprintf(bprint, printf_format "|", get_name(*p)); \
        } \
        if (bprint->len > 0) \
            bprint->str[--bprint->len] = '\0'; \
    } \
    av_bprint_chars(bprint, ':', 1); \
}
417 
418 //DEF_CHOOSE_FORMAT(pix_fmts, enum AVPixelFormat, format, formats, AV_PIX_FMT_NONE,
419 // GET_PIX_FMT_NAME)
420 
423 
425  "%d", )
426 
427 static void choose_channel_layouts(OutputFilterPriv *ofp, AVBPrint *bprint)
428 {
429  if (av_channel_layout_check(&ofp->ch_layout)) {
430  av_bprintf(bprint, "channel_layouts=");
431  av_channel_layout_describe_bprint(&ofp->ch_layout, bprint);
432  } else if (ofp->ch_layouts) {
433  const AVChannelLayout *p;
434 
435  av_bprintf(bprint, "channel_layouts=");
436  for (p = ofp->ch_layouts; p->nb_channels; p++) {
438  av_bprintf(bprint, "|");
439  }
440  if (bprint->len > 0)
441  bprint->str[--bprint->len] = '\0';
442  } else
443  return;
444  av_bprint_chars(bprint, ':', 1);
445 }
446 
447 static int read_binary(const char *path, uint8_t **data, int *len)
448 {
449  AVIOContext *io = NULL;
450  int64_t fsize;
451  int ret;
452 
453  *data = NULL;
454  *len = 0;
455 
456  ret = avio_open2(&io, path, AVIO_FLAG_READ, &int_cb, NULL);
457  if (ret < 0) {
458  av_log(NULL, AV_LOG_ERROR, "Cannot open file '%s': %s\n",
459  path, av_err2str(ret));
460  return ret;
461  }
462 
463  fsize = avio_size(io);
464  if (fsize < 0 || fsize > INT_MAX) {
465  av_log(NULL, AV_LOG_ERROR, "Cannot obtain size of file %s\n", path);
466  ret = AVERROR(EIO);
467  goto fail;
468  }
469 
470  *data = av_malloc(fsize);
471  if (!*data) {
472  ret = AVERROR(ENOMEM);
473  goto fail;
474  }
475 
476  ret = avio_read(io, *data, fsize);
477  if (ret != fsize) {
478  av_log(NULL, AV_LOG_ERROR, "Error reading file %s\n", path);
479  ret = ret < 0 ? ret : AVERROR(EIO);
480  goto fail;
481  }
482 
483  *len = fsize;
484 
485  ret = 0;
486 fail:
487  avio_close(io);
488  if (ret < 0) {
489  av_freep(data);
490  *len = 0;
491  }
492  return ret;
493 }
494 
495 static int filter_opt_apply(AVFilterContext *f, const char *key, const char *val)
496 {
497  const AVOption *o = NULL;
498  int ret;
499 
501  if (ret >= 0)
502  return 0;
503 
504  if (ret == AVERROR_OPTION_NOT_FOUND && key[0] == '/')
506  if (!o)
507  goto err_apply;
508 
509  // key is a valid option name prefixed with '/'
510  // interpret value as a path from which to load the actual option value
511  key++;
512 
513  if (o->type == AV_OPT_TYPE_BINARY) {
514  uint8_t *data;
515  int len;
516 
517  ret = read_binary(val, &data, &len);
518  if (ret < 0)
519  goto err_load;
520 
522  av_freep(&data);
523  } else {
524  char *data = file_read(val);
525  if (!data) {
526  ret = AVERROR(EIO);
527  goto err_load;
528  }
529 
531  av_freep(&data);
532  }
533  if (ret < 0)
534  goto err_apply;
535 
536  return 0;
537 
538 err_apply:
540  "Error applying option '%s' to filter '%s': %s\n",
541  key, f->filter->name, av_err2str(ret));
542  return ret;
543 err_load:
545  "Error loading value for option '%s' from file '%s'\n",
546  key, val);
547  return ret;
548 }
549 
551 {
552  for (size_t i = 0; i < seg->nb_chains; i++) {
553  AVFilterChain *ch = seg->chains[i];
554 
555  for (size_t j = 0; j < ch->nb_filters; j++) {
556  AVFilterParams *p = ch->filters[j];
557  const AVDictionaryEntry *e = NULL;
558 
559  av_assert0(p->filter);
560 
561  while ((e = av_dict_iterate(p->opts, e))) {
562  int ret = filter_opt_apply(p->filter, e->key, e->value);
563  if (ret < 0)
564  return ret;
565  }
566 
567  av_dict_free(&p->opts);
568  }
569  }
570 
571  return 0;
572 }
573 
574 static int graph_parse(AVFilterGraph *graph, const char *desc,
576  AVBufferRef *hw_device)
577 {
579  int ret;
580 
581  *inputs = NULL;
582  *outputs = NULL;
583 
584  ret = avfilter_graph_segment_parse(graph, desc, 0, &seg);
585  if (ret < 0)
586  return ret;
587 
589  if (ret < 0)
590  goto fail;
591 
592  if (hw_device) {
593  for (int i = 0; i < graph->nb_filters; i++) {
594  AVFilterContext *f = graph->filters[i];
595 
596  if (!(f->filter->flags & AVFILTER_FLAG_HWDEVICE))
597  continue;
598  f->hw_device_ctx = av_buffer_ref(hw_device);
599  if (!f->hw_device_ctx) {
600  ret = AVERROR(ENOMEM);
601  goto fail;
602  }
603  }
604  }
605 
606  ret = graph_opts_apply(seg);
607  if (ret < 0)
608  goto fail;
609 
611 
612 fail:
614  return ret;
615 }
616 
617 // Filters can be configured only if the formats of all inputs are known.
619 {
620  for (int i = 0; i < fg->nb_inputs; i++) {
622  if (ifp->format < 0)
623  return 0;
624  }
625  return 1;
626 }
627 
628 static int filter_thread(void *arg);
629 
630 static char *describe_filter_link(FilterGraph *fg, AVFilterInOut *inout, int in)
631 {
632  AVFilterContext *ctx = inout->filter_ctx;
633  AVFilterPad *pads = in ? ctx->input_pads : ctx->output_pads;
634  int nb_pads = in ? ctx->nb_inputs : ctx->nb_outputs;
635 
636  if (nb_pads > 1)
637  return av_strdup(ctx->filter->name);
638  return av_asprintf("%s:%s", ctx->filter->name,
639  avfilter_pad_get_name(pads, inout->pad_idx));
640 }
641 
642 static const char *ofilter_item_name(void *obj)
643 {
644  OutputFilterPriv *ofp = obj;
645  return ofp->log_name;
646 }
647 
// AVClass for OutputFilterPriv: routes log messages through log_parent and
// names them via ofilter_item_name().
static const AVClass ofilter_class = {
    .class_name                = "OutputFilter",
    .version                   = LIBAVUTIL_VERSION_INT,
    .item_name                 = ofilter_item_name,
    .parent_log_context_offset = offsetof(OutputFilterPriv, log_parent),
    .category                  = AV_CLASS_CATEGORY_FILTER,
};
655 
657 {
658  OutputFilterPriv *ofp;
659  OutputFilter *ofilter;
660 
661  ofp = allocate_array_elem(&fg->outputs, sizeof(*ofp), &fg->nb_outputs);
662  if (!ofp)
663  return NULL;
664 
665  ofilter = &ofp->ofilter;
666  ofilter->class = &ofilter_class;
667  ofp->log_parent = fg;
668  ofilter->graph = fg;
669  ofilter->type = type;
670  ofp->format = -1;
671  ofp->index = fg->nb_outputs - 1;
672 
673  snprintf(ofp->log_name, sizeof(ofp->log_name), "%co%d",
675 
676  return ofilter;
677 }
678 
679 static int ifilter_bind_ist(InputFilter *ifilter, InputStream *ist)
680 {
681  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
682  FilterGraphPriv *fgp = fgp_from_fg(ifilter->graph);
683  int ret, dec_idx;
684 
685  av_assert0(!ifp->bound);
686  ifp->bound = 1;
687 
688  if (ifp->type != ist->par->codec_type &&
690  av_log(fgp, AV_LOG_ERROR, "Tried to connect %s stream to %s filtergraph input\n",
692  return AVERROR(EINVAL);
693  }
694 
695  ifp->type_src = ist->st->codecpar->codec_type;
696 
697  ifp->opts.fallback = av_frame_alloc();
698  if (!ifp->opts.fallback)
699  return AVERROR(ENOMEM);
700 
701  dec_idx = ist_filter_add(ist, ifilter, filtergraph_is_simple(ifilter->graph),
702  &ifp->opts);
703  if (dec_idx < 0)
704  return dec_idx;
705 
706  ret = sch_connect(fgp->sch, SCH_DEC(dec_idx),
707  SCH_FILTER_IN(fgp->sch_idx, ifp->index));
708  if (ret < 0)
709  return ret;
710 
711  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE) {
712  ifp->sub2video.frame = av_frame_alloc();
713  if (!ifp->sub2video.frame)
714  return AVERROR(ENOMEM);
715 
716  ifp->width = ifp->opts.sub2video_width;
717  ifp->height = ifp->opts.sub2video_height;
718 
719  /* rectangles are AV_PIX_FMT_PAL8, but we have no guarantee that the
720  palettes for all rectangles are identical or compatible */
721  ifp->format = AV_PIX_FMT_RGB32;
722 
723  ifp->time_base = AV_TIME_BASE_Q;
724 
725  av_log(fgp, AV_LOG_VERBOSE, "sub2video: using %dx%d canvas\n",
726  ifp->width, ifp->height);
727  }
728 
729  return 0;
730 }
731 
733 {
735  int ret, dec_idx;
736 
737  av_assert0(!ifp->bound);
738  ifp->bound = 1;
739 
740  if (ifp->type != dec->type) {
741  av_log(fgp, AV_LOG_ERROR, "Tried to connect %s decoder to %s filtergraph input\n",
743  return AVERROR(EINVAL);
744  }
745 
746  ifp->type_src = ifp->type;
747 
748  dec_idx = dec_filter_add(dec, &ifp->ifilter, &ifp->opts);
749  if (dec_idx < 0)
750  return dec_idx;
751 
752  ret = sch_connect(fgp->sch, SCH_DEC(dec_idx),
753  SCH_FILTER_IN(fgp->sch_idx, ifp->index));
754  if (ret < 0)
755  return ret;
756 
757  return 0;
758 }
759 
760 static int set_channel_layout(OutputFilterPriv *f, const AVChannelLayout *layouts_allowed,
761  const AVChannelLayout *layout_requested)
762 {
763  int i, err;
764 
765  if (layout_requested->order != AV_CHANNEL_ORDER_UNSPEC) {
766  /* Pass the layout through for all orders but UNSPEC */
767  err = av_channel_layout_copy(&f->ch_layout, layout_requested);
768  if (err < 0)
769  return err;
770  return 0;
771  }
772 
773  /* Requested layout is of order UNSPEC */
774  if (!layouts_allowed) {
775  /* Use the default native layout for the requested amount of channels when the
776  encoder doesn't have a list of supported layouts */
777  av_channel_layout_default(&f->ch_layout, layout_requested->nb_channels);
778  return 0;
779  }
780  /* Encoder has a list of supported layouts. Pick the first layout in it with the
781  same amount of channels as the requested layout */
782  for (i = 0; layouts_allowed[i].nb_channels; i++) {
783  if (layouts_allowed[i].nb_channels == layout_requested->nb_channels)
784  break;
785  }
786  if (layouts_allowed[i].nb_channels) {
787  /* Use it if one is found */
788  err = av_channel_layout_copy(&f->ch_layout, &layouts_allowed[i]);
789  if (err < 0)
790  return err;
791  return 0;
792  }
793  /* If no layout for the amount of channels requested was found, use the default
794  native layout for it. */
795  av_channel_layout_default(&f->ch_layout, layout_requested->nb_channels);
796 
797  return 0;
798 }
799 
801  unsigned sched_idx_enc,
802  const OutputFilterOptions *opts)
803 {
804  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
805  FilterGraph *fg = ofilter->graph;
806  FilterGraphPriv *fgp = fgp_from_fg(fg);
807  int ret;
808 
809  av_assert0(!ofilter->bound);
810  av_assert0(ofilter->type == ost->type);
811 
812  ofilter->bound = 1;
813  av_freep(&ofilter->linklabel);
814 
815  ofp->flags = opts->flags;
816  ofp->ts_offset = opts->ts_offset;
817  ofp->enc_timebase = opts->output_tb;
818 
819  ofp->trim_start_us = opts->trim_start_us;
820  ofp->trim_duration_us = opts->trim_duration_us;
821 
822  ofp->name = av_strdup(opts->name);
823  if (!ofp->name)
824  return AVERROR(EINVAL);
825 
826  ret = av_dict_copy(&ofp->sws_opts, opts->sws_opts, 0);
827  if (ret < 0)
828  return ret;
829 
830  ret = av_dict_copy(&ofp->swr_opts, opts->swr_opts, 0);
831  if (ret < 0)
832  return ret;
833 
834  if (opts->flags & OFILTER_FLAG_AUDIO_24BIT)
835  av_dict_set(&ofp->swr_opts, "output_sample_bits", "24", 0);
836 
837  if (fgp->is_simple) {
838  // for simple filtergraph there is just one output,
839  // so use only graph-level information for logging
840  ofp->log_parent = NULL;
841  av_strlcpy(ofp->log_name, fgp->log_name, sizeof(ofp->log_name));
842  } else
843  av_strlcatf(ofp->log_name, sizeof(ofp->log_name), "->%s", ofp->name);
844 
845  switch (ofilter->type) {
846  case AVMEDIA_TYPE_VIDEO:
847  ofp->width = opts->width;
848  ofp->height = opts->height;
849  if (opts->format != AV_PIX_FMT_NONE) {
850  ofp->format = opts->format;
851  } else if (opts->pix_fmts)
852  ofp->formats = opts->pix_fmts;
853  else if (opts->enc)
854  ofp->formats = opts->enc->pix_fmts;
855 
857 
858  ofp->fps.last_frame = av_frame_alloc();
859  if (!ofp->fps.last_frame)
860  return AVERROR(ENOMEM);
861 
862  ofp->fps.vsync_method = opts->vsync_method;
863  ofp->fps.framerate = ost->frame_rate;
864  ofp->fps.framerate_max = ost->max_frame_rate;
865  ofp->fps.framerate_supported = ost->force_fps || !opts->enc ?
866  NULL : opts->enc->supported_framerates;
867 
868  // reduce frame rate for mpeg4 to be within the spec limits
869  if (opts->enc && opts->enc->id == AV_CODEC_ID_MPEG4)
870  ofp->fps.framerate_clip = 65535;
871 
872  ofp->fps.dup_warning = 1000;
873 
874  break;
875  case AVMEDIA_TYPE_AUDIO:
876  if (opts->format != AV_SAMPLE_FMT_NONE) {
877  ofp->format = opts->format;
878  } else if (opts->enc) {
879  ofp->formats = opts->enc->sample_fmts;
880  }
881  if (opts->sample_rate) {
882  ofp->sample_rate = opts->sample_rate;
883  } else if (opts->enc) {
884  ofp->sample_rates = opts->enc->supported_samplerates;
885  }
886  if (opts->ch_layout.nb_channels) {
887  int ret = set_channel_layout(ofp, opts->enc ? opts->enc->ch_layouts : NULL,
888  &opts->ch_layout);
889  if (ret < 0)
890  return ret;
891  } else if (opts->enc) {
892  ofp->ch_layouts = opts->enc->ch_layouts;
893  }
894  break;
895  }
896 
897  ret = sch_connect(fgp->sch, SCH_FILTER_OUT(fgp->sch_idx, ofp->index),
898  SCH_ENC(sched_idx_enc));
899  if (ret < 0)
900  return ret;
901 
902  return 0;
903 }
904 
906  const OutputFilterOptions *opts)
907 {
908  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
909 
910  av_assert0(!ofilter->bound);
911  av_assert0(ofilter->type == ifp->type);
912 
913  ofilter->bound = 1;
914  av_freep(&ofilter->linklabel);
915 
916  ofp->name = av_strdup(opts->name);
917  if (!ofp->name)
918  return AVERROR(EINVAL);
919 
920  av_strlcatf(ofp->log_name, sizeof(ofp->log_name), "->%s", ofp->name);
921 
922  return 0;
923 }
924 
925 static int ifilter_bind_fg(InputFilterPriv *ifp, FilterGraph *fg_src, int out_idx)
926 {
928  OutputFilter *ofilter_src = fg_src->outputs[out_idx];
930  char name[32];
931  int ret;
932 
933  av_assert0(!ifp->bound);
934  ifp->bound = 1;
935 
936  if (ifp->type != ofilter_src->type) {
937  av_log(fgp, AV_LOG_ERROR, "Tried to connect %s output to %s input\n",
938  av_get_media_type_string(ofilter_src->type),
940  return AVERROR(EINVAL);
941  }
942 
943  ifp->type_src = ifp->type;
944 
945  memset(&opts, 0, sizeof(opts));
946 
947  snprintf(name, sizeof(name), "fg:%d:%d", fgp->fg.index, ifp->index);
948  opts.name = name;
949 
950  ret = ofilter_bind_ifilter(ofilter_src, ifp, &opts);
951  if (ret < 0)
952  return ret;
953 
954  ret = sch_connect(fgp->sch, SCH_FILTER_OUT(fg_src->index, out_idx),
955  SCH_FILTER_IN(fgp->sch_idx, ifp->index));
956  if (ret < 0)
957  return ret;
958 
959  return 0;
960 }
961 
963 {
964  InputFilterPriv *ifp;
965  InputFilter *ifilter;
966 
967  ifp = allocate_array_elem(&fg->inputs, sizeof(*ifp), &fg->nb_inputs);
968  if (!ifp)
969  return NULL;
970 
971  ifilter = &ifp->ifilter;
972  ifilter->graph = fg;
973 
974  ifp->frame = av_frame_alloc();
975  if (!ifp->frame)
976  return NULL;
977 
978  ifp->index = fg->nb_inputs - 1;
979  ifp->format = -1;
982 
984  if (!ifp->frame_queue)
985  return NULL;
986 
987  return ifilter;
988 }
989 
990 void fg_free(FilterGraph **pfg)
991 {
992  FilterGraph *fg = *pfg;
993  FilterGraphPriv *fgp;
994 
995  if (!fg)
996  return;
997  fgp = fgp_from_fg(fg);
998 
999  for (int j = 0; j < fg->nb_inputs; j++) {
1000  InputFilter *ifilter = fg->inputs[j];
1001  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1002 
1003  if (ifp->frame_queue) {
1004  AVFrame *frame;
1005  while (av_fifo_read(ifp->frame_queue, &frame, 1) >= 0)
1006  av_frame_free(&frame);
1007  av_fifo_freep2(&ifp->frame_queue);
1008  }
1009  av_frame_free(&ifp->sub2video.frame);
1010 
1011  av_frame_free(&ifp->frame);
1012  av_frame_free(&ifp->opts.fallback);
1013 
1015  av_freep(&ifp->linklabel);
1016  av_freep(&ifp->opts.name);
1017  av_freep(&ifilter->name);
1018  av_freep(&fg->inputs[j]);
1019  }
1020  av_freep(&fg->inputs);
1021  for (int j = 0; j < fg->nb_outputs; j++) {
1022  OutputFilter *ofilter = fg->outputs[j];
1023  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
1024 
1025  av_frame_free(&ofp->fps.last_frame);
1026  av_dict_free(&ofp->sws_opts);
1027  av_dict_free(&ofp->swr_opts);
1028 
1029  av_freep(&ofilter->linklabel);
1030  av_freep(&ofilter->name);
1031  av_freep(&ofilter->apad);
1032  av_freep(&ofp->name);
1034  av_freep(&fg->outputs[j]);
1035  }
1036  av_freep(&fg->outputs);
1037  av_freep(&fgp->graph_desc);
1038  av_freep(&fgp->nb_threads);
1039 
1040  av_frame_free(&fgp->frame);
1041  av_frame_free(&fgp->frame_enc);
1042 
1043  av_freep(pfg);
1044 }
1045 
1046 static const char *fg_item_name(void *obj)
1047 {
1048  const FilterGraphPriv *fgp = obj;
1049 
1050  return fgp->log_name;
1051 }
1052 
// AVClass for FilterGraphPriv, used for logging; names instances via
// fg_item_name().
static const AVClass fg_class = {
    .class_name = "FilterGraph",
    .version    = LIBAVUTIL_VERSION_INT,
    .item_name  = fg_item_name,
    .category   = AV_CLASS_CATEGORY_FILTER,
};
1059 
1060 int fg_create(FilterGraph **pfg, char *graph_desc, Scheduler *sch)
1061 {
1062  FilterGraphPriv *fgp;
1063  FilterGraph *fg;
1064 
1066  AVFilterGraph *graph;
1067  int ret = 0;
1068 
1069  fgp = av_mallocz(sizeof(*fgp));
1070  if (!fgp)
1071  return AVERROR(ENOMEM);
1072  fg = &fgp->fg;
1073 
1074  if (pfg) {
1075  *pfg = fg;
1076  fg->index = -1;
1077  } else {
1079  if (ret < 0) {
1080  av_freep(&fgp);
1081  return ret;
1082  }
1083 
1084  fg->index = nb_filtergraphs - 1;
1085  }
1086 
1087  fg->class = &fg_class;
1088  fgp->graph_desc = graph_desc;
1090  fgp->sch = sch;
1091 
1092  snprintf(fgp->log_name, sizeof(fgp->log_name), "fc#%d", fg->index);
1093 
1094  fgp->frame = av_frame_alloc();
1095  fgp->frame_enc = av_frame_alloc();
1096  if (!fgp->frame || !fgp->frame_enc)
1097  return AVERROR(ENOMEM);
1098 
1099  /* this graph is only used for determining the kinds of inputs
1100  * and outputs we have, and is discarded on exit from this function */
1101  graph = avfilter_graph_alloc();
1102  if (!graph)
1103  return AVERROR(ENOMEM);;
1104  graph->nb_threads = 1;
1105 
1106  ret = graph_parse(graph, fgp->graph_desc, &inputs, &outputs, NULL);
1107  if (ret < 0)
1108  goto fail;
1109 
1110  for (unsigned i = 0; i < graph->nb_filters; i++) {
1111  const AVFilter *f = graph->filters[i]->filter;
1112  if (!avfilter_filter_pad_count(f, 0) &&
1113  !(f->flags & AVFILTER_FLAG_DYNAMIC_INPUTS)) {
1114  fgp->have_sources = 1;
1115  break;
1116  }
1117  }
1118 
1119  for (AVFilterInOut *cur = inputs; cur; cur = cur->next) {
1120  InputFilter *const ifilter = ifilter_alloc(fg);
1121  InputFilterPriv *ifp;
1122 
1123  if (!ifilter) {
1124  ret = AVERROR(ENOMEM);
1125  goto fail;
1126  }
1127 
1128  ifp = ifp_from_ifilter(ifilter);
1129  ifp->linklabel = cur->name;
1130  cur->name = NULL;
1131 
1132  ifp->type = avfilter_pad_get_type(cur->filter_ctx->input_pads,
1133  cur->pad_idx);
1134 
1135  if (ifp->type != AVMEDIA_TYPE_VIDEO && ifp->type != AVMEDIA_TYPE_AUDIO) {
1136  av_log(fg, AV_LOG_FATAL, "Only video and audio filters supported "
1137  "currently.\n");
1138  ret = AVERROR(ENOSYS);
1139  goto fail;
1140  }
1141 
1142  ifilter->name = describe_filter_link(fg, cur, 1);
1143  if (!ifilter->name) {
1144  ret = AVERROR(ENOMEM);
1145  goto fail;
1146  }
1147  }
1148 
1149  for (AVFilterInOut *cur = outputs; cur; cur = cur->next) {
1150  const enum AVMediaType type = avfilter_pad_get_type(cur->filter_ctx->output_pads,
1151  cur->pad_idx);
1152  OutputFilter *const ofilter = ofilter_alloc(fg, type);
1153 
1154  if (!ofilter) {
1155  ret = AVERROR(ENOMEM);
1156  goto fail;
1157  }
1158 
1159  ofilter->linklabel = cur->name;
1160  cur->name = NULL;
1161 
1162  ofilter->name = describe_filter_link(fg, cur, 0);
1163  if (!ofilter->name) {
1164  ret = AVERROR(ENOMEM);
1165  goto fail;
1166  }
1167  }
1168 
1169  if (!fg->nb_outputs) {
1170  av_log(fg, AV_LOG_FATAL, "A filtergraph has zero outputs, this is not supported\n");
1171  ret = AVERROR(ENOSYS);
1172  goto fail;
1173  }
1174 
1175  ret = sch_add_filtergraph(sch, fg->nb_inputs, fg->nb_outputs,
1176  filter_thread, fgp);
1177  if (ret < 0)
1178  goto fail;
1179  fgp->sch_idx = ret;
1180 
1181 fail:
1184  avfilter_graph_free(&graph);
1185 
1186  if (ret < 0)
1187  return ret;
1188 
1189  return 0;
1190 }
1191 
1193  char *graph_desc,
1194  Scheduler *sch, unsigned sched_idx_enc,
1195  const OutputFilterOptions *opts)
1196 {
1197  FilterGraph *fg;
1198  FilterGraphPriv *fgp;
1199  int ret;
1200 
1201  ret = fg_create(&ost->fg_simple, graph_desc, sch);
1202  if (ret < 0)
1203  return ret;
1204  fg = ost->fg_simple;
1205  fgp = fgp_from_fg(fg);
1206 
1207  fgp->is_simple = 1;
1208 
1209  snprintf(fgp->log_name, sizeof(fgp->log_name), "%cf%s",
1210  av_get_media_type_string(ost->type)[0], opts->name);
1211 
1212  if (fg->nb_inputs != 1 || fg->nb_outputs != 1) {
1213  av_log(fg, AV_LOG_ERROR, "Simple filtergraph '%s' was expected "
1214  "to have exactly 1 input and 1 output. "
1215  "However, it had %d input(s) and %d output(s). Please adjust, "
1216  "or use a complex filtergraph (-filter_complex) instead.\n",
1217  graph_desc, fg->nb_inputs, fg->nb_outputs);
1218  return AVERROR(EINVAL);
1219  }
1220  if (fg->outputs[0]->type != ost->type) {
1221  av_log(fg, AV_LOG_ERROR, "Filtergraph has a %s output, cannot connect "
1222  "it to %s output stream\n",
1224  av_get_media_type_string(ost->type));
1225  return AVERROR(EINVAL);
1226  }
1227 
1228  ost->filter = fg->outputs[0];
1229 
1230  ret = ifilter_bind_ist(fg->inputs[0], ist);
1231  if (ret < 0)
1232  return ret;
1233 
1234  ret = ofilter_bind_ost(fg->outputs[0], ost, sched_idx_enc, opts);
1235  if (ret < 0)
1236  return ret;
1237 
1238  if (opts->nb_threads) {
1239  av_freep(&fgp->nb_threads);
1240  fgp->nb_threads = av_strdup(opts->nb_threads);
1241  if (!fgp->nb_threads)
1242  return AVERROR(ENOMEM);
1243  }
1244 
1245  return 0;
1246 }
1247 
1249 {
1250  FilterGraphPriv *fgp = fgp_from_fg(fg);
1251  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1252  InputStream *ist = NULL;
1253  enum AVMediaType type = ifp->type;
1254  int i, ret;
1255 
1256  if (ifp->linklabel && !strncmp(ifp->linklabel, "dec:", 4)) {
1257  // bind to a standalone decoder
1258  int dec_idx;
1259 
1260  dec_idx = strtol(ifp->linklabel + 4, NULL, 0);
1261  if (dec_idx < 0 || dec_idx >= nb_decoders) {
1262  av_log(fg, AV_LOG_ERROR, "Invalid decoder index %d in filtergraph description %s\n",
1263  dec_idx, fgp->graph_desc);
1264  return AVERROR(EINVAL);
1265  }
1266 
1267  ret = ifilter_bind_dec(ifp, decoders[dec_idx]);
1268  if (ret < 0)
1269  av_log(fg, AV_LOG_ERROR, "Error binding a decoder to filtergraph input %s\n",
1270  ifilter->name);
1271  return ret;
1272  } else if (ifp->linklabel) {
1273  AVFormatContext *s;
1274  AVStream *st = NULL;
1275  char *p;
1276  int file_idx;
1277 
1278  // try finding an unbound filtergraph output with this label
1279  for (int i = 0; i < nb_filtergraphs; i++) {
1280  FilterGraph *fg_src = filtergraphs[i];
1281 
1282  if (fg == fg_src)
1283  continue;
1284 
1285  for (int j = 0; j < fg_src->nb_outputs; j++) {
1286  OutputFilter *ofilter = fg_src->outputs[j];
1287 
1288  if (!ofilter->bound && ofilter->linklabel &&
1289  !strcmp(ofilter->linklabel, ifp->linklabel)) {
1290  av_log(fg, AV_LOG_VERBOSE,
1291  "Binding input with label '%s' to filtergraph output %d:%d\n",
1292  ifp->linklabel, i, j);
1293 
1294  ret = ifilter_bind_fg(ifp, fg_src, j);
1295  if (ret < 0)
1296  av_log(fg, AV_LOG_ERROR, "Error binding filtergraph input %s\n",
1297  ifp->linklabel);
1298  return ret;
1299  }
1300  }
1301  }
1302 
1303  // bind to an explicitly specified demuxer stream
1304  file_idx = strtol(ifp->linklabel, &p, 0);
1305  if (file_idx < 0 || file_idx >= nb_input_files) {
1306  av_log(fg, AV_LOG_FATAL, "Invalid file index %d in filtergraph description %s.\n",
1307  file_idx, fgp->graph_desc);
1308  return AVERROR(EINVAL);
1309  }
1310  s = input_files[file_idx]->ctx;
1311 
1312  for (i = 0; i < s->nb_streams; i++) {
1313  enum AVMediaType stream_type = s->streams[i]->codecpar->codec_type;
1314  if (stream_type != type &&
1315  !(stream_type == AVMEDIA_TYPE_SUBTITLE &&
1316  type == AVMEDIA_TYPE_VIDEO /* sub2video hack */))
1317  continue;
1318  if (check_stream_specifier(s, s->streams[i], *p == ':' ? p + 1 : p) == 1) {
1319  st = s->streams[i];
1320  break;
1321  }
1322  }
1323  if (!st) {
1324  av_log(fg, AV_LOG_FATAL, "Stream specifier '%s' in filtergraph description %s "
1325  "matches no streams.\n", p, fgp->graph_desc);
1326  return AVERROR(EINVAL);
1327  }
1328  ist = input_files[file_idx]->streams[st->index];
1329 
1330  av_log(fg, AV_LOG_VERBOSE,
1331  "Binding input with label '%s' to input stream %d:%d\n",
1332  ifp->linklabel, ist->file->index, ist->index);
1333  } else {
1334  ist = ist_find_unused(type);
1335  if (!ist) {
1336  av_log(fg, AV_LOG_FATAL, "Cannot find a matching stream for "
1337  "unlabeled input pad %s\n", ifilter->name);
1338  return AVERROR(EINVAL);
1339  }
1340 
1341  av_log(fg, AV_LOG_VERBOSE,
1342  "Binding unlabeled input %d to input stream %d:%d\n",
1343  ifp->index, ist->file->index, ist->index);
1344  }
1345  av_assert0(ist);
1346 
1347  ret = ifilter_bind_ist(ifilter, ist);
1348  if (ret < 0) {
1349  av_log(fg, AV_LOG_ERROR,
1350  "Error binding an input stream to complex filtergraph input %s.\n",
1351  ifilter->name);
1352  return ret;
1353  }
1354 
1355  return 0;
1356 }
1357 
1358 static int bind_inputs(FilterGraph *fg)
1359 {
1360  // bind filtergraph inputs to input streams or other filtergraphs
1361  for (int i = 0; i < fg->nb_inputs; i++) {
1363  int ret;
1364 
1365  if (ifp->bound)
1366  continue;
1367 
1368  ret = fg_complex_bind_input(fg, &ifp->ifilter);
1369  if (ret < 0)
1370  return ret;
1371  }
1372 
1373  return 0;
1374 }
1375 
1377 {
1378  int ret;
1379 
1380  for (int i = 0; i < nb_filtergraphs; i++) {
1382  if (ret < 0)
1383  return ret;
1384  }
1385 
1386  // check that all outputs were bound
1387  for (int i = 0; i < nb_filtergraphs; i++) {
1388  FilterGraph *fg = filtergraphs[i];
1389 
1390  for (int j = 0; j < fg->nb_outputs; j++) {
1391  OutputFilter *output = fg->outputs[j];
1392  if (!output->bound) {
1394  "Filter %s has an unconnected output\n", output->name);
1395  return AVERROR(EINVAL);
1396  }
1397  }
1398  }
1399 
1400  return 0;
1401 }
1402 
1403 static int insert_trim(int64_t start_time, int64_t duration,
1404  AVFilterContext **last_filter, int *pad_idx,
1405  const char *filter_name)
1406 {
1407  AVFilterGraph *graph = (*last_filter)->graph;
1409  const AVFilter *trim;
1410  enum AVMediaType type = avfilter_pad_get_type((*last_filter)->output_pads, *pad_idx);
1411  const char *name = (type == AVMEDIA_TYPE_VIDEO) ? "trim" : "atrim";
1412  int ret = 0;
1413 
1414  if (duration == INT64_MAX && start_time == AV_NOPTS_VALUE)
1415  return 0;
1416 
1417  trim = avfilter_get_by_name(name);
1418  if (!trim) {
1419  av_log(NULL, AV_LOG_ERROR, "%s filter not present, cannot limit "
1420  "recording time.\n", name);
1421  return AVERROR_FILTER_NOT_FOUND;
1422  }
1423 
1424  ctx = avfilter_graph_alloc_filter(graph, trim, filter_name);
1425  if (!ctx)
1426  return AVERROR(ENOMEM);
1427 
1428  if (duration != INT64_MAX) {
1429  ret = av_opt_set_int(ctx, "durationi", duration,
1431  }
1432  if (ret >= 0 && start_time != AV_NOPTS_VALUE) {
1433  ret = av_opt_set_int(ctx, "starti", start_time,
1435  }
1436  if (ret < 0) {
1437  av_log(ctx, AV_LOG_ERROR, "Error configuring the %s filter", name);
1438  return ret;
1439  }
1440 
1442  if (ret < 0)
1443  return ret;
1444 
1445  ret = avfilter_link(*last_filter, *pad_idx, ctx, 0);
1446  if (ret < 0)
1447  return ret;
1448 
1449  *last_filter = ctx;
1450  *pad_idx = 0;
1451  return 0;
1452 }
1453 
1454 static int insert_filter(AVFilterContext **last_filter, int *pad_idx,
1455  const char *filter_name, const char *args)
1456 {
1457  AVFilterGraph *graph = (*last_filter)->graph;
1459  int ret;
1460 
1462  avfilter_get_by_name(filter_name),
1463  filter_name, args, NULL, graph);
1464  if (ret < 0)
1465  return ret;
1466 
1467  ret = avfilter_link(*last_filter, *pad_idx, ctx, 0);
1468  if (ret < 0)
1469  return ret;
1470 
1471  *last_filter = ctx;
1472  *pad_idx = 0;
1473  return 0;
1474 }
1475 
1477  OutputFilter *ofilter, AVFilterInOut *out)
1478 {
1479  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
1480  AVFilterContext *last_filter = out->filter_ctx;
1481  AVBPrint bprint;
1482  int pad_idx = out->pad_idx;
1483  int ret;
1484  const char *pix_fmts;
1485  char name[255];
1486 
1487  snprintf(name, sizeof(name), "out_%s", ofp->name);
1489  avfilter_get_by_name("buffersink"),
1490  name, NULL, NULL, graph);
1491 
1492  if (ret < 0)
1493  return ret;
1494 
1495  if ((ofp->width || ofp->height) && (ofp->flags & OFILTER_FLAG_AUTOSCALE)) {
1496  char args[255];
1498  const AVDictionaryEntry *e = NULL;
1499 
1500  snprintf(args, sizeof(args), "%d:%d",
1501  ofp->width, ofp->height);
1502 
1503  while ((e = av_dict_iterate(ofp->sws_opts, e))) {
1504  av_strlcatf(args, sizeof(args), ":%s=%s", e->key, e->value);
1505  }
1506 
1507  snprintf(name, sizeof(name), "scaler_out_%s", ofp->name);
1509  name, args, NULL, graph)) < 0)
1510  return ret;
1511  if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
1512  return ret;
1513 
1514  last_filter = filter;
1515  pad_idx = 0;
1516  }
1517 
1519  ret = choose_pix_fmts(ofilter, &bprint, &pix_fmts);
1520  if (ret < 0)
1521  return ret;
1522 
1523  if (pix_fmts) {
1525 
1527  avfilter_get_by_name("format"),
1528  "format", pix_fmts, NULL, graph);
1529  av_bprint_finalize(&bprint, NULL);
1530  if (ret < 0)
1531  return ret;
1532  if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
1533  return ret;
1534 
1535  last_filter = filter;
1536  pad_idx = 0;
1537  }
1538 
1539  snprintf(name, sizeof(name), "trim_out_%s", ofp->name);
1541  &last_filter, &pad_idx, name);
1542  if (ret < 0)
1543  return ret;
1544 
1545 
1546  if ((ret = avfilter_link(last_filter, pad_idx, ofp->filter, 0)) < 0)
1547  return ret;
1548 
1549  return 0;
1550 }
1551 
1553  OutputFilter *ofilter, AVFilterInOut *out)
1554 {
1555  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
1556  AVFilterContext *last_filter = out->filter_ctx;
1557  int pad_idx = out->pad_idx;
1558  AVBPrint args;
1559  char name[255];
1560  int ret;
1561 
1562  snprintf(name, sizeof(name), "out_%s", ofp->name);
1564  avfilter_get_by_name("abuffersink"),
1565  name, NULL, NULL, graph);
1566  if (ret < 0)
1567  return ret;
1568  if ((ret = av_opt_set_int(ofp->filter, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
1569  return ret;
1570 
1571 #define AUTO_INSERT_FILTER(opt_name, filter_name, arg) do { \
1572  AVFilterContext *filt_ctx; \
1573  \
1574  av_log(ofilter, AV_LOG_INFO, opt_name " is forwarded to lavfi " \
1575  "similarly to -af " filter_name "=%s.\n", arg); \
1576  \
1577  ret = avfilter_graph_create_filter(&filt_ctx, \
1578  avfilter_get_by_name(filter_name), \
1579  filter_name, arg, NULL, graph); \
1580  if (ret < 0) \
1581  goto fail; \
1582  \
1583  ret = avfilter_link(last_filter, pad_idx, filt_ctx, 0); \
1584  if (ret < 0) \
1585  goto fail; \
1586  \
1587  last_filter = filt_ctx; \
1588  pad_idx = 0; \
1589 } while (0)
1591 
1592  choose_sample_fmts(ofp, &args);
1593  choose_sample_rates(ofp, &args);
1594  choose_channel_layouts(ofp, &args);
1595  if (!av_bprint_is_complete(&args)) {
1596  ret = AVERROR(ENOMEM);
1597  goto fail;
1598  }
1599  if (args.len) {
1601 
1602  snprintf(name, sizeof(name), "format_out_%s", ofp->name);
1604  avfilter_get_by_name("aformat"),
1605  name, args.str, NULL, graph);
1606  if (ret < 0)
1607  goto fail;
1608 
1609  ret = avfilter_link(last_filter, pad_idx, format, 0);
1610  if (ret < 0)
1611  goto fail;
1612 
1613  last_filter = format;
1614  pad_idx = 0;
1615  }
1616 
1617  if (ofilter->apad)
1618  AUTO_INSERT_FILTER("-apad", "apad", ofilter->apad);
1619 
1620  snprintf(name, sizeof(name), "trim for output %s", ofp->name);
1622  &last_filter, &pad_idx, name);
1623  if (ret < 0)
1624  goto fail;
1625 
1626  if ((ret = avfilter_link(last_filter, pad_idx, ofp->filter, 0)) < 0)
1627  goto fail;
1628 fail:
1629  av_bprint_finalize(&args, NULL);
1630 
1631  return ret;
1632 }
1633 
1635  OutputFilter *ofilter, AVFilterInOut *out)
1636 {
1637  switch (ofilter->type) {
1638  case AVMEDIA_TYPE_VIDEO: return configure_output_video_filter(fg, graph, ofilter, out);
1639  case AVMEDIA_TYPE_AUDIO: return configure_output_audio_filter(fg, graph, ofilter, out);
1640  default: av_assert0(0); return 0;
1641  }
1642 }
1643 
1645 {
1646  ifp->sub2video.last_pts = INT64_MIN;
1647  ifp->sub2video.end_pts = INT64_MIN;
1648 
1649  /* sub2video structure has been (re-)initialized.
1650  Mark it as such so that the system will be
1651  initialized with the first received heartbeat. */
1652  ifp->sub2video.initialize = 1;
1653 }
1654 
1656  InputFilter *ifilter, AVFilterInOut *in)
1657 {
1658  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1659 
1660  AVFilterContext *last_filter;
1661  const AVFilter *buffer_filt = avfilter_get_by_name("buffer");
1662  const AVPixFmtDescriptor *desc;
1663  AVRational fr = ifp->opts.framerate;
1664  AVRational sar;
1665  AVBPrint args;
1666  char name[255];
1667  int ret, pad_idx = 0;
1669  if (!par)
1670  return AVERROR(ENOMEM);
1671 
1672  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE)
1673  sub2video_prepare(ifp);
1674 
1675  sar = ifp->sample_aspect_ratio;
1676  if(!sar.den)
1677  sar = (AVRational){0,1};
1679  av_bprintf(&args,
1680  "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:"
1681  "pixel_aspect=%d/%d:colorspace=%d:range=%d",
1682  ifp->width, ifp->height, ifp->format,
1683  ifp->time_base.num, ifp->time_base.den, sar.num, sar.den,
1684  ifp->color_space, ifp->color_range);
1685  if (fr.num && fr.den)
1686  av_bprintf(&args, ":frame_rate=%d/%d", fr.num, fr.den);
1687  snprintf(name, sizeof(name), "graph %d input from stream %s", fg->index,
1688  ifp->opts.name);
1689 
1690 
1691  if ((ret = avfilter_graph_create_filter(&ifp->filter, buffer_filt, name,
1692  args.str, NULL, graph)) < 0)
1693  goto fail;
1694  par->hw_frames_ctx = ifp->hw_frames_ctx;
1695  ret = av_buffersrc_parameters_set(ifp->filter, par);
1696  if (ret < 0)
1697  goto fail;
1698  av_freep(&par);
1699  last_filter = ifp->filter;
1700 
1702  av_assert0(desc);
1703 
1704  // TODO: insert hwaccel enabled filters like transpose_vaapi into the graph
1705  ifp->displaymatrix_applied = 0;
1706  if ((ifp->opts.flags & IFILTER_FLAG_AUTOROTATE) &&
1707  !(desc->flags & AV_PIX_FMT_FLAG_HWACCEL)) {
1708  int32_t *displaymatrix = ifp->displaymatrix;
1709  double theta;
1710 
1711  theta = get_rotation(displaymatrix);
1712 
1713  if (fabs(theta - 90) < 1.0) {
1714  ret = insert_filter(&last_filter, &pad_idx, "transpose",
1715  displaymatrix[3] > 0 ? "cclock_flip" : "clock");
1716  } else if (fabs(theta - 180) < 1.0) {
1717  if (displaymatrix[0] < 0) {
1718  ret = insert_filter(&last_filter, &pad_idx, "hflip", NULL);
1719  if (ret < 0)
1720  return ret;
1721  }
1722  if (displaymatrix[4] < 0) {
1723  ret = insert_filter(&last_filter, &pad_idx, "vflip", NULL);
1724  }
1725  } else if (fabs(theta - 270) < 1.0) {
1726  ret = insert_filter(&last_filter, &pad_idx, "transpose",
1727  displaymatrix[3] < 0 ? "clock_flip" : "cclock");
1728  } else if (fabs(theta) > 1.0) {
1729  char rotate_buf[64];
1730  snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
1731  ret = insert_filter(&last_filter, &pad_idx, "rotate", rotate_buf);
1732  } else if (fabs(theta) < 1.0) {
1733  if (displaymatrix && displaymatrix[4] < 0) {
1734  ret = insert_filter(&last_filter, &pad_idx, "vflip", NULL);
1735  }
1736  }
1737  if (ret < 0)
1738  return ret;
1739 
1740  ifp->displaymatrix_applied = 1;
1741  }
1742 
1743  snprintf(name, sizeof(name), "trim_in_%s", ifp->opts.name);
1745  &last_filter, &pad_idx, name);
1746  if (ret < 0)
1747  return ret;
1748 
1749  if ((ret = avfilter_link(last_filter, 0, in->filter_ctx, in->pad_idx)) < 0)
1750  return ret;
1751  return 0;
1752 fail:
1753  av_freep(&par);
1754 
1755  return ret;
1756 }
1757 
1759  InputFilter *ifilter, AVFilterInOut *in)
1760 {
1761  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1762  AVFilterContext *last_filter;
1763  const AVFilter *abuffer_filt = avfilter_get_by_name("abuffer");
1764  AVBPrint args;
1765  char name[255];
1766  int ret, pad_idx = 0;
1767 
1769  av_bprintf(&args, "time_base=%d/%d:sample_rate=%d:sample_fmt=%s",
1770  ifp->time_base.num, ifp->time_base.den,
1771  ifp->sample_rate,
1773  if (av_channel_layout_check(&ifp->ch_layout) &&
1775  av_bprintf(&args, ":channel_layout=");
1777  } else
1778  av_bprintf(&args, ":channels=%d", ifp->ch_layout.nb_channels);
1779  snprintf(name, sizeof(name), "graph_%d_in_%s", fg->index, ifp->opts.name);
1780 
1781  if ((ret = avfilter_graph_create_filter(&ifp->filter, abuffer_filt,
1782  name, args.str, NULL,
1783  graph)) < 0)
1784  return ret;
1785  last_filter = ifp->filter;
1786 
1787  snprintf(name, sizeof(name), "trim for input stream %s", ifp->opts.name);
1789  &last_filter, &pad_idx, name);
1790  if (ret < 0)
1791  return ret;
1792 
1793  if ((ret = avfilter_link(last_filter, 0, in->filter_ctx, in->pad_idx)) < 0)
1794  return ret;
1795 
1796  return 0;
1797 }
1798 
1800  InputFilter *ifilter, AVFilterInOut *in)
1801 {
1802  switch (ifp_from_ifilter(ifilter)->type) {
1803  case AVMEDIA_TYPE_VIDEO: return configure_input_video_filter(fg, graph, ifilter, in);
1804  case AVMEDIA_TYPE_AUDIO: return configure_input_audio_filter(fg, graph, ifilter, in);
1805  default: av_assert0(0); return 0;
1806  }
1807 }
1808 
1810 {
1811  for (int i = 0; i < fg->nb_outputs; i++)
1813  for (int i = 0; i < fg->nb_inputs; i++)
1814  ifp_from_ifilter(fg->inputs[i])->filter = NULL;
1815  avfilter_graph_free(&fgt->graph);
1816 }
1817 
1819 {
1820  return f->nb_inputs == 0 &&
1821  (!strcmp(f->filter->name, "buffer") ||
1822  !strcmp(f->filter->name, "abuffer"));
1823 }
1824 
1825 static int graph_is_meta(AVFilterGraph *graph)
1826 {
1827  for (unsigned i = 0; i < graph->nb_filters; i++) {
1828  const AVFilterContext *f = graph->filters[i];
1829 
1830  /* in addition to filters flagged as meta, also
1831  * disregard sinks and buffersources (but not other sources,
1832  * since they introduce data we are not aware of)
1833  */
1834  if (!((f->filter->flags & AVFILTER_FLAG_METADATA_ONLY) ||
1835  f->nb_outputs == 0 ||
1837  return 0;
1838  }
1839  return 1;
1840 }
1841 
1842 static int sub2video_frame(InputFilter *ifilter, AVFrame *frame, int buffer);
1843 
1845 {
1846  FilterGraphPriv *fgp = fgp_from_fg(fg);
1847  AVBufferRef *hw_device;
1848  AVFilterInOut *inputs, *outputs, *cur;
1849  int ret, i, simple = filtergraph_is_simple(fg);
1850  int have_input_eof = 0;
1851  const char *graph_desc = fgp->graph_desc;
1852 
1853  cleanup_filtergraph(fg, fgt);
1854  fgt->graph = avfilter_graph_alloc();
1855  if (!fgt->graph)
1856  return AVERROR(ENOMEM);
1857 
1858  if (simple) {
1859  OutputFilterPriv *ofp = ofp_from_ofilter(fg->outputs[0]);
1860 
1861  if (filter_nbthreads) {
1862  ret = av_opt_set(fgt->graph, "threads", filter_nbthreads, 0);
1863  if (ret < 0)
1864  goto fail;
1865  } else if (fgp->nb_threads) {
1866  ret = av_opt_set(fgt->graph, "threads", fgp->nb_threads, 0);
1867  if (ret < 0)
1868  return ret;
1869  }
1870 
1871  if (av_dict_count(ofp->sws_opts)) {
1873  &fgt->graph->scale_sws_opts,
1874  '=', ':');
1875  if (ret < 0)
1876  goto fail;
1877  }
1878 
1879  if (av_dict_count(ofp->swr_opts)) {
1880  char *args;
1881  ret = av_dict_get_string(ofp->swr_opts, &args, '=', ':');
1882  if (ret < 0)
1883  goto fail;
1884  av_opt_set(fgt->graph, "aresample_swr_opts", args, 0);
1885  av_free(args);
1886  }
1887  } else {
1889  }
1890 
1891  hw_device = hw_device_for_filter();
1892 
1893  if ((ret = graph_parse(fgt->graph, graph_desc, &inputs, &outputs, hw_device)) < 0)
1894  goto fail;
1895 
1896  for (cur = inputs, i = 0; cur; cur = cur->next, i++)
1897  if ((ret = configure_input_filter(fg, fgt->graph, fg->inputs[i], cur)) < 0) {
1900  goto fail;
1901  }
1903 
1904  for (cur = outputs, i = 0; cur; cur = cur->next, i++) {
1905  ret = configure_output_filter(fg, fgt->graph, fg->outputs[i], cur);
1906  if (ret < 0) {
1908  goto fail;
1909  }
1910  }
1912 
1913  if (fgp->disable_conversions)
1915  if ((ret = avfilter_graph_config(fgt->graph, NULL)) < 0)
1916  goto fail;
1917 
1918  fgp->is_meta = graph_is_meta(fgt->graph);
1919 
1920  /* limit the lists of allowed formats to the ones selected, to
1921  * make sure they stay the same if the filtergraph is reconfigured later */
1922  for (int i = 0; i < fg->nb_outputs; i++) {
1923  OutputFilter *ofilter = fg->outputs[i];
1924  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
1925  AVFilterContext *sink = ofp->filter;
1926 
1927  ofp->format = av_buffersink_get_format(sink);
1928 
1929  ofp->width = av_buffersink_get_w(sink);
1930  ofp->height = av_buffersink_get_h(sink);
1931 
1932  // If the timing parameters are not locked yet, get the tentative values
1933  // here but don't lock them. They will only be used if no output frames
1934  // are ever produced.
1935  if (!ofp->tb_out_locked) {
1937  if (ofp->fps.framerate.num <= 0 && ofp->fps.framerate.den <= 0 &&
1938  fr.num > 0 && fr.den > 0)
1939  ofp->fps.framerate = fr;
1940  ofp->tb_out = av_buffersink_get_time_base(sink);
1941  }
1943 
1946  ret = av_buffersink_get_ch_layout(sink, &ofp->ch_layout);
1947  if (ret < 0)
1948  goto fail;
1949  }
1950 
1951  for (int i = 0; i < fg->nb_inputs; i++) {
1953  AVFrame *tmp;
1954  while (av_fifo_read(ifp->frame_queue, &tmp, 1) >= 0) {
1955  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE) {
1956  sub2video_frame(&ifp->ifilter, tmp, !fgt->graph);
1957  } else {
1959  }
1960  av_frame_free(&tmp);
1961  if (ret < 0)
1962  goto fail;
1963  }
1964  }
1965 
1966  /* send the EOFs for the finished inputs */
1967  for (int i = 0; i < fg->nb_inputs; i++) {
1969  if (fgt->eof_in[i]) {
1971  if (ret < 0)
1972  goto fail;
1973  have_input_eof = 1;
1974  }
1975  }
1976 
1977  if (have_input_eof) {
1978  // make sure the EOF propagates to the end of the graph
1980  if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
1981  goto fail;
1982  }
1983 
1984  return 0;
1985 fail:
1986  cleanup_filtergraph(fg, fgt);
1987  return ret;
1988 }
1989 
1991 {
1992  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1993  AVFrameSideData *sd;
1994  int ret;
1995 
1996  ret = av_buffer_replace(&ifp->hw_frames_ctx, frame->hw_frames_ctx);
1997  if (ret < 0)
1998  return ret;
1999 
2000  ifp->time_base = (ifp->type == AVMEDIA_TYPE_AUDIO) ? (AVRational){ 1, frame->sample_rate } :
2001  (ifp->opts.flags & IFILTER_FLAG_CFR) ? av_inv_q(ifp->opts.framerate) :
2002  frame->time_base;
2003 
2004  ifp->format = frame->format;
2005 
2006  ifp->width = frame->width;
2007  ifp->height = frame->height;
2008  ifp->sample_aspect_ratio = frame->sample_aspect_ratio;
2009  ifp->color_space = frame->colorspace;
2010  ifp->color_range = frame->color_range;
2011 
2012  ifp->sample_rate = frame->sample_rate;
2013  ret = av_channel_layout_copy(&ifp->ch_layout, &frame->ch_layout);
2014  if (ret < 0)
2015  return ret;
2016 
2018  if (sd)
2019  memcpy(ifp->displaymatrix, sd->data, sizeof(ifp->displaymatrix));
2020  ifp->displaymatrix_present = !!sd;
2021 
2022  return 0;
2023 }
2024 
2026 {
2027  const FilterGraphPriv *fgp = cfgp_from_cfg(fg);
2028  return fgp->is_simple;
2029 }
2030 
2031 static void send_command(FilterGraph *fg, AVFilterGraph *graph,
2032  double time, const char *target,
2033  const char *command, const char *arg, int all_filters)
2034 {
2035  int ret;
2036 
2037  if (!graph)
2038  return;
2039 
2040  if (time < 0) {
2041  char response[4096];
2042  ret = avfilter_graph_send_command(graph, target, command, arg,
2043  response, sizeof(response),
2044  all_filters ? 0 : AVFILTER_CMD_FLAG_ONE);
2045  fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s",
2046  fg->index, ret, response);
2047  } else if (!all_filters) {
2048  fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
2049  } else {
2050  ret = avfilter_graph_queue_command(graph, target, command, arg, 0, time);
2051  if (ret < 0)
2052  fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
2053  }
2054 }
2055 
2056 static int choose_input(const FilterGraph *fg, const FilterGraphThread *fgt)
2057 {
2058  int nb_requests, nb_requests_max = -1;
2059  int best_input = -1;
2060 
2061  for (int i = 0; i < fg->nb_inputs; i++) {
2062  InputFilter *ifilter = fg->inputs[i];
2063  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2064 
2065  if (fgt->eof_in[i])
2066  continue;
2067 
2068  nb_requests = av_buffersrc_get_nb_failed_requests(ifp->filter);
2069  if (nb_requests > nb_requests_max) {
2070  nb_requests_max = nb_requests;
2071  best_input = i;
2072  }
2073  }
2074 
2075  av_assert0(best_input >= 0);
2076 
2077  return best_input;
2078 }
2079 
2081 {
2082  OutputFilter *ofilter = &ofp->ofilter;
2083  FPSConvContext *fps = &ofp->fps;
2084  AVRational tb = (AVRational){ 0, 0 };
2085  AVRational fr;
2086  const FrameData *fd;
2087 
2088  fd = frame_data_c(frame);
2089 
2090  // apply -enc_time_base
2091  if (ofp->enc_timebase.num == ENC_TIME_BASE_DEMUX &&
2092  (fd->dec.tb.num <= 0 || fd->dec.tb.den <= 0)) {
2093  av_log(ofp, AV_LOG_ERROR,
2094  "Demuxing timebase not available - cannot use it for encoding\n");
2095  return AVERROR(EINVAL);
2096  }
2097 
2098  switch (ofp->enc_timebase.num) {
2099  case 0: break;
2100  case ENC_TIME_BASE_DEMUX: tb = fd->dec.tb; break;
2101  case ENC_TIME_BASE_FILTER: tb = frame->time_base; break;
2102  default: tb = ofp->enc_timebase; break;
2103  }
2104 
2105  if (ofilter->type == AVMEDIA_TYPE_AUDIO) {
2106  tb = tb.num ? tb : (AVRational){ 1, frame->sample_rate };
2107  goto finish;
2108  }
2109 
2110  fr = fps->framerate;
2111  if (!fr.num) {
2113  if (fr_sink.num > 0 && fr_sink.den > 0)
2114  fr = fr_sink;
2115  }
2116 
2117  if (fps->vsync_method == VSYNC_CFR || fps->vsync_method == VSYNC_VSCFR) {
2118  if (!fr.num && !fps->framerate_max.num) {
2119  fr = (AVRational){25, 1};
2120  av_log(ofp, AV_LOG_WARNING,
2121  "No information "
2122  "about the input framerate is available. Falling "
2123  "back to a default value of 25fps. Use the -r option "
2124  "if you want a different framerate.\n");
2125  }
2126 
2127  if (fps->framerate_max.num &&
2128  (av_q2d(fr) > av_q2d(fps->framerate_max) ||
2129  !fr.den))
2130  fr = fps->framerate_max;
2131  }
2132 
2133  if (fr.num > 0) {
2134  if (fps->framerate_supported) {
2135  int idx = av_find_nearest_q_idx(fr, fps->framerate_supported);
2136  fr = fps->framerate_supported[idx];
2137  }
2138  if (fps->framerate_clip) {
2139  av_reduce(&fr.num, &fr.den,
2140  fr.num, fr.den, fps->framerate_clip);
2141  }
2142  }
2143 
2144  if (!(tb.num > 0 && tb.den > 0))
2145  tb = av_inv_q(fr);
2146  if (!(tb.num > 0 && tb.den > 0))
2147  tb = frame->time_base;
2148 
2149  fps->framerate = fr;
2150 finish:
2151  ofp->tb_out = tb;
2152  ofp->tb_out_locked = 1;
2153 
2154  return 0;
2155 }
2156 
2158  int64_t start_time)
2159 {
2160  double float_pts = AV_NOPTS_VALUE; // this is identical to frame.pts but with higher precision
2161 
2162  AVRational tb = tb_dst;
2163  AVRational filter_tb = frame->time_base;
2164  const int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
2165 
2166  if (frame->pts == AV_NOPTS_VALUE)
2167  goto early_exit;
2168 
2169  tb.den <<= extra_bits;
2170  float_pts = av_rescale_q(frame->pts, filter_tb, tb) -
2172  float_pts /= 1 << extra_bits;
2173  // when float_pts is not exactly an integer,
2174  // avoid exact midpoints to reduce the chance of rounding differences, this
2175  // can be removed in case the fps code is changed to work with integers
2176  if (float_pts != llrint(float_pts))
2177  float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
2178 
2179  frame->pts = av_rescale_q(frame->pts, filter_tb, tb_dst) -
2181  frame->time_base = tb_dst;
2182 
2183 early_exit:
2184 
2185  if (debug_ts) {
2186  av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
2187  frame ? av_ts2str(frame->pts) : "NULL",
2188  av_ts2timestr(frame->pts, &tb_dst),
2189  float_pts, tb_dst.num, tb_dst.den);
2190  }
2191 
2192  return float_pts;
2193 }
2194 
2195 /* Convert frame timestamps to the encoder timebase and decide how many times
2196  * should this (and possibly previous) frame be repeated in order to conform to
2197  * desired target framerate (if any).
2198  */
2200  int64_t *nb_frames, int64_t *nb_frames_prev)
2201 {
2202  OutputFilter *ofilter = &ofp->ofilter;
2203  FPSConvContext *fps = &ofp->fps;
2204  double delta0, delta, sync_ipts, duration;
2205 
2206  if (!frame) {
2207  *nb_frames_prev = *nb_frames = mid_pred(fps->frames_prev_hist[0],
2208  fps->frames_prev_hist[1],
2209  fps->frames_prev_hist[2]);
2210 
2211  if (!*nb_frames && fps->last_dropped) {
2212  atomic_fetch_add(&ofilter->nb_frames_drop, 1);
2213  fps->last_dropped++;
2214  }
2215 
2216  goto finish;
2217  }
2218 
2219  duration = frame->duration * av_q2d(frame->time_base) / av_q2d(ofp->tb_out);
2220 
2221  sync_ipts = adjust_frame_pts_to_encoder_tb(frame, ofp->tb_out, ofp->ts_offset);
2222  /* delta0 is the "drift" between the input frame and
2223  * where it would fall in the output. */
2224  delta0 = sync_ipts - ofp->next_pts;
2225  delta = delta0 + duration;
2226 
2227  // tracks the number of times the PREVIOUS frame should be duplicated,
2228  // mostly for variable framerate (VFR)
2229  *nb_frames_prev = 0;
2230  /* by default, we output a single frame */
2231  *nb_frames = 1;
2232 
2233  if (delta0 < 0 &&
2234  delta > 0 &&
2237  && fps->vsync_method != VSYNC_DROP
2238 #endif
2239  ) {
2240  if (delta0 < -0.6) {
2241  av_log(ofp, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
2242  } else
2243  av_log(ofp, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
2244  sync_ipts = ofp->next_pts;
2245  duration += delta0;
2246  delta0 = 0;
2247  }
2248 
2249  switch (fps->vsync_method) {
2250  case VSYNC_VSCFR:
2251  if (fps->frame_number == 0 && delta0 >= 0.5) {
2252  av_log(ofp, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
2253  delta = duration;
2254  delta0 = 0;
2255  ofp->next_pts = llrint(sync_ipts);
2256  }
2257  case VSYNC_CFR:
2258  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
2259  if (frame_drop_threshold && delta < frame_drop_threshold && fps->frame_number) {
2260  *nb_frames = 0;
2261  } else if (delta < -1.1)
2262  *nb_frames = 0;
2263  else if (delta > 1.1) {
2264  *nb_frames = llrintf(delta);
2265  if (delta0 > 1.1)
2266  *nb_frames_prev = llrintf(delta0 - 0.6);
2267  }
2268  frame->duration = 1;
2269  break;
2270  case VSYNC_VFR:
2271  if (delta <= -0.6)
2272  *nb_frames = 0;
2273  else if (delta > 0.6)
2274  ofp->next_pts = llrint(sync_ipts);
2275  frame->duration = llrint(duration);
2276  break;
2277 #if FFMPEG_OPT_VSYNC_DROP
2278  case VSYNC_DROP:
2279 #endif
2280  case VSYNC_PASSTHROUGH:
2281  ofp->next_pts = llrint(sync_ipts);
2282  frame->duration = llrint(duration);
2283  break;
2284  default:
2285  av_assert0(0);
2286  }
2287 
2288 finish:
2289  memmove(fps->frames_prev_hist + 1,
2290  fps->frames_prev_hist,
2291  sizeof(fps->frames_prev_hist[0]) * (FF_ARRAY_ELEMS(fps->frames_prev_hist) - 1));
2292  fps->frames_prev_hist[0] = *nb_frames_prev;
2293 
2294  if (*nb_frames_prev == 0 && fps->last_dropped) {
2295  atomic_fetch_add(&ofilter->nb_frames_drop, 1);
2296  av_log(ofp, AV_LOG_VERBOSE,
2297  "*** dropping frame %"PRId64" at ts %"PRId64"\n",
2298  fps->frame_number, fps->last_frame->pts);
2299  }
2300  if (*nb_frames > (*nb_frames_prev && fps->last_dropped) + (*nb_frames > *nb_frames_prev)) {
2301  uint64_t nb_frames_dup;
2302  if (*nb_frames > dts_error_threshold * 30) {
2303  av_log(ofp, AV_LOG_ERROR, "%"PRId64" frame duplication too large, skipping\n", *nb_frames - 1);
2304  atomic_fetch_add(&ofilter->nb_frames_drop, 1);
2305  *nb_frames = 0;
2306  return;
2307  }
2308  nb_frames_dup = atomic_fetch_add(&ofilter->nb_frames_dup,
2309  *nb_frames - (*nb_frames_prev && fps->last_dropped) - (*nb_frames > *nb_frames_prev));
2310  av_log(ofp, AV_LOG_VERBOSE, "*** %"PRId64" dup!\n", *nb_frames - 1);
2311  if (nb_frames_dup > fps->dup_warning) {
2312  av_log(ofp, AV_LOG_WARNING, "More than %"PRIu64" frames duplicated\n", fps->dup_warning);
2313  fps->dup_warning *= 10;
2314  }
2315  }
2316 
2317  fps->last_dropped = *nb_frames == *nb_frames_prev && frame;
2318  fps->dropped_keyframe |= fps->last_dropped && (frame->flags & AV_FRAME_FLAG_KEY);
2319 }
2320 
2322 {
2324  int ret;
2325 
2326  // we are finished and no frames were ever seen at this output,
2327  // at least initialize the encoder with a dummy frame
2328  if (!fgt->got_frame) {
2329  AVFrame *frame = fgt->frame;
2330  FrameData *fd;
2331 
2332  frame->time_base = ofp->tb_out;
2333  frame->format = ofp->format;
2334 
2335  frame->width = ofp->width;
2336  frame->height = ofp->height;
2337  frame->sample_aspect_ratio = ofp->sample_aspect_ratio;
2338 
2339  frame->sample_rate = ofp->sample_rate;
2340  if (ofp->ch_layout.nb_channels) {
2341  ret = av_channel_layout_copy(&frame->ch_layout, &ofp->ch_layout);
2342  if (ret < 0)
2343  return ret;
2344  }
2345 
2346  fd = frame_data(frame);
2347  if (!fd)
2348  return AVERROR(ENOMEM);
2349 
2350  fd->frame_rate_filter = ofp->fps.framerate;
2351 
2352  av_assert0(!frame->buf[0]);
2353 
2354  av_log(ofp, AV_LOG_WARNING,
2355  "No filtered frames for output stream, trying to "
2356  "initialize anyway.\n");
2357 
2358  ret = sch_filter_send(fgp->sch, fgp->sch_idx, ofp->index, frame);
2359  if (ret < 0) {
2361  return ret;
2362  }
2363  }
2364 
2365  fgt->eof_out[ofp->index] = 1;
2366 
2367  ret = sch_filter_send(fgp->sch, fgp->sch_idx, ofp->index, NULL);
2368  return (ret == AVERROR_EOF) ? 0 : ret;
2369 }
2370 
// fg_output_frame(): forward one filtered frame (or NULL to signal EOF) from
// a filtergraph output to its consumers via the scheduler.  For video, the
// fps-conversion state in ofp->fps may duplicate or drop frames; for audio,
// pts and duration are rescaled into the output time base.
// NOTE(review): this is an extracted listing with gaps -- the first signature
// line (static int fg_output_frame(OutputFilterPriv *ofp, FilterGraphThread
// *fgt, per the index below) and a few body lines (2371, 2374, 2407) are
// missing from the visible text; confirm against the original source.
2372  AVFrame *frame)
2373 {
2375  AVFrame *frame_prev = ofp->fps.last_frame;
2376  enum AVMediaType type = ofp->ofilter.type;
2377 
2378  int64_t nb_frames = !!frame, nb_frames_prev = 0;
2379 
2380  if (type == AVMEDIA_TYPE_VIDEO && (frame || fgt->got_frame))
2381  video_sync_process(ofp, frame, &nb_frames, &nb_frames_prev);
2382 
     // emit nb_frames output frames: >1 duplicates, 0 drops the input frame
2383  for (int64_t i = 0; i < nb_frames; i++) {
2384  AVFrame *frame_out;
2385  int ret;
2386 
2387  if (type == AVMEDIA_TYPE_VIDEO) {
     // the first nb_frames_prev copies repeat the previously-sent frame
2388  AVFrame *frame_in = (i < nb_frames_prev && frame_prev->buf[0]) ?
2389  frame_prev : frame;
2390  if (!frame_in)
2391  break;
2392 
2393  frame_out = fgp->frame_enc;
2394  ret = av_frame_ref(frame_out, frame_in);
2395  if (ret < 0)
2396  return ret;
2397 
2398  frame_out->pts = ofp->next_pts;
2399 
     // re-mark a keyframe whose original carrier was dropped by fps conversion
2400  if (ofp->fps.dropped_keyframe) {
2401  frame_out->flags |= AV_FRAME_FLAG_KEY;
2402  ofp->fps.dropped_keyframe = 0;
2403  }
2404  } else {
     // audio: rescale pts into the output time base
     // (line 2407, the subtracted ts_offset term, is missing from this listing)
2405  frame->pts = (frame->pts == AV_NOPTS_VALUE) ? ofp->next_pts :
2406  av_rescale_q(frame->pts, frame->time_base, ofp->tb_out) -
2408 
2409  frame->time_base = ofp->tb_out;
2410  frame->duration = av_rescale_q(frame->nb_samples,
2411  (AVRational){ 1, frame->sample_rate },
2412  ofp->tb_out);
2413 
2414  ofp->next_pts = frame->pts + frame->duration;
2415 
2416  frame_out = frame;
2417  }
2418 
2419  // send the frame to consumers
2420  ret = sch_filter_send(fgp->sch, fgp->sch_idx, ofp->index, frame_out);
2421  if (ret < 0) {
2422  av_frame_unref(frame_out);
2423 
     // first failed send on this output: count the output as finished
2424  if (!fgt->eof_out[ofp->index]) {
2425  fgt->eof_out[ofp->index] = 1;
2426  fgp->nb_outputs_done++;
2427  }
2428 
     // EOF from the consumer side is normal termination, not an error
2429  return ret == AVERROR_EOF ? 0 : ret;
2430  }
2431 
2432  if (type == AVMEDIA_TYPE_VIDEO) {
2433  ofp->fps.frame_number++;
2434  ofp->next_pts++;
2435 
     // only the first emitted copy keeps the keyframe flag
2436  if (i == nb_frames_prev && frame)
2437  frame->flags &= ~AV_FRAME_FLAG_KEY;
2438  }
2439 
2440  fgt->got_frame = 1;
2441  }
2442 
     // stash the current frame for possible duplication on the next call
2443  if (frame && frame_prev) {
2444  av_frame_unref(frame_prev);
2445  av_frame_move_ref(frame_prev, frame);
2446  }
2447 
     // NULL input frame means EOF on this output
2448  if (!frame)
2449  return close_output(ofp, fgt);
2450 
2451  return 0;
2452 }
2453 
// fg_output_step(): pull one frame from a buffersink and hand it to
// fg_output_frame().  Returns 1 when no (more) frames are available,
// 0 when a frame was processed, <0 on error.
// NOTE(review): extracted listing with gaps -- the signature line (2454, see
// index: fg_output_step(OutputFilterPriv *ofp, FilterGraphThread *fgt,
// AVFrame *frame)) and several body lines (2457, 2462-63, 2477, 2481, 2493,
// 2500, 2504, 2513, 2522) are missing; in particular the call that sets
// `ret` before line 2464 is not visible -- presumably a buffersink
// frame-retrieval call; confirm against the original source.
2455  AVFrame *frame)
2456 {
2458  AVFilterContext *filter = ofp->filter;
2459  FrameData *fd;
2460  int ret;
2461 
     // (the statement assigning ret is on missing lines 2462-2463)
2464  if (ret == AVERROR_EOF && !fgt->eof_out[ofp->index]) {
     // sink reached EOF: flush the output once, then report "no more frames"
2465  ret = fg_output_frame(ofp, fgt, NULL);
2466  return (ret < 0) ? ret : 1;
2467  } else if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
2468  return 1;
2469  } else if (ret < 0) {
2470  av_log(ofp, AV_LOG_WARNING,
2471  "Error in retrieving a frame from the filtergraph: %s\n",
2472  av_err2str(ret));
2473  return ret;
2474  }
2475 
     // output already finished: discard anything the sink still produces
2476  if (fgt->eof_out[ofp->index]) {
2478  return 0;
2479  }
2480 
2482 
2483  if (debug_ts)
2484  av_log(ofp, AV_LOG_INFO, "filter_raw -> pts:%s pts_time:%s time_base:%d/%d\n",
2485  av_ts2str(frame->pts), av_ts2timestr(frame->pts, &frame->time_base),
2486  frame->time_base.num, frame->time_base.den);
2487 
2488  // Choose the output timebase the first time we get a frame.
2489  if (!ofp->tb_out_locked) {
2490  ret = choose_out_timebase(ofp, frame);
2491  if (ret < 0) {
2492  av_log(ofp, AV_LOG_ERROR, "Could not choose an output time base\n");
2494  return ret;
2495  }
2496  }
2497 
2498  fd = frame_data(frame);
2499  if (!fd) {
2501  return AVERROR(ENOMEM);
2502  }
2503 
2505 
2506  // only use bits_per_raw_sample passed through from the decoder
2507  // if the filtergraph did not touch the frame data
2508  if (!fgp->is_meta)
2509  fd->bits_per_raw_sample = 0;
2510 
2511  if (ofp->ofilter.type == AVMEDIA_TYPE_VIDEO) {
     // synthesize a duration from the frame rate when the sink left it unset
     // (the declaration of `fr` is on missing line 2513)
2512  if (!frame->duration) {
2514  if (fr.num > 0 && fr.den > 0)
2515  frame->duration = av_rescale_q(1, av_inv_q(fr), frame->time_base);
2516  }
2517 
2518  fd->frame_rate_filter = ofp->fps.framerate;
2519  }
2520 
2521  ret = fg_output_frame(ofp, fgt, frame);
2523  if (ret < 0)
2524  return ret;
2525 
2526  return 0;
2527 }
2528 
2529 /* retrieve all frames available at filtergraph outputs
2530  * and send them to consumers */
// NOTE(review): extracted listing with gaps -- the signature line (2531,
// presumably read_frames(FilterGraph *fg, FilterGraphThread *fgt, ...)) and
// lines 2540, 2557, 2578 (the `ifp`/`ofp` declarations and the call that
// sets `ret` in the while loop) are missing; confirm against the original.
2532  AVFrame *frame)
2533 {
2534  FilterGraphPriv *fgp = fgp_from_fg(fg);
2535  int did_step = 0;
2536 
2537  // graph not configured, just select the input to request
2538  if (!fgt->graph) {
2539  for (int i = 0; i < fg->nb_inputs; i++) {
     // pick the first input that has no format yet and is not at EOF
2541  if (ifp->format < 0 && !fgt->eof_in[i]) {
2542  fgt->next_in = i;
2543  return 0;
2544  }
2545  }
2546 
2547  // This state - graph is not configured, but all inputs are either
2548  // initialized or EOF - should be unreachable because sending EOF to a
2549  // filter without even a fallback format should fail
2550  av_assert0(0);
2551  return AVERROR_BUG;
2552  }
2553 
2554  while (fgp->nb_outputs_done < fg->nb_outputs) {
2555  int ret;
2556 
     // (the graph-request call assigning ret is on missing line 2557)
2558  if (ret == AVERROR(EAGAIN)) {
     // the graph needs more input: remember which input to feed next
2559  fgt->next_in = choose_input(fg, fgt);
2560  break;
2561  } else if (ret < 0) {
2562  if (ret == AVERROR_EOF)
2563  av_log(fg, AV_LOG_VERBOSE, "Filtergraph returned EOF, finishing\n");
2564  else
2565  av_log(fg, AV_LOG_ERROR,
2566  "Error requesting a frame from the filtergraph: %s\n",
2567  av_err2str(ret));
2568  return ret;
2569  }
2570  fgt->next_in = fg->nb_inputs;
2571 
2572  // return after one iteration, so that scheduler can rate-control us
2573  if (did_step && fgp->have_sources)
2574  return 0;
2575 
2576  /* Reap all buffers present in the buffer sinks */
2577  for (int i = 0; i < fg->nb_outputs; i++) {
2579 
     // drain this output until fg_output_step() reports "no more frames" (>0)
2580  ret = 0;
2581  while (!ret) {
2582  ret = fg_output_step(ofp, fgt, frame);
2583  if (ret < 0)
2584  return ret;
2585  }
2586  }
2587  did_step = 1;
2588  }
2589 
     // all outputs finished -> signal overall EOF to the caller
2590  return (fgp->nb_outputs_done == fg->nb_outputs) ? AVERROR_EOF : 0;
2591 }
2592 
2593 static void sub2video_heartbeat(InputFilter *ifilter, int64_t pts, AVRational tb)
2594 {
2595  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2596  int64_t pts2;
2597 
2598  /* subtitles seem to be usually muxed ahead of other streams;
2599  if not, subtracting a larger time here is necessary */
2600  pts2 = av_rescale_q(pts, tb, ifp->time_base) - 1;
2601 
2602  /* do not send the heartbeat frame if the subtitle is already ahead */
2603  if (pts2 <= ifp->sub2video.last_pts)
2604  return;
2605 
2606  if (pts2 >= ifp->sub2video.end_pts || ifp->sub2video.initialize)
2607  /* if we have hit the end of the current displayed subpicture,
2608  or if we need to initialize the system, update the
2609  overlayed subpicture and its start/end times */
2610  sub2video_update(ifp, pts2 + 1, NULL);
2611  else
2612  sub2video_push_ref(ifp, pts2);
2613 }
2614 
// sub2video_frame(): feed a subtitle-derived frame into the sub2video input.
// `buffer` nonzero means the graph is not configured yet, so the frame is
// queued in ifp->frame_queue instead of being pushed into the graph.
// NOTE(review): extracted listing -- line 2630 (between the alloc check and
// the fifo write; presumably the statement transferring `frame` into `tmp`)
// is missing from the visible text; confirm against the original source.
2615 static int sub2video_frame(InputFilter *ifilter, AVFrame *frame, int buffer)
2616 {
2617  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2618  int ret;
2619 
2620  if (buffer) {
2621  AVFrame *tmp;
2622 
2623  if (!frame)
2624  return 0;
2625 
2626  tmp = av_frame_alloc();
2627  if (!tmp)
2628  return AVERROR(ENOMEM);
2629 
2631 
2632  ret = av_fifo_write(ifp->frame_queue, &tmp, 1);
2633  if (ret < 0) {
2634  av_frame_free(&tmp);
2635  return ret;
2636  }
2637 
2638  return 0;
2639  }
2640 
2641  // heartbeat frame
2642  if (frame && !frame->buf[0]) {
2643  sub2video_heartbeat(ifilter, frame->pts, frame->time_base);
2644  return 0;
2645  }
2646 
     // EOF: close out the last subpicture, then send EOF into the buffersrc
2647  if (!frame) {
2648  if (ifp->sub2video.end_pts < INT64_MAX)
2649  sub2video_update(ifp, INT64_MAX, NULL);
2650 
2651  return av_buffersrc_add_frame(ifp->filter, NULL);
2652  }
2653 
     // adopt the frame's dimensions when present, else keep the current ones
2654  ifp->width = frame->width ? frame->width : ifp->width;
2655  ifp->height = frame->height ? frame->height : ifp->height;
2656 
     // frame->buf[0] carries the decoded AVSubtitle for sub2video
2657  sub2video_update(ifp, INT64_MIN, (const AVSubtitle*)frame->buf[0]->data);
2658 
2659  return 0;
2660 }
2661 
// send_eof(): signal end-of-stream on one filtergraph input.  If the input
// filter exists, EOF is sent at the rescaled pts; otherwise the fallback
// stream parameters are adopted so the graph can still be configured.
// NOTE(review): extracted listing with gaps -- lines 2675 (rounding-flags
// argument of av_rescale_q_rnd), 2677 (the call assigning ret, presumably
// closing the buffersrc), 2687, 2692 (start of the channel-layout copy call)
// and 2707 (start of the av_log call) are missing; confirm against source.
2662 static int send_eof(FilterGraphThread *fgt, InputFilter *ifilter,
2663  int64_t pts, AVRational tb)
2664 {
2665  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2666  int ret;
2667 
     // EOF already delivered on this input: nothing to do
2668  if (fgt->eof_in[ifp->index])
2669  return 0;
2670 
2671  fgt->eof_in[ifp->index] = 1;
2672 
2673  if (ifp->filter) {
2674  pts = av_rescale_q_rnd(pts, tb, ifp->time_base,
2676 
2678  if (ret < 0)
2679  return ret;
2680  } else {
2681  if (ifp->format < 0) {
2682  // the filtergraph was never configured, use the fallback parameters
2683  ifp->format = ifp->opts.fallback->format;
2684  ifp->sample_rate = ifp->opts.fallback->sample_rate;
2685  ifp->width = ifp->opts.fallback->width;
2686  ifp->height = ifp->opts.fallback->height;
2688  ifp->color_space = ifp->opts.fallback->colorspace;
2689  ifp->color_range = ifp->opts.fallback->color_range;
2690  ifp->time_base = ifp->opts.fallback->time_base;
2691 
2693  &ifp->opts.fallback->ch_layout);
2694  if (ret < 0)
2695  return ret;
2696 
     // with the fallback parameters in place the graph may now be configurable
2697  if (ifilter_has_all_input_formats(ifilter->graph)) {
2698  ret = configure_filtergraph(ifilter->graph, fgt);
2699  if (ret < 0) {
2700  av_log(NULL, AV_LOG_ERROR, "Error initializing filters!\n");
2701  return ret;
2702  }
2703  }
2704  }
2705 
     // still no usable format even after the fallback: give up on this input
2706  if (ifp->format < 0) {
2708  "Cannot determine format of input %s after EOF\n",
2709  ifp->opts.name);
2710  return AVERROR_INVALIDDATA;
2711  }
2712  }
2713 
2714  return 0;
2715 }
2716 
// Bitmask of reasons an input's parameters changed, used by send_frame() to
// decide whether the filtergraph must be reconfigured and to build the
// human-readable log message.
// NOTE(review): the enum's opening line (2717) is missing from this
// extracted listing; confirm the enum tag against the original source.
2718  VIDEO_CHANGED = (1 << 0),
2719  AUDIO_CHANGED = (1 << 1),
2720  MATRIX_CHANGED = (1 << 2),
2721  HWACCEL_CHANGED = (1 << 3)
2722 };
2723 
/* Map a NULL string to the literal "unknown", so optional libav* name
 * lookups can be passed straight to %s format arguments. */
static const char *unknown_if_null(const char *str)
{
    if (str)
        return str;
    return "unknown";
}
2728 
// send_frame(): push one decoded frame into a filtergraph input, detecting
// parameter changes (format/size/layout/display matrix/hwaccel) that force
// a filtergraph reconfiguration first.
// NOTE(review): extracted listing with gaps -- missing lines include 2729
// (first signature line), 2755 (the side-data lookup whose result `sd` is
// tested below), 2770 (the call assigning ret after `if (need_reinit)`),
// 2783, 2799 (bprint init), 2835, 2840, 2842-43 (the buffersrc submission
// assigning ret) and 2845; confirm against the original source.
2730  InputFilter *ifilter, AVFrame *frame)
2731 {
2732  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2733  FrameData *fd;
2734  AVFrameSideData *sd;
2735  int need_reinit = 0, ret;
2736 
2737  /* determine if the parameters for this input changed */
2738  switch (ifp->type) {
2739  case AVMEDIA_TYPE_AUDIO:
2740  if (ifp->format != frame->format ||
2741  ifp->sample_rate != frame->sample_rate ||
2742  av_channel_layout_compare(&ifp->ch_layout, &frame->ch_layout))
2743  need_reinit |= AUDIO_CHANGED;
2744  break;
2745  case AVMEDIA_TYPE_VIDEO:
2746  if (ifp->format != frame->format ||
2747  ifp->width != frame->width ||
2748  ifp->height != frame->height ||
2749  ifp->color_space != frame->colorspace ||
2750  ifp->color_range != frame->color_range)
2751  need_reinit |= VIDEO_CHANGED;
2752  break;
2753  }
2754 
     // display matrix appeared, disappeared, or changed contents
     // (the side-data lookup setting `sd` is on missing line 2755)
2756  if (!ifp->displaymatrix_present ||
2757  memcmp(sd->data, ifp->displaymatrix, sizeof(ifp->displaymatrix)))
2758  need_reinit |= MATRIX_CHANGED;
2759  } else if (ifp->displaymatrix_present)
2760  need_reinit |= MATRIX_CHANGED;
2761 
     // reinit disabled by options once the graph exists
2762  if (!(ifp->opts.flags & IFILTER_FLAG_REINIT) && fgt->graph)
2763  need_reinit = 0;
2764 
2765  if (!!ifp->hw_frames_ctx != !!frame->hw_frames_ctx ||
2766  (ifp->hw_frames_ctx && ifp->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2767  need_reinit |= HWACCEL_CHANGED;
2768 
2769  if (need_reinit) {
2771  if (ret < 0)
2772  return ret;
2773  }
2774 
2775  /* (re)init the graph if possible, otherwise buffer the frame and return */
2776  if (need_reinit || !fgt->graph) {
2777  AVFrame *tmp = av_frame_alloc();
2778 
2779  if (!tmp)
2780  return AVERROR(ENOMEM);
2781 
     // some inputs still have no format: queue the frame for later
2782  if (!ifilter_has_all_input_formats(fg)) {
2784 
2785  ret = av_fifo_write(ifp->frame_queue, &tmp, 1);
2786  if (ret < 0)
2787  av_frame_free(&tmp);
2788 
2789  return ret;
2790  }
2791 
     // drain the old graph before tearing it down for reconfiguration
2792  ret = fgt->graph ? read_frames(fg, fgt, tmp) : 0;
2793  av_frame_free(&tmp);
2794  if (ret < 0)
2795  return ret;
2796 
     // build a human-readable explanation of why we reconfigure
2797  if (fgt->graph) {
2798  AVBPrint reason;
2800  if (need_reinit & AUDIO_CHANGED) {
2801  const char *sample_format_name = av_get_sample_fmt_name(frame->format);
2802  av_bprintf(&reason, "audio parameters changed to %d Hz, ", frame->sample_rate);
2803  av_channel_layout_describe_bprint(&frame->ch_layout, &reason);
2804  av_bprintf(&reason, ", %s, ", unknown_if_null(sample_format_name));
2805  }
2806  if (need_reinit & VIDEO_CHANGED) {
2807  const char *pixel_format_name = av_get_pix_fmt_name(frame->format);
2808  const char *color_space_name = av_color_space_name(frame->colorspace);
2809  const char *color_range_name = av_color_range_name(frame->color_range);
2810  av_bprintf(&reason, "video parameters changed to %s(%s, %s), %dx%d, ",
2811  unknown_if_null(pixel_format_name), unknown_if_null(color_range_name),
2812  unknown_if_null(color_space_name), frame->width, frame->height);
2813  }
2814  if (need_reinit & MATRIX_CHANGED)
2815  av_bprintf(&reason, "display matrix changed, ");
2816  if (need_reinit & HWACCEL_CHANGED)
2817  av_bprintf(&reason, "hwaccel changed, ");
2818  if (reason.len > 1)
2819  reason.str[reason.len - 2] = '\0'; // remove last comma
2820  av_log(fg, AV_LOG_INFO, "Reconfiguring filter graph%s%s\n", reason.len ? " because " : "", reason.str);
2821  }
2822 
2823  ret = configure_filtergraph(fg, fgt);
2824  if (ret < 0) {
2825  av_log(fg, AV_LOG_ERROR, "Error reinitializing filters!\n");
2826  return ret;
2827  }
2828  }
2829 
     // rebase the frame's timestamps into the input filter's time base
2830  frame->pts = av_rescale_q(frame->pts, frame->time_base, ifp->time_base);
2831  frame->duration = av_rescale_q(frame->duration, frame->time_base, ifp->time_base);
2832  frame->time_base = ifp->time_base;
2833 
2834  if (ifp->displaymatrix_applied)
2836 
2837  fd = frame_data(frame);
2838  if (!fd)
2839  return AVERROR(ENOMEM);
2841 
     // (the buffersrc submission assigning ret is on missing lines 2842-2843)
2844  if (ret < 0) {
2846  if (ret != AVERROR_EOF)
2847  av_log(fg, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2848  return ret;
2849  }
2850 
2851  return 0;
2852 }
2853 
// fg_thread_set_name(): build a short thread name for this filtergraph --
// per-output-filter for simple graphs, "fc<index>" for complex ones.
// NOTE(review): extracted listing -- line 2860 (the %c argument of the first
// snprintf) and line 2866 (the call that actually applies `name` to the
// thread) are missing from the visible text; confirm against source.
2854 static void fg_thread_set_name(const FilterGraph *fg)
2855 {
2856  char name[16];
2857  if (filtergraph_is_simple(fg)) {
2858  OutputFilterPriv *ofp = ofp_from_ofilter(fg->outputs[0]);
2859  snprintf(name, sizeof(name), "%cf%s",
2861  ofp->name);
2862  } else {
2863  snprintf(name, sizeof(name), "fc%d", fg->index);
2864  }
2865 
2867 }
2868 
// fg_thread_uninit(): release everything owned by a FilterGraphThread and
// zero the struct so it can be reused or safely uninited twice.
// NOTE(review): extracted listing -- the signature line (2869, per the index:
// static void fg_thread_uninit(FilterGraphThread *fgt)) and line 2875
// (presumably freeing the drained frame_queue_out fifo) are missing.
2870 {
     // drain and free any frames still queued for output
2871  if (fgt->frame_queue_out) {
2872  AVFrame *frame;
2873  while (av_fifo_read(fgt->frame_queue_out, &frame, 1) >= 0)
2874  av_frame_free(&frame);
2876  }
2877 
2878  av_frame_free(&fgt->frame);
2879  av_freep(&fgt->eof_in);
2880  av_freep(&fgt->eof_out);
2881 
2882  avfilter_graph_free(&fgt->graph);
2883 
2884  memset(fgt, 0, sizeof(*fgt));
2885 }
2886 
// fg_thread_init(): allocate the per-thread state (scratch frame, per-input
// and per-output EOF flags, output frame queue).  On any allocation failure
// everything is torn down again and AVERROR(ENOMEM) is returned.
// NOTE(review): extracted listing -- line 2903 (the allocation that sets
// fgt->frame_queue_out) is missing from the visible text; confirm.
2887 static int fg_thread_init(FilterGraphThread *fgt, const FilterGraph *fg)
2888 {
2889  memset(fgt, 0, sizeof(*fgt));
2890 
2891  fgt->frame = av_frame_alloc();
2892  if (!fgt->frame)
2893  goto fail;
2894 
2895  fgt->eof_in = av_calloc(fg->nb_inputs, sizeof(*fgt->eof_in));
2896  if (!fgt->eof_in)
2897  goto fail;
2898 
2899  fgt->eof_out = av_calloc(fg->nb_outputs, sizeof(*fgt->eof_out));
2900  if (!fgt->eof_out)
2901  goto fail;
2902 
2904  if (!fgt->frame_queue_out)
2905  goto fail;
2906 
2907  return 0;
2908 
     // unified cleanup: fg_thread_uninit() is safe on a partially-built fgt
2909 fail:
2910  fg_thread_uninit(fgt);
2911  return AVERROR(ENOMEM);
2912 }
2913 
// filter_thread(): main loop of a filtergraph worker.  Receives input frames,
// EOFs and control-stream commands from the scheduler, feeds them into the
// graph, and forwards all produced frames to the consumers.
// NOTE(review): extracted listing with gaps -- lines 2929 (condition opening
// the block that first configures the graph), 2984 (statement before the
// send_eof call) and 3011 (the `ofp` declaration in the final loop) are
// missing from the visible text; confirm against the original source.
2914 static int filter_thread(void *arg)
2915 {
2916  FilterGraphPriv *fgp = arg;
2917  FilterGraph *fg = &fgp->fg;
2918 
2919  FilterGraphThread fgt;
2920  int ret = 0, input_status = 0;
2921 
2922  ret = fg_thread_init(&fgt, fg);
2923  if (ret < 0)
2924  goto finish;
2925 
2926  fg_thread_set_name(fg);
2927 
2928  // if we have all input parameters the graph can now be configured
2930  ret = configure_filtergraph(fg, &fgt);
2931  if (ret < 0) {
2932  av_log(fg, AV_LOG_ERROR, "Error configuring filter graph: %s\n",
2933  av_err2str(ret));
2934  goto finish;
2935  }
2936  }
2937 
2938  while (1) {
2939  InputFilter *ifilter;
2940  InputFilterPriv *ifp;
2941  enum FrameOpaque o;
2942  unsigned input_idx = fgt.next_in;
2943 
2944  input_status = sch_filter_receive(fgp->sch, fgp->sch_idx,
2945  &input_idx, fgt.frame);
2946  if (input_status == AVERROR_EOF) {
2947  av_log(fg, AV_LOG_VERBOSE, "Filtering thread received EOF\n");
2948  break;
2949  } else if (input_status == AVERROR(EAGAIN)) {
2950  // should only happen when we didn't request any input
2951  av_assert0(input_idx == fg->nb_inputs);
2952  goto read_frames;
2953  }
2954  av_assert0(input_status >= 0);
2955 
2956  o = (intptr_t)fgt.frame->opaque;
2957 
     // NOTE(review): exact duplicate of the assignment two lines above --
     // harmless (same value re-read) but redundant; candidate for removal.
2958  o = (intptr_t)fgt.frame->opaque;
2959 
2960  // message on the control stream
2961  if (input_idx == fg->nb_inputs) {
2962  FilterCommand *fc;
2963 
2964  av_assert0(o == FRAME_OPAQUE_SEND_COMMAND && fgt.frame->buf[0]);
2965 
2966  fc = (FilterCommand*)fgt.frame->buf[0]->data;
2967  send_command(fg, fgt.graph, fc->time, fc->target, fc->command, fc->arg,
2968  fc->all_filters);
2969  av_frame_unref(fgt.frame);
2970  continue;
2971  }
2972 
2973  // we received an input frame or EOF
2974  ifilter = fg->inputs[input_idx];
2975  ifp = ifp_from_ifilter(ifilter);
2976 
2977  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE) {
2978  int hb_frame = input_status >= 0 && o == FRAME_OPAQUE_SUB_HEARTBEAT;
2979  ret = sub2video_frame(ifilter, (fgt.frame->buf[0] || hb_frame) ? fgt.frame : NULL,
2980  !fgt.graph);
2981  } else if (fgt.frame->buf[0]) {
2982  ret = send_frame(fg, &fgt, ifilter, fgt.frame);
2983  } else {
     // a bufferless frame on a non-subtitle input signals EOF
2985  ret = send_eof(&fgt, ifilter, fgt.frame->pts, fgt.frame->time_base);
2986  }
2987  av_frame_unref(fgt.frame);
2988  if (ret == AVERROR_EOF) {
2989  av_log(fg, AV_LOG_VERBOSE, "Input %u no longer accepts new data\n",
2990  input_idx);
2991  sch_filter_receive_finish(fgp->sch, fgp->sch_idx, input_idx);
2992  continue;
2993  }
2994  if (ret < 0)
2995  goto finish;
2996 
2997 read_frames:
2998  // retrieve all newly available frames
2999  ret = read_frames(fg, &fgt, fgt.frame);
3000  if (ret == AVERROR_EOF) {
3001  av_log(fg, AV_LOG_VERBOSE, "All consumers returned EOF\n");
3002  break;
3003  } else if (ret < 0) {
3004  av_log(fg, AV_LOG_ERROR, "Error sending frames to consumers: %s\n",
3005  av_err2str(ret));
3006  goto finish;
3007  }
3008  }
3009 
     // flush EOF through every output that has not seen it yet
3010  for (unsigned i = 0; i < fg->nb_outputs; i++) {
3012 
3013  if (fgt.eof_out[i] || !fgt.graph)
3014  continue;
3015 
3016  ret = fg_output_frame(ofp, &fgt, NULL);
3017  if (ret < 0)
3018  goto finish;
3019  }
3020 
3021 finish:
3022  // EOF is normal termination
3023  if (ret == AVERROR_EOF)
3024  ret = 0;
3025 
3026  fg_thread_uninit(&fgt);
3027 
3028  return ret;
3029 }
3030 
3031 void fg_send_command(FilterGraph *fg, double time, const char *target,
3032  const char *command, const char *arg, int all_filters)
3033 {
3034  FilterGraphPriv *fgp = fgp_from_fg(fg);
3035  AVBufferRef *buf;
3036  FilterCommand *fc;
3037 
3038  fc = av_mallocz(sizeof(*fc));
3039  if (!fc)
3040  return;
3041 
3042  buf = av_buffer_create((uint8_t*)fc, sizeof(*fc), filter_command_free, NULL, 0);
3043  if (!buf) {
3044  av_freep(&fc);
3045  return;
3046  }
3047 
3048  fc->target = av_strdup(target);
3049  fc->command = av_strdup(command);
3050  fc->arg = av_strdup(arg);
3051  if (!fc->target || !fc->command || !fc->arg) {
3052  av_buffer_unref(&buf);
3053  return;
3054  }
3055 
3056  fc->time = time;
3057  fc->all_filters = all_filters;
3058 
3059  fgp->frame->buf[0] = buf;
3060  fgp->frame->opaque = (void*)(intptr_t)FRAME_OPAQUE_SEND_COMMAND;
3061 
3062  sch_filter_command(fgp->sch, fgp->sch_idx, fgp->frame);
3063 }
AV_OPT_SEARCH_CHILDREN
#define AV_OPT_SEARCH_CHILDREN
Search in possible children of the given object first.
Definition: opt.h:533
SCH_FILTER_OUT
#define SCH_FILTER_OUT(filter, output)
Definition: ffmpeg_sched.h:126
AVSubtitle
Definition: avcodec.h:2228
formats
formats
Definition: signature.h:48
configure_input_filter
static int configure_input_filter(FilterGraph *fg, AVFilterGraph *graph, InputFilter *ifilter, AVFilterInOut *in)
Definition: ffmpeg_filter.c:1799
FilterGraphThread::next_in
unsigned next_in
Definition: ffmpeg_filter.c:95
AVMEDIA_TYPE_SUBTITLE
@ AVMEDIA_TYPE_SUBTITLE
Definition: avutil.h:204
av_gettime_relative
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
Definition: time.c:56
AVFILTER_CMD_FLAG_ONE
#define AVFILTER_CMD_FLAG_ONE
Stop once a filter understood the command (for target=all for example); fast filters are favored automatically.
Definition: avfilter.h:693
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
AVFrame::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: frame.h:653
AV_BPRINT_SIZE_UNLIMITED
#define AV_BPRINT_SIZE_UNLIMITED
av_buffersink_get_ch_layout
int av_buffersink_get_ch_layout(const AVFilterContext *ctx, AVChannelLayout *out)
Definition: buffersink.c:198
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
av_buffersink_get_sample_aspect_ratio
AVRational av_buffersink_get_sample_aspect_ratio(const AVFilterContext *ctx)
name
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option name
Definition: writing_filters.txt:88
avfilter_filter_pad_count
unsigned avfilter_filter_pad_count(const AVFilter *filter, int is_output)
Get the number of elements in an AVFilter's inputs or outputs array.
Definition: avfilter.c:616
extra_bits
#define extra_bits(eb)
Definition: intrax8.c:120
OutputFilter::graph
struct FilterGraph * graph
Definition: ffmpeg.h:317
av_clip
#define av_clip
Definition: common.h:99
InputFilterPriv::type
enum AVMediaType type
Definition: ffmpeg_filter.c:121
sch_filter_send
int sch_filter_send(Scheduler *sch, unsigned fg_idx, unsigned out_idx, AVFrame *frame)
Called by filtergraph tasks to send a filtered frame or EOF to consumers.
Definition: ffmpeg_sched.c:2393
OutputFilter::class
const AVClass * class
Definition: ffmpeg.h:315
VSYNC_VFR
@ VSYNC_VFR
Definition: ffmpeg.h:69
OutputFilterPriv::filter
AVFilterContext * filter
Definition: ffmpeg_filter.c:198
av_bprint_is_complete
static int av_bprint_is_complete(const AVBPrint *buf)
Test if the print buffer is complete (not truncated).
Definition: bprint.h:218
r
const char * r
Definition: vf_curves.c:127
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
nb_input_files
int nb_input_files
Definition: ffmpeg.c:105
AVSubtitle::rects
AVSubtitleRect ** rects
Definition: avcodec.h:2233
opt.h
choose_input
static int choose_input(const FilterGraph *fg, const FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:2056
get_rotation
double get_rotation(const int32_t *displaymatrix)
Definition: cmdutils.c:1107
FilterGraphPriv::frame
AVFrame * frame
Definition: ffmpeg_filter.c:65
FilterGraphPriv::sch
Scheduler * sch
Definition: ffmpeg_filter.c:69
AVCodecParameters::codec_type
enum AVMediaType codec_type
General type of the encoded data.
Definition: codec_par.h:51
FilterGraphThread::got_frame
int got_frame
Definition: ffmpeg_filter.c:97
AVFilterGraph::nb_threads
int nb_threads
Maximum number of threads used by filters in this graph.
Definition: avfilter.h:839
InputFilterPriv::ch_layout
AVChannelLayout ch_layout
Definition: ffmpeg_filter.c:138
avfilter_pad_get_name
const char * avfilter_pad_get_name(const AVFilterPad *pads, int pad_idx)
Get the name of an AVFilterPad.
Definition: avfilter.c:969
FrameData
Definition: ffmpeg.h:630
send_command
static void send_command(FilterGraph *fg, AVFilterGraph *graph, double time, const char *target, const char *command, const char *arg, int all_filters)
Definition: ffmpeg_filter.c:2031
InputFilterPriv::last_pts
int64_t last_pts
Definition: ffmpeg_filter.c:153
avfilter_graph_segment_create_filters
int avfilter_graph_segment_create_filters(AVFilterGraphSegment *seg, int flags)
Create filters specified in a graph segment.
Definition: graphparser.c:516
OutputFilter::apad
char * apad
Definition: ffmpeg.h:325
out
FILE * out
Definition: movenc.c:55
av_frame_get_buffer
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:288
av_bprint_init
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:69
av_frame_get_side_data
AVFrameSideData * av_frame_get_side_data(const AVFrame *frame, enum AVFrameSideDataType type)
Definition: frame.c:947
InputFilterPriv::filter
AVFilterContext * filter
Definition: ffmpeg_filter.c:111
sample_fmts
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:948
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2965
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:90
FilterGraph::inputs
InputFilter ** inputs
Definition: ffmpeg.h:337
av_buffersink_get_frame_flags
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
Definition: buffersink.c:120
av_dict_count
int av_dict_count(const AVDictionary *m)
Get number of entries in dictionary.
Definition: dict.c:39
ifilter_parameters_from_frame
static int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame)
Definition: ffmpeg_filter.c:1990
ofilter_class
static const AVClass ofilter_class
Definition: ffmpeg_filter.c:648
HWACCEL_CHANGED
@ HWACCEL_CHANGED
Definition: ffmpeg_filter.c:2721
frame_drop_threshold
float frame_drop_threshold
Definition: ffmpeg_opt.c:61
AV_TIME_BASE_Q
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:264
InputFilterPriv::time_base
AVRational time_base
Definition: ffmpeg_filter.c:140
output
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce output
Definition: filter_design.txt:225
FilterCommand::arg
char * arg
Definition: ffmpeg_filter.c:243
AVSubtitleRect
Definition: avcodec.h:2201
av_asprintf
char * av_asprintf(const char *fmt,...)
Definition: avstring.c:115
AVSubtitle::num_rects
unsigned num_rects
Definition: avcodec.h:2232
fg_free
void fg_free(FilterGraph **pfg)
Definition: ffmpeg_filter.c:990
FPSConvContext::frames_prev_hist
int64_t frames_prev_hist[3]
Definition: ffmpeg_filter.c:173
AV_CODEC_ID_MPEG4
@ AV_CODEC_ID_MPEG4
Definition: codec_id.h:64
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:160
AVFrame::opaque
void * opaque
Frame owner's private data.
Definition: frame.h:522
AVFrame::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: frame.h:664
InputFile::index
int index
Definition: ffmpeg.h:439
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:374
tmp
static uint8_t tmp[11]
Definition: aes_ctr.c:28
AVFilterInOut::next
struct AVFilterInOut * next
next input/output in the list, NULL if this is the last
Definition: avfilter.h:967
pixdesc.h
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:486
AVFrame::width
int width
Definition: frame.h:446
FilterGraphPriv::log_name
char log_name[32]
Definition: ffmpeg_filter.c:48
w
uint8_t w
Definition: llviddspenc.c:38
FilterGraphPriv::have_sources
int have_sources
Definition: ffmpeg_filter.c:55
AVOption
AVOption.
Definition: opt.h:357
fg_output_frame
static int fg_output_frame(OutputFilterPriv *ofp, FilterGraphThread *fgt, AVFrame *frame)
Definition: ffmpeg_filter.c:2371
av_buffersrc_add_frame
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
Definition: buffersrc.c:172
FilterGraph::index
int index
Definition: ffmpeg.h:335
InputFilterPriv::sample_rate
int sample_rate
Definition: ffmpeg_filter.c:137
data
const char data[16]
Definition: mxf.c:148
FPSConvContext::last_dropped
int last_dropped
Definition: ffmpeg_filter.c:177
OutputFilterPriv::ts_offset
int64_t ts_offset
Definition: ffmpeg_filter.c:228
cleanup_filtergraph
static void cleanup_filtergraph(FilterGraph *fg, FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:1809
ffmpeg.h
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:196
fc
#define fc(width, name, range_min, range_max)
Definition: cbs_av1.c:464
FilterGraph::nb_inputs
int nb_inputs
Definition: ffmpeg.h:338
VIDEO_CHANGED
@ VIDEO_CHANGED
Definition: ffmpeg_filter.c:2718
AV_FRAME_DATA_DISPLAYMATRIX
@ AV_FRAME_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: frame.h:85
filter
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce then the filter should push the output frames on the output link immediately As an exception to the previous rule if the input frame is enough to produce several output frames then the filter needs output only at least one per link The additional frames can be left buffered in the filter
Definition: filter_design.txt:228
AVDictionary
Definition: dict.c:34
AVFrame::flags
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:646
ofp_from_ofilter
static OutputFilterPriv * ofp_from_ofilter(OutputFilter *ofilter)
Definition: ffmpeg_filter.c:235
AVChannelLayout::order
enum AVChannelOrder order
Channel order used in this layout.
Definition: channel_layout.h:308
av_buffer_ref
AVBufferRef * av_buffer_ref(const AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:103
IFILTER_FLAG_AUTOROTATE
@ IFILTER_FLAG_AUTOROTATE
Definition: ffmpeg.h:239
AVChannelLayout::nb_channels
int nb_channels
Number of channels in this layout.
Definition: channel_layout.h:313
AVFrame::buf
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:587
avio_size
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:323
av_strlcatf
size_t av_strlcatf(char *dst, size_t size, const char *fmt,...)
Definition: avstring.c:103
ost
static AVStream * ost
Definition: vaapi_transcode.c:42
sample_rate
sample_rate
Definition: ffmpeg_filter.c:424
fg_output_step
static int fg_output_step(OutputFilterPriv *ofp, FilterGraphThread *fgt, AVFrame *frame)
Definition: ffmpeg_filter.c:2454
FilterGraphPriv
Definition: ffmpeg_filter.c:44
av_channel_layout_describe_bprint
int av_channel_layout_describe_bprint(const AVChannelLayout *channel_layout, AVBPrint *bp)
bprint variant of av_channel_layout_describe().
Definition: channel_layout.c:590
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:30
FilterGraphThread::eof_in
uint8_t * eof_in
Definition: ffmpeg_filter.c:100
avfilter_graph_free
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
Definition: avfiltergraph.c:116
configure_filtergraph
static int configure_filtergraph(FilterGraph *fg, FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:1844
choose_pix_fmts
static int choose_pix_fmts(OutputFilter *ofilter, AVBPrint *bprint, const char **dst)
Definition: ffmpeg_filter.c:370
OutputFilterPriv::log_name
char log_name[32]
Definition: ffmpeg_filter.c:194
AUTO_INSERT_FILTER
#define AUTO_INSERT_FILTER(opt_name, filter_name, arg)
InputStream
Definition: ffmpeg.h:394
filter_nbthreads
char * filter_nbthreads
Definition: ffmpeg_opt.c:75
debug_ts
int debug_ts
Definition: ffmpeg_opt.c:69
OutputFilterOptions
Definition: ffmpeg.h:273
InputFilterOptions::trim_start_us
int64_t trim_start_us
Definition: ffmpeg.h:245
InputFilterOptions::flags
unsigned flags
Definition: ffmpeg.h:261
avfilter_graph_create_filter
int avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt, const char *name, const char *args, void *opaque, AVFilterGraph *graph_ctx)
Create and add a filter instance into an existing graph.
Definition: avfiltergraph.c:137
avfilter_graph_alloc_filter
AVFilterContext * avfilter_graph_alloc_filter(AVFilterGraph *graph, const AVFilter *filter, const char *name)
Create a new filter instance in a filter graph.
Definition: avfiltergraph.c:164
finish
static void finish(void)
Definition: movenc.c:373
AV_OPT_TYPE_BINARY
@ AV_OPT_TYPE_BINARY
offset must point to a pointer immediately followed by an int for the length
Definition: opt.h:251
av_color_space_name
const char * av_color_space_name(enum AVColorSpace space)
Definition: pixdesc.c:3341
FRAME_OPAQUE_SUB_HEARTBEAT
@ FRAME_OPAQUE_SUB_HEARTBEAT
Definition: ffmpeg.h:88
OutputFilterPriv
Definition: ffmpeg_filter.c:188
fg_thread_uninit
static void fg_thread_uninit(FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:2869
fail
#define fail()
Definition: checkasm.h:182
av_fifo_write
int av_fifo_write(AVFifo *f, const void *buf, size_t nb_elems)
Write data into a FIFO.
Definition: fifo.c:188
OutputFilterPriv::name
char * name
Definition: ffmpeg_filter.c:196
sub2video_push_ref
static void sub2video_push_ref(InputFilterPriv *ifp, int64_t pts)
Definition: ffmpeg_filter.c:313
avfilter_graph_alloc
AVFilterGraph * avfilter_graph_alloc(void)
Allocate a filter graph.
Definition: avfiltergraph.c:82
AV_PIX_FMT_FLAG_HWACCEL
#define AV_PIX_FMT_FLAG_HWACCEL
Pixel format is an HW accelerated format.
Definition: pixdesc.h:128
FFSIGN
#define FFSIGN(a)
Definition: common.h:74
samplefmt.h
AVERROR_OPTION_NOT_FOUND
#define AVERROR_OPTION_NOT_FOUND
Option not found.
Definition: error.h:63
avfilter_graph_segment_free
void avfilter_graph_segment_free(AVFilterGraphSegment **seg)
Free the provided AVFilterGraphSegment and everything associated with it.
Definition: graphparser.c:276
sub2video_get_blank_frame
static int sub2video_get_blank_frame(InputFilterPriv *ifp)
Definition: ffmpeg_filter.c:260
AV_BPRINT_SIZE_AUTOMATIC
#define AV_BPRINT_SIZE_AUTOMATIC
ifilter_has_all_input_formats
static int ifilter_has_all_input_formats(FilterGraph *fg)
Definition: ffmpeg_filter.c:618
val
static double val(void *priv, double ch)
Definition: aeval.c:78
OutputFilterPriv::index
int index
Definition: ffmpeg_filter.c:191
AVFrame::ch_layout
AVChannelLayout ch_layout
Channel layout of the audio data.
Definition: frame.h:775
SCH_ENC
#define SCH_ENC(encoder)
Definition: ffmpeg_sched.h:120
configure_input_video_filter
static int configure_input_video_filter(FilterGraph *fg, AVFilterGraph *graph, InputFilter *ifilter, AVFilterInOut *in)
Definition: ffmpeg_filter.c:1655
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
avfilter_graph_segment_parse
int avfilter_graph_segment_parse(AVFilterGraph *graph, const char *graph_str, int flags, AVFilterGraphSegment **seg)
Parse a textual filtergraph description into an intermediate form.
Definition: graphparser.c:460
pts
static int64_t pts
Definition: transcode_aac.c:644
av_opt_set
int av_opt_set(void *obj, const char *name, const char *val, int search_flags)
Definition: opt.c:747
graph_is_meta
static int graph_is_meta(AVFilterGraph *graph)
Definition: ffmpeg_filter.c:1825
FilterGraphThread::frame
AVFrame * frame
Definition: ffmpeg_filter.c:87
AVFILTER_FLAG_DYNAMIC_INPUTS
#define AVFILTER_FLAG_DYNAMIC_INPUTS
The number of the filter inputs is not determined just by AVFilter.inputs.
Definition: avfilter.h:106
FrameData::tb
AVRational tb
Definition: ffmpeg.h:640
OutputFilterPriv::sws_opts
AVDictionary * sws_opts
Definition: ffmpeg_filter.c:215
fgp_from_fg
static FilterGraphPriv * fgp_from_fg(FilterGraph *fg)
Definition: ffmpeg_filter.c:73
OutputFilterPriv::sample_rate
int sample_rate
Definition: ffmpeg_filter.c:203
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
FPSConvContext::dropped_keyframe
int dropped_keyframe
Definition: ffmpeg_filter.c:178
AVRational::num
int num
Numerator.
Definition: rational.h:59
OutputFilter::bound
int bound
Definition: ffmpeg.h:322
LATENCY_PROBE_FILTER_PRE
@ LATENCY_PROBE_FILTER_PRE
Definition: ffmpeg.h:102
InputFilterOptions::trim_end_us
int64_t trim_end_us
Definition: ffmpeg.h:246
AVFilterPad
A filter pad used for either input or output.
Definition: internal.h:33
sch_add_filtergraph
int sch_add_filtergraph(Scheduler *sch, unsigned nb_inputs, unsigned nb_outputs, SchThreadFunc func, void *ctx)
Add a filtergraph to the scheduler.
Definition: ffmpeg_sched.c:799
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:148
sub2video_heartbeat
static void sub2video_heartbeat(InputFilter *ifilter, int64_t pts, AVRational tb)
Definition: ffmpeg_filter.c:2593
check_stream_specifier
int check_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec)
Check if the given stream matches a stream specifier.
Definition: cmdutils.c:979
avfilter_inout_free
void avfilter_inout_free(AVFilterInOut **inout)
Free the supplied list of AVFilterInOut and set *inout to NULL.
Definition: graphparser.c:76
avassert.h
OutputFilterPriv::trim_start_us
int64_t trim_start_us
Definition: ffmpeg_filter.c:225
FrameData::frame_rate_filter
AVRational frame_rate_filter
Definition: ffmpeg.h:643
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
send_eof
static int send_eof(FilterGraphThread *fgt, InputFilter *ifilter, int64_t pts, AVRational tb)
Definition: ffmpeg_filter.c:2662
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
InputFilterPriv
Definition: ffmpeg_filter.c:104
av_fifo_read
int av_fifo_read(AVFifo *f, void *buf, size_t nb_elems)
Read data from a FIFO.
Definition: fifo.c:240
fg_complex_bind_input
static int fg_complex_bind_input(FilterGraph *fg, InputFilter *ifilter)
Definition: ffmpeg_filter.c:1248
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:625
duration
int64_t duration
Definition: movenc.c:65
av_buffersink_get_frame_rate
AVRational av_buffersink_get_frame_rate(const AVFilterContext *ctx)
ifilter_alloc
static InputFilter * ifilter_alloc(FilterGraph *fg)
Definition: ffmpeg_filter.c:962
AVFilterChain::filters
AVFilterParams ** filters
Definition: avfilter.h:1143
filter_command_free
static void filter_command_free(void *opaque, uint8_t *data)
Definition: ffmpeg_filter.c:249
VSYNC_VSCFR
@ VSYNC_VSCFR
Definition: ffmpeg.h:70
llrintf
#define llrintf(x)
Definition: libm.h:399
s
#define s(width, name)
Definition: cbs_vp9.c:198
FilterGraphPriv::frame_enc
AVFrame * frame_enc
Definition: ffmpeg_filter.c:67
InputFilterPriv::frame
AVFrame * frame
Definition: ffmpeg_filter.c:114
FilterGraph::outputs
OutputFilter ** outputs
Definition: ffmpeg.h:339
ofilter_item_name
static const char * ofilter_item_name(void *obj)
Definition: ffmpeg_filter.c:642
format
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample format(the sample packing is implied by the sample format) and sample rate. The lists are not just lists
AVDictionaryEntry::key
char * key
Definition: dict.h:90
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
configure_output_video_filter
static int configure_output_video_filter(FilterGraph *fg, AVFilterGraph *graph, OutputFilter *ofilter, AVFilterInOut *out)
Definition: ffmpeg_filter.c:1476
AV_CHANNEL_ORDER_UNSPEC
@ AV_CHANNEL_ORDER_UNSPEC
Only the channel count is specified, without any further information about the channel order.
Definition: channel_layout.h:112
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
OutputFilter::linklabel
uint8_t * linklabel
Definition: ffmpeg.h:323
InputFilter
Definition: ffmpeg.h:309
FilterGraphPriv::nb_outputs_done
unsigned nb_outputs_done
Definition: ffmpeg_filter.c:58
av_buffersink_get_format
int av_buffersink_get_format(const AVFilterContext *ctx)
av_buffersink_get_time_base
AVRational av_buffersink_get_time_base(const AVFilterContext *ctx)
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:304
ist_filter_add
int ist_filter_add(InputStream *ist, InputFilter *ifilter, int is_simple, InputFilterOptions *opts)
Definition: ffmpeg_demux.c:984
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
FrameData::dec
struct FrameData::@4 dec
ctx
AVFormatContext * ctx
Definition: movenc.c:49
OFILTER_FLAG_AUTOSCALE
@ OFILTER_FLAG_AUTOSCALE
Definition: ffmpeg.h:270
av_rescale_q
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
AVSubtitle::pts
int64_t pts
Same as packet pts, in AV_TIME_BASE.
Definition: avcodec.h:2234
fg_thread_init
static int fg_thread_init(FilterGraphThread *fgt, const FilterGraph *fg)
Definition: ffmpeg_filter.c:2887
InputFilterOptions::name
uint8_t * name
Definition: ffmpeg.h:248
graph_opts_apply
static int graph_opts_apply(AVFilterGraphSegment *seg)
Definition: ffmpeg_filter.c:550
init_simple_filtergraph
int init_simple_filtergraph(InputStream *ist, OutputStream *ost, char *graph_desc, Scheduler *sch, unsigned sched_idx_enc, const OutputFilterOptions *opts)
Definition: ffmpeg_filter.c:1192
InputFilter::graph
struct FilterGraph * graph
Definition: ffmpeg.h:310
av_get_sample_fmt_name
const char * av_get_sample_fmt_name(enum AVSampleFormat sample_fmt)
Return the name of sample_fmt, or NULL if sample_fmt is not recognized.
Definition: samplefmt.c:51
key
const char * key
Definition: hwcontext_opencl.c:189
fsize
static int64_t fsize(FILE *f)
Definition: audiomatch.c:29
OutputFilterPriv::fps
FPSConvContext fps
Definition: ffmpeg_filter.c:230
fg_item_name
static const char * fg_item_name(void *obj)
Definition: ffmpeg_filter.c:1046
command
static int command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
Definition: vf_drawtext.c:1186
arg
const char * arg
Definition: jacosubdec.c:67
OutputFilterPriv::ch_layouts
const AVChannelLayout * ch_layouts
Definition: ffmpeg_filter.c:221
OutputFilterPriv::width
int width
Definition: ffmpeg_filter.c:202
av_color_range_name
const char * av_color_range_name(enum AVColorRange range)
Definition: pixdesc.c:3281
AVFormatContext
Format I/O context.
Definition: avformat.h:1255
avfilter_get_by_name
const AVFilter * avfilter_get_by_name(const char *name)
Get a filter definition matching the given name.
Definition: allfilters.c:631
opts
AVDictionary * opts
Definition: movenc.c:51
AVStream::codecpar
AVCodecParameters * codecpar
Codec parameters associated with this stream.
Definition: avformat.h:766
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
FilterGraphPriv::nb_threads
char * nb_threads
Definition: ffmpeg_filter.c:62
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
OutputFilter::name
uint8_t * name
Definition: ffmpeg.h:318
fabs
static __device__ float fabs(float a)
Definition: cuda_runtime.h:182
avfilter_graph_config
int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx)
Check validity and configure all the links and formats in the graph.
Definition: avfiltergraph.c:1243
OutputFilterPriv::enc_timebase
AVRational enc_timebase
Definition: ffmpeg_filter.c:224
avfilter_graph_segment_apply
int avfilter_graph_segment_apply(AVFilterGraphSegment *seg, int flags, AVFilterInOut **inputs, AVFilterInOut **outputs)
Apply all filter/link descriptions from a graph segment to the associated filtergraph.
Definition: graphparser.c:881
InputFilterPriv::color_space
enum AVColorSpace color_space
Definition: ffmpeg_filter.c:134
NULL
#define NULL
Definition: coverity.c:32
av_opt_set_bin
int av_opt_set_bin(void *obj, const char *name, const uint8_t *val, int len, int search_flags)
Definition: opt.c:814
set_channel_layout
static int set_channel_layout(OutputFilterPriv *f, const AVChannelLayout *layouts_allowed, const AVChannelLayout *layout_requested)
Definition: ffmpeg_filter.c:760
OutputFilterPriv::ch_layout
AVChannelLayout ch_layout
Definition: ffmpeg_filter.c:204
AVFilterParams
Parameters describing a filter to be created in a filtergraph.
Definition: avfilter.h:1075
FPSConvContext::dup_warning
uint64_t dup_warning
Definition: ffmpeg_filter.c:175
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:139
InputStream::st
AVStream * st
Definition: ffmpeg.h:402
dec_filter_add
int dec_filter_add(Decoder *dec, InputFilter *ifilter, InputFilterOptions *opts)
Definition: ffmpeg_dec.c:1370
avfilter_graph_set_auto_convert
void avfilter_graph_set_auto_convert(AVFilterGraph *graph, unsigned flags)
Enable or disable automatic format conversion inside the graph.
Definition: avfiltergraph.c:159
InputFilterPriv::displaymatrix_present
int displaymatrix_present
Definition: ffmpeg_filter.c:146
Decoder
Definition: ffmpeg.h:380
AV_BUFFERSRC_FLAG_KEEP_REF
@ AV_BUFFERSRC_FLAG_KEEP_REF
Keep a reference to the frame.
Definition: buffersrc.h:53
AVFilterParams::filter
AVFilterContext * filter
The filter context.
Definition: avfilter.h:1086
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
OFILTER_FLAG_AUDIO_24BIT
@ OFILTER_FLAG_AUDIO_24BIT
Definition: ffmpeg.h:269
AVFilterChain::nb_filters
size_t nb_filters
Definition: avfilter.h:1144
AVFilterGraph::filters
AVFilterContext ** filters
Definition: avfilter.h:815
InputFilterPriv::linklabel
uint8_t * linklabel
Definition: ffmpeg_filter.c:118
ofilter_bind_ifilter
static int ofilter_bind_ifilter(OutputFilter *ofilter, InputFilterPriv *ifp, const OutputFilterOptions *opts)
Definition: ffmpeg_filter.c:905
filter_opt_apply
static int filter_opt_apply(AVFilterContext *f, const char *key, const char *val)
Definition: ffmpeg_filter.c:495
OutputFilterPriv::sample_aspect_ratio
AVRational sample_aspect_ratio
Definition: ffmpeg_filter.c:213
ofilter_alloc
static OutputFilter * ofilter_alloc(FilterGraph *fg, enum AVMediaType type)
Definition: ffmpeg_filter.c:656
close_output
static int close_output(OutputFilterPriv *ofp, FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:2321
FilterGraphThread::frame_queue_out
AVFifo * frame_queue_out
Definition: ffmpeg_filter.c:92
ifilter_bind_ist
static int ifilter_bind_ist(InputFilter *ifilter, InputStream *ist)
Definition: ffmpeg_filter.c:679
mathops.h
configure_output_audio_filter
static int configure_output_audio_filter(FilterGraph *fg, AVFilterGraph *graph, OutputFilter *ofilter, AVFilterInOut *out)
Definition: ffmpeg_filter.c:1552
AV_ROUND_NEAR_INF
@ AV_ROUND_NEAR_INF
Round to nearest and halfway cases away from zero.
Definition: mathematics.h:135
AVFILTER_AUTO_CONVERT_NONE
@ AVFILTER_AUTO_CONVERT_NONE
all automatic conversions disabled
Definition: avfilter.h:929
FilterGraphPriv::sch_idx
unsigned sch_idx
Definition: ffmpeg_filter.c:70
FrameData::wallclock
int64_t wallclock[LATENCY_PROBE_NB]
Definition: ffmpeg.h:647
avfilter_graph_request_oldest
int avfilter_graph_request_oldest(AVFilterGraph *graph)
Request a frame on the oldest sink link.
Definition: avfiltergraph.c:1373
time.h
AVFilterGraphSegment::chains
AVFilterChain ** chains
A list of filter chain contained in this segment.
Definition: avfilter.h:1167
AVFilterGraph
Definition: avfilter.h:813
inputs
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
Definition: filter_design.txt:243
InputFilterOptions
Definition: ffmpeg.h:244
InputFilterPriv::sample_aspect_ratio
AVRational sample_aspect_ratio
Definition: ffmpeg_filter.c:133
AVCOL_RANGE_UNSPECIFIED
@ AVCOL_RANGE_UNSPECIFIED
Definition: pixfmt.h:652
FilterGraph::nb_outputs
int nb_outputs
Definition: ffmpeg.h:340
OutputFilterPriv::formats
const int * formats
Definition: ffmpeg_filter.c:220
av_opt_set_int
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
Definition: opt.c:799
InputStream::par
AVCodecParameters * par
Codec parameters - to be used by the decoding/streamcopy code.
Definition: ffmpeg.h:410
av_buffer_create
AVBufferRef * av_buffer_create(uint8_t *data, size_t size, void(*free)(void *opaque, uint8_t *data), void *opaque, int flags)
Create an AVBuffer from an existing array.
Definition: buffer.c:55
input_files
InputFile ** input_files
Definition: ffmpeg.c:104
AV_CLASS_CATEGORY_FILTER
@ AV_CLASS_CATEGORY_FILTER
Definition: log.h:36
Scheduler
Definition: ffmpeg_sched.c:269
FilterGraphPriv::fg
FilterGraph fg
Definition: ffmpeg_filter.c:45
OutputFilterPriv::ofilter
OutputFilter ofilter
Definition: ffmpeg_filter.c:189
FilterGraph
Definition: ffmpeg.h:333
AVFilterGraphSegment
A parsed representation of a filtergraph segment.
Definition: avfilter.h:1156
file_read
char * file_read(const char *filename)
Definition: cmdutils.c:1125
ENC_TIME_BASE_DEMUX
@ ENC_TIME_BASE_DEMUX
Definition: ffmpeg.h:77
InputFilterOptions::sub2video_width
int sub2video_width
Definition: ffmpeg.h:257
AVFilterInOut::pad_idx
int pad_idx
index of the filt_ctx pad to use for linking
Definition: avfilter.h:964
av_buffersrc_close
int av_buffersrc_close(AVFilterContext *ctx, int64_t pts, unsigned flags)
Close the buffer source after EOF.
Definition: buffersrc.c:276
AVFilterGraph::scale_sws_opts
char * scale_sws_opts
sws options to use for the auto-inserted scale filters
Definition: avfilter.h:818
filtergraph_is_simple
int filtergraph_is_simple(const FilterGraph *fg)
Definition: ffmpeg_filter.c:2025
VideoSyncMethod
VideoSyncMethod
Definition: ffmpeg.h:65
av_opt_find
const AVOption * av_opt_find(void *obj, const char *name, const char *unit, int opt_flags, int search_flags)
Look for an option in an object.
Definition: opt.c:1967
IFILTER_FLAG_REINIT
@ IFILTER_FLAG_REINIT
Definition: ffmpeg.h:240
f
f
Definition: af_crystalizer.c:121
AVIOContext
Bytestream IO Context.
Definition: avio.h:160
configure_output_filter
static int configure_output_filter(FilterGraph *fg, AVFilterGraph *graph, OutputFilter *ofilter, AVFilterInOut *out)
Definition: ffmpeg_filter.c:1634
av_ts2timestr
#define av_ts2timestr(ts, tb)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:83
filter_thread
static int filter_thread(void *arg)
Definition: ffmpeg_filter.c:2914
AVMediaType
AVMediaType
Definition: avutil.h:199
InputFilterPriv::hw_frames_ctx
AVBufferRef * hw_frames_ctx
Definition: ffmpeg_filter.c:144
AVFifo
Definition: fifo.c:35
FRAME_OPAQUE_SEND_COMMAND
@ FRAME_OPAQUE_SEND_COMMAND
Definition: ffmpeg.h:90
FilterGraphThread
Definition: ffmpeg_filter.c:84
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:384
av_bprint_finalize
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:240
InputFilterPriv::displaymatrix
int32_t displaymatrix[9]
Definition: ffmpeg_filter.c:148
AVChannelLayout
An AVChannelLayout holds information about the channel layout of audio data.
Definition: channel_layout.h:303
FilterGraphThread::graph
AVFilterGraph * graph
Definition: ffmpeg_filter.c:85
av_buffersrc_parameters_alloc
AVBufferSrcParameters * av_buffersrc_parameters_alloc(void)
Allocate a new AVBufferSrcParameters instance.
Definition: buffersrc.c:103
AVFilterInOut::filter_ctx
AVFilterContext * filter_ctx
filter context associated to this input/output
Definition: avfilter.h:961
av_err2str
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:122
AVFrame::sample_rate
int sample_rate
Sample rate of the audio data.
Definition: frame.h:573
OutputFilterPriv::tb_out_locked
int tb_out_locked
Definition: ffmpeg_filter.c:211
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
avfilter_link
int avfilter_link(AVFilterContext *src, unsigned srcpad, AVFilterContext *dst, unsigned dstpad)
Link two filters together.
Definition: avfilter.c:149
AVBufferSrcParameters::hw_frames_ctx
AVBufferRef * hw_frames_ctx
Video with a hwaccel pixel format only.
Definition: buffersrc.h:106
start_time
static int64_t start_time
Definition: ffplay.c:326
AVFILTER_FLAG_HWDEVICE
#define AVFILTER_FLAG_HWDEVICE
The filter can create hardware frames using AVFilterContext.hw_device_ctx.
Definition: avfilter.h:138
adjust_frame_pts_to_encoder_tb
static double adjust_frame_pts_to_encoder_tb(AVFrame *frame, AVRational tb_dst, int64_t start_time)
Definition: ffmpeg_filter.c:2157
InputFilterPriv::color_range
enum AVColorRange color_range
Definition: ffmpeg_filter.c:135
AV_SAMPLE_FMT_NONE
@ AV_SAMPLE_FMT_NONE
Definition: samplefmt.h:56
MATRIX_CHANGED
@ MATRIX_CHANGED
Definition: ffmpeg_filter.c:2720
FilterCommand::time
double time
Definition: ffmpeg_filter.c:245
insert_trim
static int insert_trim(int64_t start_time, int64_t duration, AVFilterContext **last_filter, int *pad_idx, const char *filter_name)
Definition: ffmpeg_filter.c:1403
InputFilterPriv::initialize
unsigned int initialize
Definition: ffmpeg_filter.c:157
InputFilterPriv::displaymatrix_applied
int displaymatrix_applied
Definition: ffmpeg_filter.c:147
graph_parse
static int graph_parse(AVFilterGraph *graph, const char *desc, AVFilterInOut **inputs, AVFilterInOut **outputs, AVBufferRef *hw_device)
Definition: ffmpeg_filter.c:574
avfilter_graph_queue_command
int avfilter_graph_queue_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, int flags, double ts)
Queue a command for one or more filter instances.
Definition: avfiltergraph.c:1291
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
AVFrame::time_base
AVRational time_base
Time base for the timestamps in this frame.
Definition: frame.h:501
AVFrameSideData::data
uint8_t * data
Definition: frame.h:252
read_binary
static int read_binary(const char *path, uint8_t **data, int *len)
Definition: ffmpeg_filter.c:447
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:461
FilterGraphPriv::disable_conversions
int disable_conversions
Definition: ffmpeg_filter.c:56
frame_data
FrameData * frame_data(AVFrame *frame)
Get our axiliary frame data attached to the frame, allocating it if needed.
Definition: ffmpeg.c:452
AVSubtitle::end_display_time
uint32_t end_display_time
Definition: avcodec.h:2231
FilterGraphThread::eof_out
uint8_t * eof_out
Definition: ffmpeg_filter.c:101
FilterGraphPriv::graph_desc
const char * graph_desc
Definition: ffmpeg_filter.c:60
allocate_array_elem
void * allocate_array_elem(void *ptr, size_t elem_size, int *nb_elems)
Atomically add a new element to an array of pointers, i.e.
Definition: cmdutils.c:1097
FPSConvContext::vsync_method
enum VideoSyncMethod vsync_method
Definition: ffmpeg_filter.c:180
av_frame_remove_side_data
void av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type)
Remove and free all side data instances of the given type.
Definition: frame.c:1013
InputFilterPriv::width
int width
Definition: ffmpeg_filter.c:132
AV_PIX_FMT_RGB32
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:451
filter_is_buffersrc
static int filter_is_buffersrc(const AVFilterContext *f)
Definition: ffmpeg_filter.c:1818
fg_finalise_bindings
int fg_finalise_bindings(void)
Definition: ffmpeg_filter.c:1376
AUDIO_CHANGED
@ AUDIO_CHANGED
Definition: ffmpeg_filter.c:2719
SCH_DEC
#define SCH_DEC(decoder)
Definition: ffmpeg_sched.h:117
sch_filter_receive
int sch_filter_receive(Scheduler *sch, unsigned fg_idx, unsigned *in_idx, AVFrame *frame)
Called by filtergraph tasks to obtain frames for filtering.
Definition: ffmpeg_sched.c:2328
av_dict_free
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
Definition: dict.c:223
unknown_if_null
static const char * unknown_if_null(const char *str)
Definition: ffmpeg_filter.c:2724
InputFilterOptions::sub2video_height
int sub2video_height
Definition: ffmpeg.h:258
decoders
Decoder ** decoders
Definition: ffmpeg.c:113
OutputFilterPriv::log_parent
void * log_parent
Definition: ffmpeg_filter.c:193
nb_decoders
int nb_decoders
Definition: ffmpeg.c:114
OutputFilter::type
enum AVMediaType type
Definition: ffmpeg.h:327
read_frames
static int read_frames(FilterGraph *fg, FilterGraphThread *fgt, AVFrame *frame)
Definition: ffmpeg_filter.c:2531
av_channel_layout_compare
int av_channel_layout_compare(const AVChannelLayout *chl, const AVChannelLayout *chl1)
Check whether two channel layouts are semantically the same, i.e.
Definition: channel_layout.c:800
SUBTITLE_BITMAP
@ SUBTITLE_BITMAP
A bitmap, pict will be set.
Definition: avcodec.h:2184
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:191
sample_rates
sample_rates
Definition: ffmpeg_filter.c:424
send_frame
static int send_frame(FilterGraph *fg, FilterGraphThread *fgt, InputFilter *ifilter, AVFrame *frame)
Definition: ffmpeg_filter.c:2729
avfilter_init_str
int avfilter_init_str(AVFilterContext *filter, const char *args)
Initialize a filter with the supplied parameters.
Definition: avfilter.c:941
buffersink.h
av_channel_layout_default
void av_channel_layout_default(AVChannelLayout *ch_layout, int nb_channels)
Get the default channel layout for a given number of channels.
Definition: channel_layout.c:830
av_find_nearest_q_idx
int av_find_nearest_q_idx(AVRational q, const AVRational *q_list)
Find the value in a list of rationals nearest a given reference rational.
Definition: rational.c:142
av_buffersink_get_w
int av_buffersink_get_w(const AVFilterContext *ctx)
FilterCommand::all_filters
int all_filters
Definition: ffmpeg_filter.c:246
FPSConvContext::framerate_clip
int framerate_clip
Definition: ffmpeg_filter.c:185
bprint.h
FPSConvContext::frame_number
int64_t frame_number
Definition: ffmpeg_filter.c:169
av_buffersrc_parameters_set
int av_buffersrc_parameters_set(AVFilterContext *ctx, AVBufferSrcParameters *param)
Initialize the buffersrc or abuffersrc filter with the provided parameters.
Definition: buffersrc.c:116
FPSConvContext
Definition: ffmpeg_filter.c:166
lrintf
#define lrintf(x)
Definition: libm_mips.h:72
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
InputFilterPriv::index
int index
Definition: ffmpeg_filter.c:109
FrameData::bits_per_raw_sample
int bits_per_raw_sample
Definition: ffmpeg.h:645
fg_send_command
void fg_send_command(FilterGraph *fg, double time, const char *target, const char *command, const char *arg, int all_filters)
Definition: ffmpeg_filter.c:3031
FilterGraphPriv::is_simple
int is_simple
Definition: ffmpeg_filter.c:50
InputFilterOptions::fallback
AVFrame * fallback
Definition: ffmpeg.h:263
av_buffersrc_add_frame_flags
int attribute_align_arg av_buffersrc_add_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Add a frame to the buffer source.
Definition: buffersrc.c:191
VSYNC_CFR
@ VSYNC_CFR
Definition: ffmpeg.h:68
src2
const pixel * src2
Definition: h264pred_template.c:422
configure_input_audio_filter
static int configure_input_audio_filter(FilterGraph *fg, AVFilterGraph *graph, InputFilter *ifilter, AVFilterInOut *in)
Definition: ffmpeg_filter.c:1758
AVColorSpace
AVColorSpace
YUV colorspace type.
Definition: pixfmt.h:609
FPSConvContext::framerate_max
AVRational framerate_max
Definition: ffmpeg_filter.c:183
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:56
InputFilterPriv::sub2video
struct InputFilterPriv::@6 sub2video
AVSampleFormat
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:55
delta
float delta
Definition: vorbis_enc_data.h:430
FRAME_OPAQUE_EOF
@ FRAME_OPAQUE_EOF
Definition: ffmpeg.h:89
InputFile::ctx
AVFormatContext * ctx
Definition: ffmpeg.h:441
av_frame_move_ref
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:633
AV_BUFFERSRC_FLAG_PUSH
@ AV_BUFFERSRC_FLAG_PUSH
Immediately push the frame to the output.
Definition: buffersrc.h:46
cfgp_from_cfg
static const FilterGraphPriv * cfgp_from_cfg(const FilterGraph *fg)
Definition: ffmpeg_filter.c:78
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:606
InputFilterPriv::eof
int eof
Definition: ffmpeg_filter.c:126
tb
#define tb
Definition: regdef.h:68
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:256
av_inv_q
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
av_buffer_replace
int av_buffer_replace(AVBufferRef **pdst, const AVBufferRef *src)
Ensure dst refers to the same data as src.
Definition: buffer.c:233
len
int len
Definition: vorbis_enc_data.h:426
AVCOL_SPC_UNSPECIFIED
@ AVCOL_SPC_UNSPECIFIED
Definition: pixfmt.h:612
filtergraphs
FilterGraph ** filtergraphs
Definition: ffmpeg.c:110
int_cb
const AVIOInterruptCB int_cb
Definition: ffmpeg.c:306
ofilter_bind_ost
int ofilter_bind_ost(OutputFilter *ofilter, OutputStream *ost, unsigned sched_idx_enc, const OutputFilterOptions *opts)
Definition: ffmpeg_filter.c:800
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
outputs
static const AVFilterPad outputs[]
Definition: af_aap.c:311
sch_connect
int sch_connect(Scheduler *sch, SchedulerNode src, SchedulerNode dst)
Definition: ffmpeg_sched.c:897
FFMPEG_OPT_VSYNC_DROP
#define FFMPEG_OPT_VSYNC_DROP
Definition: ffmpeg.h:59
av_buffersink_get_h
int av_buffersink_get_h(const AVFilterContext *ctx)
sch_filter_command
int sch_filter_command(Scheduler *sch, unsigned fg_idx, AVFrame *frame)
Definition: ffmpeg_sched.c:2438
AVFilter
Filter definition.
Definition: avfilter.h:166
video_sync_process
static void video_sync_process(OutputFilterPriv *ofp, AVFrame *frame, int64_t *nb_frames, int64_t *nb_frames_prev)
Definition: ffmpeg_filter.c:2199
ifp_from_ifilter
static InputFilterPriv * ifp_from_ifilter(InputFilter *ifilter)
Definition: ffmpeg_filter.c:161
fg_create
int fg_create(FilterGraph **pfg, char *graph_desc, Scheduler *sch)
Create a new filtergraph in the global filtergraph list.
Definition: ffmpeg_filter.c:1060
mid_pred
#define mid_pred
Definition: mathops.h:98
AV_BUFFERSINK_FLAG_NO_REQUEST
#define AV_BUFFERSINK_FLAG_NO_REQUEST
Tell av_buffersink_get_buffer_ref() not to request a frame from its input.
Definition: buffersink.h:97
ret
ret
Definition: filter_design.txt:187
AVStream
Stream structure.
Definition: avformat.h:743
AV_LOG_FATAL
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:174
pixfmt.h
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:71
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
InputFilter::name
uint8_t * name
Definition: ffmpeg.h:311
VSYNC_DROP
@ VSYNC_DROP
Definition: ffmpeg.h:72
av_channel_layout_check
int av_channel_layout_check(const AVChannelLayout *channel_layout)
Check whether a channel layout is valid, i.e.
Definition: channel_layout.c:774
FPSConvContext::last_frame
AVFrame * last_frame
Definition: ffmpeg_filter.c:167
InputFile::streams
InputStream ** streams
Definition: ffmpeg.h:455
insert_filter
static int insert_filter(AVFilterContext **last_filter, int *pad_idx, const char *filter_name, const char *args)
Definition: ffmpeg_filter.c:1454
AVFilterParams::opts
AVDictionary * opts
Options to be apllied to the filter.
Definition: avfilter.h:1127
OutputFilterPriv::next_pts
int64_t next_pts
Definition: ffmpeg_filter.c:229
choose_channel_layouts
static void choose_channel_layouts(OutputFilterPriv *ofp, AVBPrint *bprint)
Definition: ffmpeg_filter.c:427
av_bprintf
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:99
ReinitReason
ReinitReason
Definition: ffmpeg_filter.c:2717
av_fifo_alloc2
AVFifo * av_fifo_alloc2(size_t nb_elems, size_t elem_size, unsigned int flags)
Allocate and initialize an AVFifo with a given element size.
Definition: fifo.c:47
AVOption::type
enum AVOptionType type
Definition: opt.h:373
AVFrame::sample_aspect_ratio
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:481
avfilter_pad_get_type
enum AVMediaType avfilter_pad_get_type(const AVFilterPad *pads, int pad_idx)
Get the type of an AVFilterPad.
Definition: avfilter.c:974
av_dynarray_add_nofree
int av_dynarray_add_nofree(void *tab_ptr, int *nb_ptr, void *elem)
Add an element to a dynamic array.
Definition: mem.c:315
FrameOpaque
FrameOpaque
Definition: ffmpeg.h:87
OutputFilterPriv::swr_opts
AVDictionary * swr_opts
Definition: ffmpeg_filter.c:216
av_get_media_type_string
const char * av_get_media_type_string(enum AVMediaType media_type)
Return a string describing the media_type enum, NULL if media_type is unknown.
Definition: utils.c:28
AVFrame::height
int height
Definition: frame.h:446
AVStream::index
int index
stream index in AVFormatContext
Definition: avformat.h:749
DEF_CHOOSE_FORMAT
#define DEF_CHOOSE_FORMAT(name, type, var, supported_list, none, printf_format, get_name)
Definition: ffmpeg_filter.c:398
channel_layout.h
AVBufferSrcParameters
This structure contains the parameters describing the frames that will be passed to this filter.
Definition: buffersrc.h:73
av_buffersink_get_sample_rate
int av_buffersink_get_sample_rate(const AVFilterContext *ctx)
describe_filter_link
static char * describe_filter_link(FilterGraph *fg, AVFilterInOut *inout, int in)
Definition: ffmpeg_filter.c:630
buffer
the frame and frame reference mechanism is intended to as much as expensive copies of that data while still allowing the filters to produce correct results The data is stored in buffers represented by AVFrame structures Several references can point to the same frame buffer
Definition: filter_design.txt:49
InputFilterPriv::bound
int bound
Definition: ffmpeg_filter.c:127
AVRational::den
int den
Denominator.
Definition: rational.h:60
InputStream::file
struct InputFile * file
Definition: ffmpeg.h:398
AVFilterChain
A filterchain is a list of filter specifications.
Definition: avfilter.h:1142
InputFilterPriv::frame_queue
AVFifo * frame_queue
Definition: ffmpeg_filter.c:142
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
atomic_fetch_add
#define atomic_fetch_add(object, operand)
Definition: stdatomic.h:131
avfilter.h
InputFilterPriv::type_src
enum AVMediaType type_src
Definition: ffmpeg_filter.c:124
av_channel_layout_uninit
void av_channel_layout_uninit(AVChannelLayout *channel_layout)
Free any allocated data in the channel layout and reset the channel count to 0.
Definition: channel_layout.c:433
FilterGraphPriv::is_meta
int is_meta
Definition: ffmpeg_filter.c:53
IFILTER_FLAG_CFR
@ IFILTER_FLAG_CFR
Definition: ffmpeg.h:241
AVFILTER_FLAG_METADATA_ONLY
#define AVFILTER_FLAG_METADATA_ONLY
The filter is a "metadata" filter - it does not modify the frame data in any way.
Definition: avfilter.h:133
avio_read
int avio_read(AVIOContext *s, unsigned char *buf, int size)
Read size bytes from AVIOContext into buf.
Definition: aviobuf.c:612
ifilter_bind_fg
static int ifilter_bind_fg(InputFilterPriv *ifp, FilterGraph *fg_src, int out_idx)
Definition: ffmpeg_filter.c:925
choose_out_timebase
static int choose_out_timebase(OutputFilterPriv *ofp, AVFrame *frame)
Definition: ffmpeg_filter.c:2080
OutputFilterPriv::flags
unsigned flags
Definition: ffmpeg_filter.c:232
OutputFilterPriv::sample_rates
const int * sample_rates
Definition: ffmpeg_filter.c:222
AVERROR_FILTER_NOT_FOUND
#define AVERROR_FILTER_NOT_FOUND
Filter not found.
Definition: error.h:60
sub2video_copy_rect
static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h, AVSubtitleRect *r)
Definition: ffmpeg_filter.c:282
AVFilterGraphSegment::nb_chains
size_t nb_chains
Definition: avfilter.h:1168
AVFilterContext
An instance of a filter.
Definition: avfilter.h:407
FilterGraph::class
const AVClass * class
Definition: ffmpeg.h:334
av_channel_layout_copy
int av_channel_layout_copy(AVChannelLayout *dst, const AVChannelLayout *src)
Make a copy of a channel layout.
Definition: channel_layout.c:440
OutputFilter
Definition: ffmpeg.h:314
sub2video_frame
static int sub2video_frame(InputFilter *ifilter, AVFrame *frame, int buffer)
Definition: ffmpeg_filter.c:2615
InputFilterPriv::ifilter
InputFilter ifilter
Definition: ffmpeg_filter.c:105
AVIO_FLAG_READ
#define AVIO_FLAG_READ
read-only
Definition: avio.h:617
av_strdup
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:272
desc
const char * desc
Definition: libsvtav1.c:79
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
av_buffersrc_get_nb_failed_requests
unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src)
Get the number of failed requests.
Definition: buffersrc.c:311
mem.h
AVBufferRef
A reference to a data buffer.
Definition: buffer.h:82
avio_open2
int avio_open2(AVIOContext **s, const char *filename, int flags, const AVIOInterruptCB *int_cb, AVDictionary **options)
Create and initialize a AVIOContext for accessing the resource indicated by url.
Definition: avio.c:491
OutputFilter::nb_frames_drop
atomic_uint_least64_t nb_frames_drop
Definition: ffmpeg.h:330
auto_conversion_filters
int auto_conversion_filters
Definition: ffmpeg_opt.c:78
llrint
#define llrint(x)
Definition: libm.h:394
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:250
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
InputStream::index
int index
Definition: ffmpeg.h:400
sch_filter_receive_finish
void sch_filter_receive_finish(Scheduler *sch, unsigned fg_idx, unsigned in_idx)
Called by filter tasks to signal that a filter input will no longer accept input.
Definition: ffmpeg_sched.c:2372
av_free
#define av_free(p)
Definition: tableprint_vlc.h:33
AVDictionaryEntry
Definition: dict.h:89
ENC_TIME_BASE_FILTER
@ ENC_TIME_BASE_FILTER
Definition: ffmpeg.h:78
FilterCommand::target
char * target
Definition: ffmpeg_filter.c:241
AV_ROUND_PASS_MINMAX
@ AV_ROUND_PASS_MINMAX
Flag telling rescaling functions to pass INT64_MIN/MAX through unchanged, avoiding special cases for ...
Definition: mathematics.h:159
fg_class
static const AVClass fg_class
Definition: ffmpeg_filter.c:1053
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
av_dict_set
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:88
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
av_dict_get_string
int av_dict_get_string(const AVDictionary *m, char **buffer, const char key_val_sep, const char pairs_sep)
Get dictionary entries as a string.
Definition: dict.c:250
OFILTER_FLAG_DISABLE_CONVERT
@ OFILTER_FLAG_DISABLE_CONVERT
Definition: ffmpeg.h:267
av_dict_copy
int av_dict_copy(AVDictionary **dst, const AVDictionary *src, int flags)
Copy entries from one AVDictionary struct into another.
Definition: dict.c:237
Decoder::type
enum AVMediaType type
Definition: ffmpeg.h:383
InputFilterPriv::format
int format
Definition: ffmpeg_filter.c:130
InputFilterPriv::end_pts
int64_t end_pts
marks if sub2video_update should force an initialization
Definition: ffmpeg_filter.c:154
nb_filtergraphs
int nb_filtergraphs
Definition: ffmpeg.c:111
d
d
Definition: ffmpeg_filter.c:424
int32_t
int32_t
Definition: audioconvert.c:56
sub2video_update
static void sub2video_update(InputFilterPriv *ifp, int64_t heartbeat_pts, const AVSubtitle *sub)
Definition: ffmpeg_filter.c:328
timestamp.h
OutputStream
Definition: mux.c:53
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
avio_close
int avio_close(AVIOContext *s)
Close the resource accessed by the AVIOContext s and free it.
Definition: avio.c:616
OutputFilterPriv::format
int format
Definition: ffmpeg_filter.c:201
av_strlcpy
size_t av_strlcpy(char *dst, const char *src, size_t size)
Copy the string src to dst, but no more than size - 1 bytes, and null-terminate dst.
Definition: avstring.c:85
avfilter_graph_send_command
int avfilter_graph_send_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, char *res, int res_len, int flags)
Send a command to one or more filter instances.
Definition: avfiltergraph.c:1261
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
InputFilterPriv::opts
InputFilterOptions opts
Definition: ffmpeg_filter.c:107
dts_error_threshold
float dts_error_threshold
Definition: ffmpeg_opt.c:56
OutputFilterPriv::trim_duration_us
int64_t trim_duration_us
Definition: ffmpeg_filter.c:226
av_fifo_freep2
void av_fifo_freep2(AVFifo **f)
Free an AVFifo and reset pointer to NULL.
Definition: fifo.c:286
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
av_ts2str
#define av_ts2str(ts)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:54
ifilter_bind_dec
static int ifilter_bind_dec(InputFilterPriv *ifp, Decoder *dec)
Definition: ffmpeg_filter.c:732
h
h
Definition: vp9dsp_template.c:2038
av_bprint_chars
void av_bprint_chars(AVBPrint *buf, char c, unsigned n)
Append char c n times to a print buffer.
Definition: bprint.c:145
hw_device_for_filter
AVBufferRef * hw_device_for_filter(void)
Get a hardware device to be used with this filtergraph.
Definition: ffmpeg_hw.c:298
AVDictionaryEntry::value
char * value
Definition: dict.h:91
bind_inputs
static int bind_inputs(FilterGraph *fg)
Definition: ffmpeg_filter.c:1358
AVFilterGraph::nb_filters
unsigned nb_filters
Definition: avfilter.h:816
avstring.h
AVFilterContext::filter
const AVFilter * filter
the AVFilter of which this is an instance
Definition: avfilter.h:410
AVColorRange
AVColorRange
Visual content value range.
Definition: pixfmt.h:651
frame_data_c
const FrameData * frame_data_c(AVFrame *frame)
Definition: ffmpeg.c:458
OutputFilterPriv::tb_out
AVRational tb_out
Definition: ffmpeg_filter.c:208
AVFilterInOut
A linked-list of the inputs/outputs of the filter chain.
Definition: avfilter.h:956
VSYNC_PASSTHROUGH
@ VSYNC_PASSTHROUGH
Definition: ffmpeg.h:67
OutputFilterPriv::height
int height
Definition: ffmpeg_filter.c:202
snprintf
#define snprintf
Definition: snprintf.h:34
SCH_FILTER_IN
#define SCH_FILTER_IN(filter, input)
Definition: ffmpeg_sched.h:123
FPSConvContext::framerate
AVRational framerate
Definition: ffmpeg_filter.c:182
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
buffersrc.h
fg_thread_set_name
static void fg_thread_set_name(const FilterGraph *fg)
Definition: ffmpeg_filter.c:2854
ist_find_unused
InputStream * ist_find_unused(enum AVMediaType type)
Find an unused input stream of given type.
Definition: ffmpeg_demux.c:156
sub2video_prepare
static void sub2video_prepare(InputFilterPriv *ifp)
Definition: ffmpeg_filter.c:1644
av_rescale_q_rnd
int64_t av_rescale_q_rnd(int64_t a, AVRational bq, AVRational cq, enum AVRounding rnd)
Rescale a 64-bit integer by 2 rational numbers with specified rounding.
Definition: mathematics.c:134
av_dict_iterate
const AVDictionaryEntry * av_dict_iterate(const AVDictionary *m, const AVDictionaryEntry *prev)
Iterate over a dictionary.
Definition: dict.c:44
AVSubtitle::start_display_time
uint32_t start_display_time
Definition: avcodec.h:2230
FilterCommand::command
char * command
Definition: ffmpeg_filter.c:242
FilterCommand
Definition: ffmpeg_filter.c:240
AV_FIFO_FLAG_AUTO_GROW
#define AV_FIFO_FLAG_AUTO_GROW
Automatically resize the FIFO on writes, so that the data fits.
Definition: fifo.h:63
InputFilterPriv::height
int height
Definition: ffmpeg_filter.c:132
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2885
OutputFilter::nb_frames_dup
atomic_uint_least64_t nb_frames_dup
Definition: ffmpeg.h:329
filter_complex_nbthreads
int filter_complex_nbthreads
Definition: ffmpeg_opt.c:76
InputFilterOptions::framerate
AVRational framerate
Definition: ffmpeg.h:255
ff_thread_setname
static int ff_thread_setname(const char *name)
Definition: thread.h:216
LATENCY_PROBE_FILTER_POST
@ LATENCY_PROBE_FILTER_POST
Definition: ffmpeg.h:103
FPSConvContext::framerate_supported
const AVRational * framerate_supported
Definition: ffmpeg_filter.c:184