/* FFmpeg -- ffmpeg_filter.c (listing recovered from a doxygen scrape;
 * embedded per-line numbers below are scrape artifacts, not code) */
1 /*
2  * ffmpeg filter configuration
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
#include <stdint.h>

#include "ffmpeg.h"

#include "libavfilter/avfilter.h"
#include "libavfilter/buffersink.h"
#include "libavfilter/buffersrc.h"

#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/bprint.h"
#include "libavutil/channel_layout.h"
#include "libavutil/display.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/pixfmt.h"
#include "libavutil/samplefmt.h"
#include "libavutil/time.h"
#include "libavutil/timestamp.h"

// FIXME private header, used for mid_pred()
#include "libavcodec/mathops.h"
44 
// Private per-filtergraph state wrapped around the public FilterGraph.
// NOTE(review): the doxygen scrape dropped several lines here (nos. 46,
// 56-57, 64, 66, 68, 70, 72), including struct members (presumably the
// embedded `FilterGraph fg;` first member that the fgp_from_fg()/
// cfgp_from_cfg() casts rely on, the AVFrame pointers documented below,
// and the fgp_from_fg() signature) -- confirm against upstream.
45 typedef struct FilterGraphPriv {
47 
48  // name used for logging
49  char log_name[32];
50 
51  int is_simple;
52  // true when the filtergraph contains only meta filters
53  // that do not modify the frame data
54  int is_meta;
55  // source filters are present in the graph
58 
59  unsigned nb_outputs_done;
60 
61  const char *graph_desc;
62 
63  // frame for temporarily holding output from the filtergraph
65  // frame for sending output to the encoder
67 
69  unsigned sch_idx;
71 
// NOTE(review): fgp_from_fg() signature was dropped by the scrape;
// the body below casts the public FilterGraph to its private wrapper.
73 {
74  return (FilterGraphPriv*)fg;
75 }
76 
77 static const FilterGraphPriv *cfgp_from_cfg(const FilterGraph *fg)
78 {
79  return (const FilterGraphPriv*)fg;
80 }
81 
// NOTE(review): scrape dropped doxygen lines 84, 86, 91, 101 -- at least
// one member before next_in (the frame buffer described below) and the
// closing `} FilterGraphThread;` line are missing; confirm upstream.
82 // data that is local to the filter thread and not visible outside of it
83 typedef struct FilterGraphThread {
85 
87 
88  // Temporary buffer for output frames, since on filtergraph reset
89  // we cannot send them to encoders immediately.
90  // The output index is stored in frame opaque.
92 
93  // index of the next input to request from the scheduler
94  unsigned next_in;
95  // set to 1 after at least one frame passed through this output
96  int got_frame;
97 
98  // EOF status of each input/output, as received by the thread
99  uint8_t *eof_in;
100  uint8_t *eof_out;
102 
// Private state of one filtergraph input (wraps the public InputFilter).
// NOTE(review): the scrape dropped many member lines (doxygen nos. 104,
// 106, 110, 112, 115, 122, 125, 133-135, 137-138, 140, 142, 144, 146-147,
// 155-157, 160, 172, 174) -- including the embedded InputFilter, the
// bound InputStream pointer, the frame queue, media-type fields, and the
// ifp_from_ifilter() signature; confirm against upstream before editing.
103 typedef struct InputFilterPriv {
105 
107 
108  int index;
109 
111 
113 
114  // used to hold submitted input
116 
117  /* for filters that are not yet bound to an input stream,
118  * this stores the input linklabel, if any */
119  uint8_t *linklabel;
120 
121  // filter data type
123  // source data type: AVMEDIA_TYPE_SUBTITLE for sub2video,
124  // same as type otherwise
126 
127  int eof;
128 
129  // parameters configured for this input
130  int format;
131 
132  int width, height;
136 
139 
141 
143 
145 
148 
149  // fallback parameters to use when no input is ever sent
150  struct {
151  int format;
152 
153  int width;
154  int height;
158 
159  int sample_rate;
161  } fallback;
162 
// sub2video: state for rendering subtitles onto a video canvas
163  struct {
164  AVFrame *frame;
165 
166  int64_t last_pts;
167  int64_t end_pts;
168 
169  ///< marks if sub2video_update should force an initialization
170  unsigned int initialize;
171  } sub2video;
173 
// NOTE(review): ifp_from_ifilter() signature was dropped by the scrape.
175 {
176  return (InputFilterPriv*)ifilter;
177 }
178 
// State for the video frame-rate conversion (vsync) code.
// NOTE(review): scrape dropped doxygen lines 180, 190-191, 193-197 --
// additional members (e.g. the last_frame/framerate fields referenced by
// ofilter_bind_ost) and the closing brace are missing; confirm upstream.
179 typedef struct FPSConvContext {
181  /* number of frames emitted by the video-encoding sync code */
182  int64_t frame_number;
183  /* history of nb_frames_prev, i.e. the number of times the
184  * previous frame was duplicated by vsync code in recent
185  * do_video_out() calls */
186  int64_t frames_prev_hist[3];
187 
188  uint64_t dup_warning;
189 
192 
198 
// Private state of one filtergraph output (wraps the public OutputFilter).
// NOTE(review): scrape dropped doxygen lines 200, 204, 209-210, 214, 217,
// 219, 224, 227, 231-232, 234 -- including the embedded OutputFilter, the
// buffersink context, sample_rate/ch_layout fields, and the
// ofp_from_ofilter() signature; confirm against upstream.
199 typedef struct OutputFilterPriv {
201 
202  int index;
203 
205 
206  /* desired output stream properties */
207  int format;
208  int width, height;
211 
212  // time base in which the output is sent to our downstream
213  // does not need to match the filtersink's timebase
215  // at least one frame with the above timebase was sent
216  // to our downstream, so it cannot change anymore
218 
220 
221  // those are only set if no format is specified and the encoder gives us multiple options
222  // They point directly to the relevant lists of the encoder.
223  const int *formats;
225  const int *sample_rates;
226 
228  // offset for output timestamps, in AV_TIME_BASE_Q
229  int64_t ts_offset;
230  int64_t next_pts;
233 
// NOTE(review): ofp_from_ofilter() signature was dropped by the scrape.
235 {
236  return (OutputFilterPriv*)ofilter;
237 }
238 
// A queued `sendcmd`-style command to be delivered to a filter at a given
// time; carried through an AVBufferRef and released by filter_command_free().
239 typedef struct FilterCommand {
240  char *target;
241  char *command;
242  char *arg;
243 
244  double time;
246 } FilterCommand;
247 
248 static void filter_command_free(void *opaque, uint8_t *data)
249 {
251 
252  av_freep(&fc->target);
253  av_freep(&fc->command);
254  av_freep(&fc->arg);
255 
256  av_free(data);
257 }
258 
260 {
261  AVFrame *frame = ifp->sub2video.frame;
262  int ret;
263 
265 
266  frame->width = ifp->width;
267  frame->height = ifp->height;
268  frame->format = ifp->format;
269  frame->colorspace = ifp->color_space;
270  frame->color_range = ifp->color_range;
271 
273  if (ret < 0)
274  return ret;
275 
276  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
277 
278  return 0;
279 }
280 
281 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
282  AVSubtitleRect *r)
283 {
284  uint32_t *pal, *dst2;
285  uint8_t *src, *src2;
286  int x, y;
287 
288  if (r->type != SUBTITLE_BITMAP) {
289  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
290  return;
291  }
292  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
293  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
294  r->x, r->y, r->w, r->h, w, h
295  );
296  return;
297  }
298 
299  dst += r->y * dst_linesize + r->x * 4;
300  src = r->data[0];
301  pal = (uint32_t *)r->data[1];
302  for (y = 0; y < r->h; y++) {
303  dst2 = (uint32_t *)dst;
304  src2 = src;
305  for (x = 0; x < r->w; x++)
306  *(dst2++) = pal[*(src2++)];
307  dst += dst_linesize;
308  src += r->linesize[0];
309  }
310 }
311 
312 static void sub2video_push_ref(InputFilterPriv *ifp, int64_t pts)
313 {
314  AVFrame *frame = ifp->sub2video.frame;
315  int ret;
316 
317  av_assert1(frame->data[0]);
318  ifp->sub2video.last_pts = frame->pts = pts;
322  if (ret != AVERROR_EOF && ret < 0)
323  av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n",
324  av_err2str(ret));
325 }
326 
327 static void sub2video_update(InputFilterPriv *ifp, int64_t heartbeat_pts,
328  const AVSubtitle *sub)
329 {
330  AVFrame *frame = ifp->sub2video.frame;
331  int8_t *dst;
332  int dst_linesize;
333  int num_rects;
334  int64_t pts, end_pts;
335 
336  if (sub) {
337  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
338  AV_TIME_BASE_Q, ifp->time_base);
339  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
340  AV_TIME_BASE_Q, ifp->time_base);
341  num_rects = sub->num_rects;
342  } else {
343  /* If we are initializing the system, utilize current heartbeat
344  PTS as the start time, and show until the following subpicture
345  is received. Otherwise, utilize the previous subpicture's end time
346  as the fall-back value. */
347  pts = ifp->sub2video.initialize ?
348  heartbeat_pts : ifp->sub2video.end_pts;
349  end_pts = INT64_MAX;
350  num_rects = 0;
351  }
352  if (sub2video_get_blank_frame(ifp) < 0) {
354  "Impossible to get a blank canvas.\n");
355  return;
356  }
357  dst = frame->data [0];
358  dst_linesize = frame->linesize[0];
359  for (int i = 0; i < num_rects; i++)
360  sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
361  sub2video_push_ref(ifp, pts);
362  ifp->sub2video.end_pts = end_pts;
363  ifp->sub2video.initialize = 0;
364 }
365 
366 /* *dst may return be set to NULL (no pixel format found), a static string or a
367  * string backed by the bprint. Nothing has been written to the AVBPrint in case
368  * NULL is returned. The AVBPrint provided should be clean. */
369 static int choose_pix_fmts(OutputFilter *ofilter, AVBPrint *bprint,
370  const char **dst)
371 {
372  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
373  OutputStream *ost = ofilter->ost;
374 
375  *dst = NULL;
376 
377  if (ost->keep_pix_fmt || ofp->format != AV_PIX_FMT_NONE) {
378  *dst = ofp->format == AV_PIX_FMT_NONE ? NULL :
380  } else if (ofp->formats) {
381  const enum AVPixelFormat *p = ofp->formats;
382 
383  for (; *p != AV_PIX_FMT_NONE; p++) {
384  const char *name = av_get_pix_fmt_name(*p);
385  av_bprintf(bprint, "%s%c", name, p[1] == AV_PIX_FMT_NONE ? '\0' : '|');
386  }
387  if (!av_bprint_is_complete(bprint))
388  return AVERROR(ENOMEM);
389 
390  *dst = bprint->str;
391  }
392 
393  return 0;
394 }
395 
396 /* Define a function for appending a list of allowed formats
397  * to an AVBPrint. If nonempty, the list will have a header. */
398 #define DEF_CHOOSE_FORMAT(name, type, var, supported_list, none, printf_format, get_name) \
399 static void choose_ ## name (OutputFilterPriv *ofp, AVBPrint *bprint) \
400 { \
401  if (ofp->var == none && !ofp->supported_list) \
402  return; \
403  av_bprintf(bprint, #name "="); \
404  if (ofp->var != none) { \
405  av_bprintf(bprint, printf_format, get_name(ofp->var)); \
406  } else { \
407  const type *p; \
408  \
409  for (p = ofp->supported_list; *p != none; p++) { \
410  av_bprintf(bprint, printf_format "|", get_name(*p)); \
411  } \
412  if (bprint->len > 0) \
413  bprint->str[--bprint->len] = '\0'; \
414  } \
415  av_bprint_chars(bprint, ':', 1); \
416 }
417 
418 //DEF_CHOOSE_FORMAT(pix_fmts, enum AVPixelFormat, format, formats, AV_PIX_FMT_NONE,
419 // GET_PIX_FMT_NAME)
420 
// NOTE(review): the scrape dropped doxygen lines 421-422 and 424 -- the
// heads of the macro invocations generating choose_sample_fmts() and
// choose_sample_rates(); only fragments remain below. Confirm upstream.
423 
425  "%d", )
426 
427 static void choose_channel_layouts(OutputFilterPriv *ofp, AVBPrint *bprint)
428 {
429  if (av_channel_layout_check(&ofp->ch_layout)) {
430  av_bprintf(bprint, "channel_layouts=");
431  av_channel_layout_describe_bprint(&ofp->ch_layout, bprint);
432  } else if (ofp->ch_layouts) {
433  const AVChannelLayout *p;
434 
435  av_bprintf(bprint, "channel_layouts=");
436  for (p = ofp->ch_layouts; p->nb_channels; p++) {
438  av_bprintf(bprint, "|");
439  }
440  if (bprint->len > 0)
441  bprint->str[--bprint->len] = '\0';
442  } else
443  return;
444  av_bprint_chars(bprint, ':', 1);
445 }
446 
447 static int read_binary(const char *path, uint8_t **data, int *len)
448 {
449  AVIOContext *io = NULL;
450  int64_t fsize;
451  int ret;
452 
453  *data = NULL;
454  *len = 0;
455 
456  ret = avio_open2(&io, path, AVIO_FLAG_READ, &int_cb, NULL);
457  if (ret < 0) {
458  av_log(NULL, AV_LOG_ERROR, "Cannot open file '%s': %s\n",
459  path, av_err2str(ret));
460  return ret;
461  }
462 
463  fsize = avio_size(io);
464  if (fsize < 0 || fsize > INT_MAX) {
465  av_log(NULL, AV_LOG_ERROR, "Cannot obtain size of file %s\n", path);
466  ret = AVERROR(EIO);
467  goto fail;
468  }
469 
470  *data = av_malloc(fsize);
471  if (!*data) {
472  ret = AVERROR(ENOMEM);
473  goto fail;
474  }
475 
476  ret = avio_read(io, *data, fsize);
477  if (ret != fsize) {
478  av_log(NULL, AV_LOG_ERROR, "Error reading file %s\n", path);
479  ret = ret < 0 ? ret : AVERROR(EIO);
480  goto fail;
481  }
482 
483  *len = fsize;
484 
485  ret = 0;
486 fail:
487  avio_close(io);
488  if (ret < 0) {
489  av_freep(data);
490  *len = 0;
491  }
492  return ret;
493 }
494 
495 static int filter_opt_apply(AVFilterContext *f, const char *key, const char *val)
496 {
497  const AVOption *o = NULL;
498  int ret;
499 
501  if (ret >= 0)
502  return 0;
503 
504  if (ret == AVERROR_OPTION_NOT_FOUND && key[0] == '/')
506  if (!o)
507  goto err_apply;
508 
509  // key is a valid option name prefixed with '/'
510  // interpret value as a path from which to load the actual option value
511  key++;
512 
513  if (o->type == AV_OPT_TYPE_BINARY) {
514  uint8_t *data;
515  int len;
516 
517  ret = read_binary(val, &data, &len);
518  if (ret < 0)
519  goto err_load;
520 
522  av_freep(&data);
523  } else {
524  char *data = file_read(val);
525  if (!data) {
526  ret = AVERROR(EIO);
527  goto err_load;
528  }
529 
531  av_freep(&data);
532  }
533  if (ret < 0)
534  goto err_apply;
535 
536  return 0;
537 
538 err_apply:
540  "Error applying option '%s' to filter '%s': %s\n",
541  key, f->filter->name, av_err2str(ret));
542  return ret;
543 err_load:
545  "Error loading value for option '%s' from file '%s'\n",
546  key, val);
547  return ret;
548 }
549 
551 {
552  for (size_t i = 0; i < seg->nb_chains; i++) {
553  AVFilterChain *ch = seg->chains[i];
554 
555  for (size_t j = 0; j < ch->nb_filters; j++) {
556  AVFilterParams *p = ch->filters[j];
557  const AVDictionaryEntry *e = NULL;
558 
559  av_assert0(p->filter);
560 
561  while ((e = av_dict_iterate(p->opts, e))) {
562  int ret = filter_opt_apply(p->filter, e->key, e->value);
563  if (ret < 0)
564  return ret;
565  }
566 
567  av_dict_free(&p->opts);
568  }
569  }
570 
571  return 0;
572 }
573 
574 static int graph_parse(AVFilterGraph *graph, const char *desc,
576  AVBufferRef *hw_device)
577 {
579  int ret;
580 
581  *inputs = NULL;
582  *outputs = NULL;
583 
584  ret = avfilter_graph_segment_parse(graph, desc, 0, &seg);
585  if (ret < 0)
586  return ret;
587 
589  if (ret < 0)
590  goto fail;
591 
592  if (hw_device) {
593  for (int i = 0; i < graph->nb_filters; i++) {
594  AVFilterContext *f = graph->filters[i];
595 
596  if (!(f->filter->flags & AVFILTER_FLAG_HWDEVICE))
597  continue;
598  f->hw_device_ctx = av_buffer_ref(hw_device);
599  if (!f->hw_device_ctx) {
600  ret = AVERROR(ENOMEM);
601  goto fail;
602  }
603  }
604  }
605 
606  ret = graph_opts_apply(seg);
607  if (ret < 0)
608  goto fail;
609 
611 
612 fail:
614  return ret;
615 }
616 
617 // Filters can be configured only if the formats of all inputs are known.
619 {
620  for (int i = 0; i < fg->nb_inputs; i++) {
622  if (ifp->format < 0)
623  return 0;
624  }
625  return 1;
626 }
627 
628 static void *filter_thread(void *arg);
629 
630 static char *describe_filter_link(FilterGraph *fg, AVFilterInOut *inout, int in)
631 {
632  AVFilterContext *ctx = inout->filter_ctx;
633  AVFilterPad *pads = in ? ctx->input_pads : ctx->output_pads;
634  int nb_pads = in ? ctx->nb_inputs : ctx->nb_outputs;
635 
636  if (nb_pads > 1)
637  return av_strdup(ctx->filter->name);
638  return av_asprintf("%s:%s", ctx->filter->name,
639  avfilter_pad_get_name(pads, inout->pad_idx));
640 }
641 
643 {
644  OutputFilterPriv *ofp;
645  OutputFilter *ofilter;
646 
647  ofp = allocate_array_elem(&fg->outputs, sizeof(*ofp), &fg->nb_outputs);
648  if (!ofp)
649  return NULL;
650 
651  ofilter = &ofp->ofilter;
652  ofilter->graph = fg;
653  ofp->format = -1;
654  ofp->index = fg->nb_outputs - 1;
655 
656  return ofilter;
657 }
658 
659 static int ifilter_bind_ist(InputFilter *ifilter, InputStream *ist)
660 {
661  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
662  FilterGraphPriv *fgp = fgp_from_fg(ifilter->graph);
663  int ret, dec_idx;
664 
665  av_assert0(!ifp->ist);
666 
667  if (ifp->type != ist->par->codec_type &&
669  av_log(fgp, AV_LOG_ERROR, "Tried to connect %s stream to %s filtergraph input\n",
671  return AVERROR(EINVAL);
672  }
673 
674  ifp->ist = ist;
675  ifp->type_src = ist->st->codecpar->codec_type;
676 
677  dec_idx = ist_filter_add(ist, ifilter, filtergraph_is_simple(ifilter->graph),
678  &ifp->opts);
679  if (dec_idx < 0)
680  return dec_idx;
681 
682  ret = sch_connect(fgp->sch, SCH_DEC(dec_idx),
683  SCH_FILTER_IN(fgp->sch_idx, ifp->index));
684  if (ret < 0)
685  return ret;
686 
687  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE) {
688  ifp->sub2video.frame = av_frame_alloc();
689  if (!ifp->sub2video.frame)
690  return AVERROR(ENOMEM);
691 
692  ifp->width = ifp->opts.sub2video_width;
693  ifp->height = ifp->opts.sub2video_height;
694 
695  /* rectangles are AV_PIX_FMT_PAL8, but we have no guarantee that the
696  palettes for all rectangles are identical or compatible */
697  ifp->format = AV_PIX_FMT_RGB32;
698 
699  av_log(fgp, AV_LOG_VERBOSE, "sub2video: using %dx%d canvas\n",
700  ifp->width, ifp->height);
701  }
702 
703  return 0;
704 }
705 
707 {
708  const AVCodec *c = ost->enc_ctx->codec;
709  int i, err;
710 
711  if (ost->enc_ctx->ch_layout.order != AV_CHANNEL_ORDER_UNSPEC) {
712  /* Pass the layout through for all orders but UNSPEC */
713  err = av_channel_layout_copy(&f->ch_layout, &ost->enc_ctx->ch_layout);
714  if (err < 0)
715  return err;
716  return 0;
717  }
718 
719  /* Requested layout is of order UNSPEC */
720  if (!c->ch_layouts) {
721  /* Use the default native layout for the requested amount of channels when the
722  encoder doesn't have a list of supported layouts */
723  av_channel_layout_default(&f->ch_layout, ost->enc_ctx->ch_layout.nb_channels);
724  return 0;
725  }
726  /* Encoder has a list of supported layouts. Pick the first layout in it with the
727  same amount of channels as the requested layout */
728  for (i = 0; c->ch_layouts[i].nb_channels; i++) {
729  if (c->ch_layouts[i].nb_channels == ost->enc_ctx->ch_layout.nb_channels)
730  break;
731  }
732  if (c->ch_layouts[i].nb_channels) {
733  /* Use it if one is found */
734  err = av_channel_layout_copy(&f->ch_layout, &c->ch_layouts[i]);
735  if (err < 0)
736  return err;
737  return 0;
738  }
739  /* If no layout for the amount of channels requested was found, use the default
740  native layout for it. */
741  av_channel_layout_default(&f->ch_layout, ost->enc_ctx->ch_layout.nb_channels);
742 
743  return 0;
744 }
745 
747  unsigned sched_idx_enc)
748 {
749  const OutputFile *of = ost->file;
750  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
751  FilterGraph *fg = ofilter->graph;
752  FilterGraphPriv *fgp = fgp_from_fg(fg);
753  const AVCodec *c = ost->enc_ctx->codec;
754  int ret;
755 
756  av_assert0(!ofilter->ost);
757 
758  ofilter->ost = ost;
759  av_freep(&ofilter->linklabel);
760 
761  ofp->ts_offset = of->start_time == AV_NOPTS_VALUE ? 0 : of->start_time;
762  ofp->enc_timebase = ost->enc_timebase;
763 
764  switch (ost->enc_ctx->codec_type) {
765  case AVMEDIA_TYPE_VIDEO:
766  ofp->width = ost->enc_ctx->width;
767  ofp->height = ost->enc_ctx->height;
768  if (ost->enc_ctx->pix_fmt != AV_PIX_FMT_NONE) {
769  ofp->format = ost->enc_ctx->pix_fmt;
770  } else {
771  ofp->formats = c->pix_fmts;
772 
773  // MJPEG encoder exports a full list of supported pixel formats,
774  // but the full-range ones are experimental-only.
775  // Restrict the auto-conversion list unless -strict experimental
776  // has been specified.
777  if (!strcmp(c->name, "mjpeg")) {
778  // FIXME: YUV420P etc. are actually supported with full color range,
779  // yet the latter information isn't available here.
780  static const enum AVPixelFormat mjpeg_formats[] =
782  AV_PIX_FMT_NONE };
783 
784  const AVDictionaryEntry *strict = av_dict_get(ost->encoder_opts, "strict", NULL, 0);
785  int strict_val = ost->enc_ctx->strict_std_compliance;
786 
787  if (strict) {
788  const AVOption *o = av_opt_find(ost->enc_ctx, strict->key, NULL, 0, 0);
789  av_assert0(o);
790  av_opt_eval_int(ost->enc_ctx, o, strict->value, &strict_val);
791  }
792 
793  if (strict_val > FF_COMPLIANCE_UNOFFICIAL)
794  ofp->formats = mjpeg_formats;
795  }
796  }
797 
798  fgp->disable_conversions |= ost->keep_pix_fmt;
799 
800  ofp->fps.last_frame = av_frame_alloc();
801  if (!ofp->fps.last_frame)
802  return AVERROR(ENOMEM);
803 
804  ofp->fps.framerate = ost->frame_rate;
805  ofp->fps.framerate_max = ost->max_frame_rate;
806  ofp->fps.framerate_supported = ost->force_fps ?
807  NULL : c->supported_framerates;
808 
809  // reduce frame rate for mpeg4 to be within the spec limits
810  if (c->id == AV_CODEC_ID_MPEG4)
811  ofp->fps.framerate_clip = 65535;
812 
813  ofp->fps.dup_warning = 1000;
814 
815  break;
816  case AVMEDIA_TYPE_AUDIO:
817  if (ost->enc_ctx->sample_fmt != AV_SAMPLE_FMT_NONE) {
818  ofp->format = ost->enc_ctx->sample_fmt;
819  } else {
820  ofp->formats = c->sample_fmts;
821  }
822  if (ost->enc_ctx->sample_rate) {
823  ofp->sample_rate = ost->enc_ctx->sample_rate;
824  } else {
825  ofp->sample_rates = c->supported_samplerates;
826  }
827  if (ost->enc_ctx->ch_layout.nb_channels) {
828  int ret = set_channel_layout(ofp, ost);
829  if (ret < 0)
830  return ret;
831  } else if (c->ch_layouts) {
832  ofp->ch_layouts = c->ch_layouts;
833  }
834  break;
835  }
836 
837  ret = sch_connect(fgp->sch, SCH_FILTER_OUT(fgp->sch_idx, ofp->index),
838  SCH_ENC(sched_idx_enc));
839  if (ret < 0)
840  return ret;
841 
842  return 0;
843 }
844 
846 {
847  InputFilterPriv *ifp;
848  InputFilter *ifilter;
849 
850  ifp = allocate_array_elem(&fg->inputs, sizeof(*ifp), &fg->nb_inputs);
851  if (!ifp)
852  return NULL;
853 
854  ifilter = &ifp->ifilter;
855  ifilter->graph = fg;
856 
857  ifp->frame = av_frame_alloc();
858  if (!ifp->frame)
859  return NULL;
860 
861  ifp->index = fg->nb_inputs - 1;
862  ifp->format = -1;
863  ifp->fallback.format = -1;
866 
868  if (!ifp->frame_queue)
869  return NULL;
870 
871  return ifilter;
872 }
873 
874 void fg_free(FilterGraph **pfg)
875 {
876  FilterGraph *fg = *pfg;
877  FilterGraphPriv *fgp;
878 
879  if (!fg)
880  return;
881  fgp = fgp_from_fg(fg);
882 
883  for (int j = 0; j < fg->nb_inputs; j++) {
884  InputFilter *ifilter = fg->inputs[j];
885  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
886 
887  if (ifp->frame_queue) {
888  AVFrame *frame;
889  while (av_fifo_read(ifp->frame_queue, &frame, 1) >= 0)
892  }
894 
896 
897  av_frame_free(&ifp->frame);
898 
900  av_freep(&ifp->linklabel);
901  av_freep(&ifp->opts.name);
902  av_freep(&ifilter->name);
903  av_freep(&fg->inputs[j]);
904  }
905  av_freep(&fg->inputs);
906  for (int j = 0; j < fg->nb_outputs; j++) {
907  OutputFilter *ofilter = fg->outputs[j];
908  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
909 
911 
912  av_freep(&ofilter->linklabel);
913  av_freep(&ofilter->name);
915  av_freep(&fg->outputs[j]);
916  }
917  av_freep(&fg->outputs);
918  av_freep(&fgp->graph_desc);
919 
920  av_frame_free(&fgp->frame);
921  av_frame_free(&fgp->frame_enc);
922 
923  av_freep(pfg);
924 }
925 
926 static const char *fg_item_name(void *obj)
927 {
928  const FilterGraphPriv *fgp = obj;
929 
930  return fgp->log_name;
931 }
932 
// AVClass used for logging from FilterGraph objects; item_name yields the
// per-graph log_name (e.g. "fc#0" set in fg_create()).
933 static const AVClass fg_class = {
934  .class_name = "FilterGraph",
935  .version = LIBAVUTIL_VERSION_INT,
936  .item_name = fg_item_name,
937  .category = AV_CLASS_CATEGORY_FILTER,
938 };
939 
940 int fg_create(FilterGraph **pfg, char *graph_desc, Scheduler *sch)
941 {
942  FilterGraphPriv *fgp;
943  FilterGraph *fg;
944 
946  AVFilterGraph *graph;
947  int ret = 0;
948 
949  fgp = allocate_array_elem(&filtergraphs, sizeof(*fgp), &nb_filtergraphs);
950  if (!fgp)
951  return AVERROR(ENOMEM);
952  fg = &fgp->fg;
953 
954  if (pfg)
955  *pfg = fg;
956 
957  fg->class = &fg_class;
958  fg->index = nb_filtergraphs - 1;
959  fgp->graph_desc = graph_desc;
961  fgp->sch = sch;
962 
963  snprintf(fgp->log_name, sizeof(fgp->log_name), "fc#%d", fg->index);
964 
965  fgp->frame = av_frame_alloc();
966  fgp->frame_enc = av_frame_alloc();
967  if (!fgp->frame || !fgp->frame_enc)
968  return AVERROR(ENOMEM);
969 
970  /* this graph is only used for determining the kinds of inputs
971  * and outputs we have, and is discarded on exit from this function */
972  graph = avfilter_graph_alloc();
973  if (!graph)
974  return AVERROR(ENOMEM);;
975  graph->nb_threads = 1;
976 
977  ret = graph_parse(graph, fgp->graph_desc, &inputs, &outputs, NULL);
978  if (ret < 0)
979  goto fail;
980 
981  for (unsigned i = 0; i < graph->nb_filters; i++) {
982  const AVFilter *f = graph->filters[i]->filter;
983  if (!avfilter_filter_pad_count(f, 0) &&
984  !(f->flags & AVFILTER_FLAG_DYNAMIC_INPUTS)) {
985  fgp->have_sources = 1;
986  break;
987  }
988  }
989 
990  for (AVFilterInOut *cur = inputs; cur; cur = cur->next) {
991  InputFilter *const ifilter = ifilter_alloc(fg);
992  InputFilterPriv *ifp;
993 
994  if (!ifilter) {
995  ret = AVERROR(ENOMEM);
996  goto fail;
997  }
998 
999  ifp = ifp_from_ifilter(ifilter);
1000  ifp->linklabel = cur->name;
1001  cur->name = NULL;
1002 
1003  ifp->type = avfilter_pad_get_type(cur->filter_ctx->input_pads,
1004  cur->pad_idx);
1005  ifilter->name = describe_filter_link(fg, cur, 1);
1006  if (!ifilter->name) {
1007  ret = AVERROR(ENOMEM);
1008  goto fail;
1009  }
1010  }
1011 
1012  for (AVFilterInOut *cur = outputs; cur; cur = cur->next) {
1013  OutputFilter *const ofilter = ofilter_alloc(fg);
1014 
1015  if (!ofilter) {
1016  ret = AVERROR(ENOMEM);
1017  goto fail;
1018  }
1019 
1020  ofilter->linklabel = cur->name;
1021  cur->name = NULL;
1022 
1023  ofilter->type = avfilter_pad_get_type(cur->filter_ctx->output_pads,
1024  cur->pad_idx);
1025  ofilter->name = describe_filter_link(fg, cur, 0);
1026  if (!ofilter->name) {
1027  ret = AVERROR(ENOMEM);
1028  goto fail;
1029  }
1030  }
1031 
1032  if (!fg->nb_outputs) {
1033  av_log(fg, AV_LOG_FATAL, "A filtergraph has zero outputs, this is not supported\n");
1034  ret = AVERROR(ENOSYS);
1035  goto fail;
1036  }
1037 
1038  ret = sch_add_filtergraph(sch, fg->nb_inputs, fg->nb_outputs,
1039  filter_thread, fgp);
1040  if (ret < 0)
1041  goto fail;
1042  fgp->sch_idx = ret;
1043 
1044 fail:
1047  avfilter_graph_free(&graph);
1048 
1049  if (ret < 0)
1050  return ret;
1051 
1052  return 0;
1053 }
1054 
1056  char *graph_desc,
1057  Scheduler *sch, unsigned sched_idx_enc)
1058 {
1059  FilterGraph *fg;
1060  FilterGraphPriv *fgp;
1061  int ret;
1062 
1063  ret = fg_create(&fg, graph_desc, sch);
1064  if (ret < 0)
1065  return ret;
1066  fgp = fgp_from_fg(fg);
1067 
1068  fgp->is_simple = 1;
1069 
1070  snprintf(fgp->log_name, sizeof(fgp->log_name), "%cf#%d:%d",
1071  av_get_media_type_string(ost->type)[0],
1072  ost->file->index, ost->index);
1073 
1074  if (fg->nb_inputs != 1 || fg->nb_outputs != 1) {
1075  av_log(fg, AV_LOG_ERROR, "Simple filtergraph '%s' was expected "
1076  "to have exactly 1 input and 1 output. "
1077  "However, it had %d input(s) and %d output(s). Please adjust, "
1078  "or use a complex filtergraph (-filter_complex) instead.\n",
1079  graph_desc, fg->nb_inputs, fg->nb_outputs);
1080  return AVERROR(EINVAL);
1081  }
1082 
1083  ost->filter = fg->outputs[0];
1084 
1085  ret = ifilter_bind_ist(fg->inputs[0], ist);
1086  if (ret < 0)
1087  return ret;
1088 
1089  ret = ofilter_bind_ost(fg->outputs[0], ost, sched_idx_enc);
1090  if (ret < 0)
1091  return ret;
1092 
1093  return 0;
1094 }
1095 
1096 static int init_input_filter(FilterGraph *fg, InputFilter *ifilter)
1097 {
1098  FilterGraphPriv *fgp = fgp_from_fg(fg);
1099  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1100  InputStream *ist = NULL;
1101  enum AVMediaType type = ifp->type;
1102  int i, ret;
1103 
1104  // TODO: support other filter types
1106  av_log(fg, AV_LOG_FATAL, "Only video and audio filters supported "
1107  "currently.\n");
1108  return AVERROR(ENOSYS);
1109  }
1110 
1111  if (ifp->linklabel) {
1112  AVFormatContext *s;
1113  AVStream *st = NULL;
1114  char *p;
1115  int file_idx = strtol(ifp->linklabel, &p, 0);
1116 
1117  if (file_idx < 0 || file_idx >= nb_input_files) {
1118  av_log(fg, AV_LOG_FATAL, "Invalid file index %d in filtergraph description %s.\n",
1119  file_idx, fgp->graph_desc);
1120  return AVERROR(EINVAL);
1121  }
1122  s = input_files[file_idx]->ctx;
1123 
1124  for (i = 0; i < s->nb_streams; i++) {
1125  enum AVMediaType stream_type = s->streams[i]->codecpar->codec_type;
1126  if (stream_type != type &&
1127  !(stream_type == AVMEDIA_TYPE_SUBTITLE &&
1128  type == AVMEDIA_TYPE_VIDEO /* sub2video hack */))
1129  continue;
1130  if (check_stream_specifier(s, s->streams[i], *p == ':' ? p + 1 : p) == 1) {
1131  st = s->streams[i];
1132  break;
1133  }
1134  }
1135  if (!st) {
1136  av_log(fg, AV_LOG_FATAL, "Stream specifier '%s' in filtergraph description %s "
1137  "matches no streams.\n", p, fgp->graph_desc);
1138  return AVERROR(EINVAL);
1139  }
1140  ist = input_files[file_idx]->streams[st->index];
1141  } else {
1142  ist = ist_find_unused(type);
1143  if (!ist) {
1144  av_log(fg, AV_LOG_FATAL, "Cannot find a matching stream for "
1145  "unlabeled input pad %s\n", ifilter->name);
1146  return AVERROR(EINVAL);
1147  }
1148  }
1149  av_assert0(ist);
1150 
1151  ret = ifilter_bind_ist(ifilter, ist);
1152  if (ret < 0) {
1153  av_log(fg, AV_LOG_ERROR,
1154  "Error binding an input stream to complex filtergraph input %s.\n",
1155  ifilter->name);
1156  return ret;
1157  }
1158 
1159  return 0;
1160 }
1161 
1163 {
1164  // bind filtergraph inputs to input streams
1165  for (int i = 0; i < fg->nb_inputs; i++) {
1166  int ret = init_input_filter(fg, fg->inputs[i]);
1167  if (ret < 0)
1168  return ret;
1169  }
1170  return 0;
1171 }
1172 
1173 static int insert_trim(int64_t start_time, int64_t duration,
1174  AVFilterContext **last_filter, int *pad_idx,
1175  const char *filter_name)
1176 {
1177  AVFilterGraph *graph = (*last_filter)->graph;
1179  const AVFilter *trim;
1180  enum AVMediaType type = avfilter_pad_get_type((*last_filter)->output_pads, *pad_idx);
1181  const char *name = (type == AVMEDIA_TYPE_VIDEO) ? "trim" : "atrim";
1182  int ret = 0;
1183 
1184  if (duration == INT64_MAX && start_time == AV_NOPTS_VALUE)
1185  return 0;
1186 
1187  trim = avfilter_get_by_name(name);
1188  if (!trim) {
1189  av_log(NULL, AV_LOG_ERROR, "%s filter not present, cannot limit "
1190  "recording time.\n", name);
1191  return AVERROR_FILTER_NOT_FOUND;
1192  }
1193 
1194  ctx = avfilter_graph_alloc_filter(graph, trim, filter_name);
1195  if (!ctx)
1196  return AVERROR(ENOMEM);
1197 
1198  if (duration != INT64_MAX) {
1199  ret = av_opt_set_int(ctx, "durationi", duration,
1201  }
1202  if (ret >= 0 && start_time != AV_NOPTS_VALUE) {
1203  ret = av_opt_set_int(ctx, "starti", start_time,
1205  }
1206  if (ret < 0) {
1207  av_log(ctx, AV_LOG_ERROR, "Error configuring the %s filter", name);
1208  return ret;
1209  }
1210 
1212  if (ret < 0)
1213  return ret;
1214 
1215  ret = avfilter_link(*last_filter, *pad_idx, ctx, 0);
1216  if (ret < 0)
1217  return ret;
1218 
1219  *last_filter = ctx;
1220  *pad_idx = 0;
1221  return 0;
1222 }
1223 
1224 static int insert_filter(AVFilterContext **last_filter, int *pad_idx,
1225  const char *filter_name, const char *args)
1226 {
1227  AVFilterGraph *graph = (*last_filter)->graph;
1229  int ret;
1230 
1232  avfilter_get_by_name(filter_name),
1233  filter_name, args, NULL, graph);
1234  if (ret < 0)
1235  return ret;
1236 
1237  ret = avfilter_link(*last_filter, *pad_idx, ctx, 0);
1238  if (ret < 0)
1239  return ret;
1240 
1241  *last_filter = ctx;
1242  *pad_idx = 0;
1243  return 0;
1244 }
1245 
1247  OutputFilter *ofilter, AVFilterInOut *out)
1248 {
1249  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
1250  OutputStream *ost = ofilter->ost;
1251  OutputFile *of = ost->file;
1252  AVFilterContext *last_filter = out->filter_ctx;
1253  AVBPrint bprint;
1254  int pad_idx = out->pad_idx;
1255  int ret;
1256  const char *pix_fmts;
1257  char name[255];
1258 
1259  snprintf(name, sizeof(name), "out_%d_%d", ost->file->index, ost->index);
1261  avfilter_get_by_name("buffersink"),
1262  name, NULL, NULL, graph);
1263 
1264  if (ret < 0)
1265  return ret;
1266 
1267  if ((ofp->width || ofp->height) && ofilter->ost->autoscale) {
1268  char args[255];
1270  const AVDictionaryEntry *e = NULL;
1271 
1272  snprintf(args, sizeof(args), "%d:%d",
1273  ofp->width, ofp->height);
1274 
1275  while ((e = av_dict_iterate(ost->sws_dict, e))) {
1276  av_strlcatf(args, sizeof(args), ":%s=%s", e->key, e->value);
1277  }
1278 
1279  snprintf(name, sizeof(name), "scaler_out_%d_%d",
1280  ost->file->index, ost->index);
1282  name, args, NULL, graph)) < 0)
1283  return ret;
1284  if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
1285  return ret;
1286 
1287  last_filter = filter;
1288  pad_idx = 0;
1289  }
1290 
1292  ret = choose_pix_fmts(ofilter, &bprint, &pix_fmts);
1293  if (ret < 0)
1294  return ret;
1295 
1296  if (pix_fmts) {
1298 
1300  avfilter_get_by_name("format"),
1301  "format", pix_fmts, NULL, graph);
1302  av_bprint_finalize(&bprint, NULL);
1303  if (ret < 0)
1304  return ret;
1305  if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
1306  return ret;
1307 
1308  last_filter = filter;
1309  pad_idx = 0;
1310  }
1311 
1312  snprintf(name, sizeof(name), "trim_out_%d_%d",
1313  ost->file->index, ost->index);
1315  &last_filter, &pad_idx, name);
1316  if (ret < 0)
1317  return ret;
1318 
1319 
1320  if ((ret = avfilter_link(last_filter, pad_idx, ofp->filter, 0)) < 0)
1321  return ret;
1322 
1323  return 0;
1324 }
1325 
1327  OutputFilter *ofilter, AVFilterInOut *out)
1328 {
1329  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
1330  OutputStream *ost = ofilter->ost;
1331  OutputFile *of = ost->file;
1332  AVFilterContext *last_filter = out->filter_ctx;
1333  int pad_idx = out->pad_idx;
1334  AVBPrint args;
1335  char name[255];
1336  int ret;
1337 
1338  snprintf(name, sizeof(name), "out_%d_%d", ost->file->index, ost->index);
1340  avfilter_get_by_name("abuffersink"),
1341  name, NULL, NULL, graph);
1342  if (ret < 0)
1343  return ret;
1344  if ((ret = av_opt_set_int(ofp->filter, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
1345  return ret;
1346 
1347 #define AUTO_INSERT_FILTER(opt_name, filter_name, arg) do { \
1348  AVFilterContext *filt_ctx; \
1349  \
1350  av_log(fg, AV_LOG_INFO, opt_name " is forwarded to lavfi " \
1351  "similarly to -af " filter_name "=%s.\n", arg); \
1352  \
1353  ret = avfilter_graph_create_filter(&filt_ctx, \
1354  avfilter_get_by_name(filter_name), \
1355  filter_name, arg, NULL, graph); \
1356  if (ret < 0) \
1357  goto fail; \
1358  \
1359  ret = avfilter_link(last_filter, pad_idx, filt_ctx, 0); \
1360  if (ret < 0) \
1361  goto fail; \
1362  \
1363  last_filter = filt_ctx; \
1364  pad_idx = 0; \
1365 } while (0)
1367 #if FFMPEG_OPT_MAP_CHANNEL
1368  if (ost->audio_channels_mapped) {
1369  AVChannelLayout mapped_layout = { 0 };
1370  av_channel_layout_default(&mapped_layout, ost->audio_channels_mapped);
1371  av_channel_layout_describe_bprint(&mapped_layout, &args);
1372  for (int i = 0; i < ost->audio_channels_mapped; i++)
1373  if (ost->audio_channels_map[i] != -1)
1374  av_bprintf(&args, "|c%d=c%d", i, ost->audio_channels_map[i]);
1375 
1376  AUTO_INSERT_FILTER("-map_channel", "pan", args.str);
1377  av_bprint_clear(&args);
1378  }
1379 #endif
1380 
1381  choose_sample_fmts(ofp, &args);
1382  choose_sample_rates(ofp, &args);
1383  choose_channel_layouts(ofp, &args);
1384  if (!av_bprint_is_complete(&args)) {
1385  ret = AVERROR(ENOMEM);
1386  goto fail;
1387  }
1388  if (args.len) {
1390 
1391  snprintf(name, sizeof(name), "format_out_%d_%d",
1392  ost->file->index, ost->index);
1394  avfilter_get_by_name("aformat"),
1395  name, args.str, NULL, graph);
1396  if (ret < 0)
1397  goto fail;
1398 
1399  ret = avfilter_link(last_filter, pad_idx, format, 0);
1400  if (ret < 0)
1401  goto fail;
1402 
1403  last_filter = format;
1404  pad_idx = 0;
1405  }
1406 
1407  if (ost->apad && of->shortest) {
1408  int i;
1409 
1410  for (i = 0; i < of->nb_streams; i++)
1412  break;
1413 
1414  if (i < of->nb_streams) {
1415  AUTO_INSERT_FILTER("-apad", "apad", ost->apad);
1416  }
1417  }
1418 
1419  snprintf(name, sizeof(name), "trim for output stream %d:%d",
1420  ost->file->index, ost->index);
1422  &last_filter, &pad_idx, name);
1423  if (ret < 0)
1424  goto fail;
1425 
1426  if ((ret = avfilter_link(last_filter, pad_idx, ofp->filter, 0)) < 0)
1427  goto fail;
1428 fail:
1429  av_bprint_finalize(&args, NULL);
1430 
1431  return ret;
1432 }
1433 
1435  OutputFilter *ofilter, AVFilterInOut *out)
1436 {
1437  if (!ofilter->ost) {
1438  av_log(fg, AV_LOG_FATAL, "Filter %s has an unconnected output\n", ofilter->name);
1439  return AVERROR(EINVAL);
1440  }
1441 
1442  switch (avfilter_pad_get_type(out->filter_ctx->output_pads, out->pad_idx)) {
1443  case AVMEDIA_TYPE_VIDEO: return configure_output_video_filter(fg, graph, ofilter, out);
1444  case AVMEDIA_TYPE_AUDIO: return configure_output_audio_filter(fg, graph, ofilter, out);
1445  default: av_assert0(0); return 0;
1446  }
1447 }
1448 
1450 {
1451  for (int i = 0; i < nb_filtergraphs; i++) {
1452  int n;
1453  for (n = 0; n < filtergraphs[i]->nb_outputs; n++) {
1455  if (!output->ost) {
1457  "Filter %s has an unconnected output\n", output->name);
1458  return AVERROR(EINVAL);
1459  }
1460  }
1461  }
1462 
1463  return 0;
1464 }
1465 
1467 {
1468  ifp->sub2video.last_pts = INT64_MIN;
1469  ifp->sub2video.end_pts = INT64_MIN;
1470 
1471  /* sub2video structure has been (re-)initialized.
1472  Mark it as such so that the system will be
1473  initialized with the first received heartbeat. */
1474  ifp->sub2video.initialize = 1;
1475 }
1476 
1478  InputFilter *ifilter, AVFilterInOut *in)
1479 {
1480  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1481 
1482  AVFilterContext *last_filter;
1483  const AVFilter *buffer_filt = avfilter_get_by_name("buffer");
1484  const AVPixFmtDescriptor *desc;
1485  InputStream *ist = ifp->ist;
1486  AVRational fr = ifp->opts.framerate;
1487  AVRational sar;
1488  AVBPrint args;
1489  char name[255];
1490  int ret, pad_idx = 0;
1492  if (!par)
1493  return AVERROR(ENOMEM);
1494 
1495  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE)
1496  sub2video_prepare(ifp);
1497 
1498  ifp->time_base = (ifp->opts.flags & IFILTER_FLAG_CFR) ?
1499  av_inv_q(ifp->opts.framerate) : ist->st->time_base;
1500 
1501  sar = ifp->sample_aspect_ratio;
1502  if(!sar.den)
1503  sar = (AVRational){0,1};
1505  av_bprintf(&args,
1506  "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:"
1507  "pixel_aspect=%d/%d:colorspace=%d:range=%d",
1508  ifp->width, ifp->height, ifp->format,
1509  ifp->time_base.num, ifp->time_base.den, sar.num, sar.den,
1510  ifp->color_space, ifp->color_range);
1511  if (fr.num && fr.den)
1512  av_bprintf(&args, ":frame_rate=%d/%d", fr.num, fr.den);
1513  snprintf(name, sizeof(name), "graph %d input from stream %s", fg->index,
1514  ifp->opts.name);
1515 
1516 
1517  if ((ret = avfilter_graph_create_filter(&ifp->filter, buffer_filt, name,
1518  args.str, NULL, graph)) < 0)
1519  goto fail;
1520  par->hw_frames_ctx = ifp->hw_frames_ctx;
1521  ret = av_buffersrc_parameters_set(ifp->filter, par);
1522  if (ret < 0)
1523  goto fail;
1524  av_freep(&par);
1525  last_filter = ifp->filter;
1526 
1528  av_assert0(desc);
1529 
1530  // TODO: insert hwaccel enabled filters like transpose_vaapi into the graph
1531  if ((ifp->opts.flags & IFILTER_FLAG_AUTOROTATE) &&
1532  !(desc->flags & AV_PIX_FMT_FLAG_HWACCEL)) {
1533  int32_t *displaymatrix = ifp->displaymatrix;
1534  double theta;
1535 
1536  theta = get_rotation(displaymatrix);
1537 
1538  if (fabs(theta - 90) < 1.0) {
1539  ret = insert_filter(&last_filter, &pad_idx, "transpose",
1540  displaymatrix[3] > 0 ? "cclock_flip" : "clock");
1541  } else if (fabs(theta - 180) < 1.0) {
1542  if (displaymatrix[0] < 0) {
1543  ret = insert_filter(&last_filter, &pad_idx, "hflip", NULL);
1544  if (ret < 0)
1545  return ret;
1546  }
1547  if (displaymatrix[4] < 0) {
1548  ret = insert_filter(&last_filter, &pad_idx, "vflip", NULL);
1549  }
1550  } else if (fabs(theta - 270) < 1.0) {
1551  ret = insert_filter(&last_filter, &pad_idx, "transpose",
1552  displaymatrix[3] < 0 ? "clock_flip" : "cclock");
1553  } else if (fabs(theta) > 1.0) {
1554  char rotate_buf[64];
1555  snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
1556  ret = insert_filter(&last_filter, &pad_idx, "rotate", rotate_buf);
1557  } else if (fabs(theta) < 1.0) {
1558  if (displaymatrix && displaymatrix[4] < 0) {
1559  ret = insert_filter(&last_filter, &pad_idx, "vflip", NULL);
1560  }
1561  }
1562  if (ret < 0)
1563  return ret;
1564  }
1565 
1566  snprintf(name, sizeof(name), "trim_in_%s", ifp->opts.name);
1568  &last_filter, &pad_idx, name);
1569  if (ret < 0)
1570  return ret;
1571 
1572  if ((ret = avfilter_link(last_filter, 0, in->filter_ctx, in->pad_idx)) < 0)
1573  return ret;
1574  return 0;
1575 fail:
1576  av_freep(&par);
1577 
1578  return ret;
1579 }
1580 
1582  InputFilter *ifilter, AVFilterInOut *in)
1583 {
1584  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1585  AVFilterContext *last_filter;
1586  const AVFilter *abuffer_filt = avfilter_get_by_name("abuffer");
1587  AVBPrint args;
1588  char name[255];
1589  int ret, pad_idx = 0;
1590 
1591  ifp->time_base = (AVRational){ 1, ifp->sample_rate };
1592 
1594  av_bprintf(&args, "time_base=%d/%d:sample_rate=%d:sample_fmt=%s",
1595  ifp->time_base.num, ifp->time_base.den,
1596  ifp->sample_rate,
1598  if (av_channel_layout_check(&ifp->ch_layout) &&
1600  av_bprintf(&args, ":channel_layout=");
1602  } else
1603  av_bprintf(&args, ":channels=%d", ifp->ch_layout.nb_channels);
1604  snprintf(name, sizeof(name), "graph_%d_in_%s", fg->index, ifp->opts.name);
1605 
1606  if ((ret = avfilter_graph_create_filter(&ifp->filter, abuffer_filt,
1607  name, args.str, NULL,
1608  graph)) < 0)
1609  return ret;
1610  last_filter = ifp->filter;
1611 
1612  snprintf(name, sizeof(name), "trim for input stream %s", ifp->opts.name);
1614  &last_filter, &pad_idx, name);
1615  if (ret < 0)
1616  return ret;
1617 
1618  if ((ret = avfilter_link(last_filter, 0, in->filter_ctx, in->pad_idx)) < 0)
1619  return ret;
1620 
1621  return 0;
1622 }
1623 
1625  InputFilter *ifilter, AVFilterInOut *in)
1626 {
1627  switch (ifp_from_ifilter(ifilter)->type) {
1628  case AVMEDIA_TYPE_VIDEO: return configure_input_video_filter(fg, graph, ifilter, in);
1629  case AVMEDIA_TYPE_AUDIO: return configure_input_audio_filter(fg, graph, ifilter, in);
1630  default: av_assert0(0); return 0;
1631  }
1632 }
1633 
1635 {
1636  for (int i = 0; i < fg->nb_outputs; i++)
1638  for (int i = 0; i < fg->nb_inputs; i++)
1639  ifp_from_ifilter(fg->inputs[i])->filter = NULL;
1640  avfilter_graph_free(&fgt->graph);
1641 }
1642 
1644 {
1645  return f->nb_inputs == 0 &&
1646  (!strcmp(f->filter->name, "buffer") ||
1647  !strcmp(f->filter->name, "abuffer"));
1648 }
1649 
1650 static int graph_is_meta(AVFilterGraph *graph)
1651 {
1652  for (unsigned i = 0; i < graph->nb_filters; i++) {
1653  const AVFilterContext *f = graph->filters[i];
1654 
1655  /* in addition to filters flagged as meta, also
1656  * disregard sinks and buffersources (but not other sources,
1657  * since they introduce data we are not aware of)
1658  */
1659  if (!((f->filter->flags & AVFILTER_FLAG_METADATA_ONLY) ||
1660  f->nb_outputs == 0 ||
1662  return 0;
1663  }
1664  return 1;
1665 }
1666 
1667 static int sub2video_frame(InputFilter *ifilter, AVFrame *frame, int buffer);
1668 
1670 {
1671  FilterGraphPriv *fgp = fgp_from_fg(fg);
1672  AVBufferRef *hw_device;
1673  AVFilterInOut *inputs, *outputs, *cur;
1674  int ret, i, simple = filtergraph_is_simple(fg);
1675  int have_input_eof = 0;
1676  const char *graph_desc = fgp->graph_desc;
1677 
1678  cleanup_filtergraph(fg, fgt);
1679  fgt->graph = avfilter_graph_alloc();
1680  if (!fgt->graph)
1681  return AVERROR(ENOMEM);
1682 
1683  if (simple) {
1684  OutputStream *ost = fg->outputs[0]->ost;
1685 
1686  if (filter_nbthreads) {
1687  ret = av_opt_set(fgt->graph, "threads", filter_nbthreads, 0);
1688  if (ret < 0)
1689  goto fail;
1690  } else {
1691  const AVDictionaryEntry *e = NULL;
1692  e = av_dict_get(ost->encoder_opts, "threads", NULL, 0);
1693  if (e)
1694  av_opt_set(fgt->graph, "threads", e->value, 0);
1695  }
1696 
1697  if (av_dict_count(ost->sws_dict)) {
1698  ret = av_dict_get_string(ost->sws_dict,
1699  &fgt->graph->scale_sws_opts,
1700  '=', ':');
1701  if (ret < 0)
1702  goto fail;
1703  }
1704 
1705  if (av_dict_count(ost->swr_opts)) {
1706  char *args;
1707  ret = av_dict_get_string(ost->swr_opts, &args, '=', ':');
1708  if (ret < 0)
1709  goto fail;
1710  av_opt_set(fgt->graph, "aresample_swr_opts", args, 0);
1711  av_free(args);
1712  }
1713  } else {
1715  }
1716 
1717  hw_device = hw_device_for_filter();
1718 
1719  if ((ret = graph_parse(fgt->graph, graph_desc, &inputs, &outputs, hw_device)) < 0)
1720  goto fail;
1721 
1722  for (cur = inputs, i = 0; cur; cur = cur->next, i++)
1723  if ((ret = configure_input_filter(fg, fgt->graph, fg->inputs[i], cur)) < 0) {
1726  goto fail;
1727  }
1729 
1730  for (cur = outputs, i = 0; cur; cur = cur->next, i++) {
1731  ret = configure_output_filter(fg, fgt->graph, fg->outputs[i], cur);
1732  if (ret < 0) {
1734  goto fail;
1735  }
1736  }
1738 
1739  if (fgp->disable_conversions)
1741  if ((ret = avfilter_graph_config(fgt->graph, NULL)) < 0)
1742  goto fail;
1743 
1744  fgp->is_meta = graph_is_meta(fgt->graph);
1745 
1746  /* limit the lists of allowed formats to the ones selected, to
1747  * make sure they stay the same if the filtergraph is reconfigured later */
1748  for (int i = 0; i < fg->nb_outputs; i++) {
1749  OutputFilter *ofilter = fg->outputs[i];
1750  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
1751  AVFilterContext *sink = ofp->filter;
1752 
1753  ofp->format = av_buffersink_get_format(sink);
1754 
1755  ofp->width = av_buffersink_get_w(sink);
1756  ofp->height = av_buffersink_get_h(sink);
1757 
1758  // If the timing parameters are not locked yet, get the tentative values
1759  // here but don't lock them. They will only be used if no output frames
1760  // are ever produced.
1761  if (!ofp->tb_out_locked) {
1763  if (ofp->fps.framerate.num <= 0 && ofp->fps.framerate.den <= 0 &&
1764  fr.num > 0 && fr.den > 0)
1765  ofp->fps.framerate = fr;
1766  ofp->tb_out = av_buffersink_get_time_base(sink);
1767  }
1769 
1772  ret = av_buffersink_get_ch_layout(sink, &ofp->ch_layout);
1773  if (ret < 0)
1774  goto fail;
1775  }
1776 
1777  for (int i = 0; i < fg->nb_inputs; i++) {
1779  AVFrame *tmp;
1780  while (av_fifo_read(ifp->frame_queue, &tmp, 1) >= 0) {
1781  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE) {
1782  sub2video_frame(&ifp->ifilter, tmp, !fgt->graph);
1783  } else {
1785  }
1786  av_frame_free(&tmp);
1787  if (ret < 0)
1788  goto fail;
1789  }
1790  }
1791 
1792  /* send the EOFs for the finished inputs */
1793  for (int i = 0; i < fg->nb_inputs; i++) {
1795  if (fgt->eof_in[i]) {
1797  if (ret < 0)
1798  goto fail;
1799  have_input_eof = 1;
1800  }
1801  }
1802 
1803  if (have_input_eof) {
1804  // make sure the EOF propagates to the end of the graph
1806  if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
1807  goto fail;
1808  }
1809 
1810  return 0;
1811 fail:
1812  cleanup_filtergraph(fg, fgt);
1813  return ret;
1814 }
1815 
1817 {
1818  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1819 
1820  if (dec->codec_type == AVMEDIA_TYPE_VIDEO) {
1821  ifp->fallback.format = dec->pix_fmt;
1822  ifp->fallback.width = dec->width;
1823  ifp->fallback.height = dec->height;
1825  ifp->fallback.color_space = dec->colorspace;
1826  ifp->fallback.color_range = dec->color_range;
1827  } else if (dec->codec_type == AVMEDIA_TYPE_AUDIO) {
1828  int ret;
1829 
1830  ifp->fallback.format = dec->sample_fmt;
1831  ifp->fallback.sample_rate = dec->sample_rate;
1832 
1834  if (ret < 0)
1835  return ret;
1836  }
1837 
1838  return 0;
1839 }
1840 
1842 {
1843  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1844  AVFrameSideData *sd;
1845  int ret;
1846 
1848  if (ret < 0)
1849  return ret;
1850 
1851  ifp->format = frame->format;
1852 
1853  ifp->width = frame->width;
1854  ifp->height = frame->height;
1856  ifp->color_space = frame->colorspace;
1857  ifp->color_range = frame->color_range;
1858 
1859  ifp->sample_rate = frame->sample_rate;
1861  if (ret < 0)
1862  return ret;
1863 
1865  if (sd)
1866  memcpy(ifp->displaymatrix, sd->data, sizeof(ifp->displaymatrix));
1867  ifp->displaymatrix_present = !!sd;
1868 
1869  return 0;
1870 }
1871 
1873 {
1874  const FilterGraphPriv *fgp = cfgp_from_cfg(fg);
1875  return fgp->is_simple;
1876 }
1877 
1878 static void send_command(FilterGraph *fg, AVFilterGraph *graph,
1879  double time, const char *target,
1880  const char *command, const char *arg, int all_filters)
1881 {
1882  int ret;
1883 
1884  if (!graph)
1885  return;
1886 
1887  if (time < 0) {
1888  char response[4096];
1889  ret = avfilter_graph_send_command(graph, target, command, arg,
1890  response, sizeof(response),
1891  all_filters ? 0 : AVFILTER_CMD_FLAG_ONE);
1892  fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s",
1893  fg->index, ret, response);
1894  } else if (!all_filters) {
1895  fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
1896  } else {
1897  ret = avfilter_graph_queue_command(graph, target, command, arg, 0, time);
1898  if (ret < 0)
1899  fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
1900  }
1901 }
1902 
1903 static int choose_input(const FilterGraph *fg, const FilterGraphThread *fgt)
1904 {
1905  int nb_requests, nb_requests_max = 0;
1906  int best_input = -1;
1907 
1908  for (int i = 0; i < fg->nb_inputs; i++) {
1909  InputFilter *ifilter = fg->inputs[i];
1910  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1911 
1912  if (fgt->eof_in[i])
1913  continue;
1914 
1915  nb_requests = av_buffersrc_get_nb_failed_requests(ifp->filter);
1916  if (nb_requests > nb_requests_max) {
1917  nb_requests_max = nb_requests;
1918  best_input = i;
1919  }
1920  }
1921 
1922  av_assert0(best_input >= 0);
1923 
1924  return best_input;
1925 }
1926 
1928 {
1929  OutputFilter *ofilter = &ofp->ofilter;
1930  FPSConvContext *fps = &ofp->fps;
1931  AVRational tb = (AVRational){ 0, 0 };
1932  AVRational fr;
1933  const FrameData *fd;
1934 
1935  fd = frame_data_c(frame);
1936 
1937  // apply -enc_time_base
1938  if (ofp->enc_timebase.num == ENC_TIME_BASE_DEMUX &&
1939  (fd->dec.tb.num <= 0 || fd->dec.tb.den <= 0)) {
1940  av_log(ofilter->ost, AV_LOG_ERROR,
1941  "Demuxing timebase not available - cannot use it for encoding\n");
1942  return AVERROR(EINVAL);
1943  }
1944 
1945  switch (ofp->enc_timebase.num) {
1946  case 0: break;
1947  case ENC_TIME_BASE_DEMUX: tb = fd->dec.tb; break;
1948  case ENC_TIME_BASE_FILTER: tb = frame->time_base; break;
1949  default: tb = ofp->enc_timebase; break;
1950  }
1951 
1952  if (ofilter->type == AVMEDIA_TYPE_AUDIO) {
1953  tb = tb.num ? tb : (AVRational){ 1, frame->sample_rate };
1954  goto finish;
1955  }
1956 
1957  fr = fps->framerate;
1958  if (!fr.num) {
1960  if (fr_sink.num > 0 && fr_sink.den > 0)
1961  fr = fr_sink;
1962  }
1963 
1964  if (ofilter->ost->is_cfr) {
1965  if (!fr.num && !fps->framerate_max.num) {
1966  fr = (AVRational){25, 1};
1967  av_log(ofilter->ost, AV_LOG_WARNING,
1968  "No information "
1969  "about the input framerate is available. Falling "
1970  "back to a default value of 25fps. Use the -r option "
1971  "if you want a different framerate.\n");
1972  }
1973 
1974  if (fps->framerate_max.num &&
1975  (av_q2d(fr) > av_q2d(fps->framerate_max) ||
1976  !fr.den))
1977  fr = fps->framerate_max;
1978  }
1979 
1980  if (fr.num > 0) {
1981  if (fps->framerate_supported) {
1982  int idx = av_find_nearest_q_idx(fr, fps->framerate_supported);
1983  fr = fps->framerate_supported[idx];
1984  }
1985  if (fps->framerate_clip) {
1986  av_reduce(&fr.num, &fr.den,
1987  fr.num, fr.den, fps->framerate_clip);
1988  }
1989  }
1990 
1991  if (!(tb.num > 0 && tb.den > 0))
1992  tb = av_inv_q(fr);
1993  if (!(tb.num > 0 && tb.den > 0))
1994  tb = frame->time_base;
1995 
1996  fps->framerate = fr;
1997 finish:
1998  ofp->tb_out = tb;
1999  ofp->tb_out_locked = 1;
2000 
2001  return 0;
2002 }
2003 
2005  int64_t start_time)
2006 {
2007  double float_pts = AV_NOPTS_VALUE; // this is identical to frame.pts but with higher precision
2008 
2009  AVRational tb = tb_dst;
2010  AVRational filter_tb = frame->time_base;
2011  const int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
2012 
2013  if (frame->pts == AV_NOPTS_VALUE)
2014  goto early_exit;
2015 
2016  tb.den <<= extra_bits;
2017  float_pts = av_rescale_q(frame->pts, filter_tb, tb) -
2019  float_pts /= 1 << extra_bits;
2020  // when float_pts is not exactly an integer,
2021  // avoid exact midpoints to reduce the chance of rounding differences, this
2022  // can be removed in case the fps code is changed to work with integers
2023  if (float_pts != llrint(float_pts))
2024  float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
2025 
2026  frame->pts = av_rescale_q(frame->pts, filter_tb, tb_dst) -
2028  frame->time_base = tb_dst;
2029 
2030 early_exit:
2031 
2032  if (debug_ts) {
2033  av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
2034  frame ? av_ts2str(frame->pts) : "NULL",
2035  av_ts2timestr(frame->pts, &tb_dst),
2036  float_pts, tb_dst.num, tb_dst.den);
2037  }
2038 
2039  return float_pts;
2040 }
2041 
2042 /* Convert frame timestamps to the encoder timebase and decide how many times
2043  * should this (and possibly previous) frame be repeated in order to conform to
2044  * desired target framerate (if any).
2045  */
2047  int64_t *nb_frames, int64_t *nb_frames_prev)
2048 {
2049  OutputFilter *ofilter = &ofp->ofilter;
2050  OutputStream *ost = ofilter->ost;
2051  FPSConvContext *fps = &ofp->fps;
2052  double delta0, delta, sync_ipts, duration;
2053 
2054  if (!frame) {
2055  *nb_frames_prev = *nb_frames = mid_pred(fps->frames_prev_hist[0],
2056  fps->frames_prev_hist[1],
2057  fps->frames_prev_hist[2]);
2058 
2059  if (!*nb_frames && fps->last_dropped) {
2060  atomic_fetch_add(&ofilter->nb_frames_drop, 1);
2061  fps->last_dropped++;
2062  }
2063 
2064  goto finish;
2065  }
2066 
2068 
2069  sync_ipts = adjust_frame_pts_to_encoder_tb(frame, ofp->tb_out, ofp->ts_offset);
2070  /* delta0 is the "drift" between the input frame and
2071  * where it would fall in the output. */
2072  delta0 = sync_ipts - ofp->next_pts;
2073  delta = delta0 + duration;
2074 
2075  // tracks the number of times the PREVIOUS frame should be duplicated,
2076  // mostly for variable framerate (VFR)
2077  *nb_frames_prev = 0;
2078  /* by default, we output a single frame */
2079  *nb_frames = 1;
2080 
2081  if (delta0 < 0 &&
2082  delta > 0 &&
2083  ost->vsync_method != VSYNC_PASSTHROUGH
2085  && ost->vsync_method != VSYNC_DROP
2086 #endif
2087  ) {
2088  if (delta0 < -0.6) {
2089  av_log(ost, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
2090  } else
2091  av_log(ost, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
2092  sync_ipts = ofp->next_pts;
2093  duration += delta0;
2094  delta0 = 0;
2095  }
2096 
2097  switch (ost->vsync_method) {
2098  case VSYNC_VSCFR:
2099  if (fps->frame_number == 0 && delta0 >= 0.5) {
2100  av_log(ost, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
2101  delta = duration;
2102  delta0 = 0;
2103  ofp->next_pts = llrint(sync_ipts);
2104  }
2105  case VSYNC_CFR:
2106  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
2107  if (frame_drop_threshold && delta < frame_drop_threshold && fps->frame_number) {
2108  *nb_frames = 0;
2109  } else if (delta < -1.1)
2110  *nb_frames = 0;
2111  else if (delta > 1.1) {
2112  *nb_frames = llrintf(delta);
2113  if (delta0 > 1.1)
2114  *nb_frames_prev = llrintf(delta0 - 0.6);
2115  }
2116  frame->duration = 1;
2117  break;
2118  case VSYNC_VFR:
2119  if (delta <= -0.6)
2120  *nb_frames = 0;
2121  else if (delta > 0.6)
2122  ofp->next_pts = llrint(sync_ipts);
2124  break;
2125 #if FFMPEG_OPT_VSYNC_DROP
2126  case VSYNC_DROP:
2127 #endif
2128  case VSYNC_PASSTHROUGH:
2129  ofp->next_pts = llrint(sync_ipts);
2131  break;
2132  default:
2133  av_assert0(0);
2134  }
2135 
2136 finish:
2137  memmove(fps->frames_prev_hist + 1,
2138  fps->frames_prev_hist,
2139  sizeof(fps->frames_prev_hist[0]) * (FF_ARRAY_ELEMS(fps->frames_prev_hist) - 1));
2140  fps->frames_prev_hist[0] = *nb_frames_prev;
2141 
2142  if (*nb_frames_prev == 0 && fps->last_dropped) {
2143  atomic_fetch_add(&ofilter->nb_frames_drop, 1);
2145  "*** dropping frame %"PRId64" at ts %"PRId64"\n",
2146  fps->frame_number, fps->last_frame->pts);
2147  }
2148  if (*nb_frames > (*nb_frames_prev && fps->last_dropped) + (*nb_frames > *nb_frames_prev)) {
2149  uint64_t nb_frames_dup;
2150  if (*nb_frames > dts_error_threshold * 30) {
2151  av_log(ost, AV_LOG_ERROR, "%"PRId64" frame duplication too large, skipping\n", *nb_frames - 1);
2152  atomic_fetch_add(&ofilter->nb_frames_drop, 1);
2153  *nb_frames = 0;
2154  return;
2155  }
2156  nb_frames_dup = atomic_fetch_add(&ofilter->nb_frames_dup,
2157  *nb_frames - (*nb_frames_prev && fps->last_dropped) - (*nb_frames > *nb_frames_prev));
2158  av_log(ost, AV_LOG_VERBOSE, "*** %"PRId64" dup!\n", *nb_frames - 1);
2159  if (nb_frames_dup > fps->dup_warning) {
2160  av_log(ost, AV_LOG_WARNING, "More than %"PRIu64" frames duplicated\n", fps->dup_warning);
2161  fps->dup_warning *= 10;
2162  }
2163  }
2164 
2165  fps->last_dropped = *nb_frames == *nb_frames_prev && frame;
2167 }
2168 
2170 {
2172  int ret;
2173 
2174  // we are finished and no frames were ever seen at this output,
2175  // at least initialize the encoder with a dummy frame
2176  if (!fgt->got_frame) {
2177  AVFrame *frame = fgt->frame;
2178  FrameData *fd;
2179 
2180  frame->time_base = ofp->tb_out;
2181  frame->format = ofp->format;
2182 
2183  frame->width = ofp->width;
2184  frame->height = ofp->height;
2186 
2187  frame->sample_rate = ofp->sample_rate;
2188  if (ofp->ch_layout.nb_channels) {
2190  if (ret < 0)
2191  return ret;
2192  }
2193 
2194  fd = frame_data(frame);
2195  if (!fd)
2196  return AVERROR(ENOMEM);
2197 
2198  fd->frame_rate_filter = ofp->fps.framerate;
2199 
2200  av_assert0(!frame->buf[0]);
2201 
2203  "No filtered frames for output stream, trying to "
2204  "initialize anyway.\n");
2205 
2206  ret = sch_filter_send(fgp->sch, fgp->sch_idx, ofp->index, frame);
2207  if (ret < 0) {
2209  return ret;
2210  }
2211  }
2212 
2213  fgt->eof_out[ofp->index] = 1;
2214 
2215  return sch_filter_send(fgp->sch, fgp->sch_idx, ofp->index, NULL);
2216 }
2217 
2219  AVFrame *frame)
2220 {
2222  AVFrame *frame_prev = ofp->fps.last_frame;
2223  enum AVMediaType type = ofp->ofilter.type;
2224 
2225  int64_t nb_frames = !!frame, nb_frames_prev = 0;
2226 
2227  if (type == AVMEDIA_TYPE_VIDEO && (frame || fgt->got_frame))
2228  video_sync_process(ofp, frame, &nb_frames, &nb_frames_prev);
2229 
2230  for (int64_t i = 0; i < nb_frames; i++) {
2231  AVFrame *frame_out;
2232  int ret;
2233 
2234  if (type == AVMEDIA_TYPE_VIDEO) {
2235  AVFrame *frame_in = (i < nb_frames_prev && frame_prev->buf[0]) ?
2236  frame_prev : frame;
2237  if (!frame_in)
2238  break;
2239 
2240  frame_out = fgp->frame_enc;
2241  ret = av_frame_ref(frame_out, frame_in);
2242  if (ret < 0)
2243  return ret;
2244 
2245  frame_out->pts = ofp->next_pts;
2246 
2247  if (ofp->fps.dropped_keyframe) {
2248  frame_out->flags |= AV_FRAME_FLAG_KEY;
2249  ofp->fps.dropped_keyframe = 0;
2250  }
2251  } else {
2252  frame->pts = (frame->pts == AV_NOPTS_VALUE) ? ofp->next_pts :
2255 
2256  frame->time_base = ofp->tb_out;
2258  (AVRational){ 1, frame->sample_rate },
2259  ofp->tb_out);
2260 
2261  ofp->next_pts = frame->pts + frame->duration;
2262 
2263  frame_out = frame;
2264  }
2265 
2266  {
2267  // send the frame to consumers
2268  ret = sch_filter_send(fgp->sch, fgp->sch_idx, ofp->index, frame_out);
2269  if (ret < 0) {
2270  av_frame_unref(frame_out);
2271 
2272  if (!fgt->eof_out[ofp->index]) {
2273  fgt->eof_out[ofp->index] = 1;
2274  fgp->nb_outputs_done++;
2275  }
2276 
2277  return ret == AVERROR_EOF ? 0 : ret;
2278  }
2279  }
2280 
2281  if (type == AVMEDIA_TYPE_VIDEO) {
2282  ofp->fps.frame_number++;
2283  ofp->next_pts++;
2284 
2285  if (i == nb_frames_prev && frame)
2287  }
2288 
2289  fgt->got_frame = 1;
2290  }
2291 
2292  if (frame && frame_prev) {
2293  av_frame_unref(frame_prev);
2294  av_frame_move_ref(frame_prev, frame);
2295  }
2296 
2297  if (!frame)
2298  return close_output(ofp, fgt);
2299 
2300  return 0;
2301 }
2302 
2304  AVFrame *frame)
2305 {
2307  OutputStream *ost = ofp->ofilter.ost;
2308  AVFilterContext *filter = ofp->filter;
2309  FrameData *fd;
2310  int ret;
2311 
2314  if (ret == AVERROR_EOF && !fgt->eof_out[ofp->index]) {
2315  ret = fg_output_frame(ofp, fgt, NULL);
2316  return (ret < 0) ? ret : 1;
2317  } else if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
2318  return 1;
2319  } else if (ret < 0) {
2320  av_log(fgp, AV_LOG_WARNING,
2321  "Error in retrieving a frame from the filtergraph: %s\n",
2322  av_err2str(ret));
2323  return ret;
2324  }
2325 
2326  if (fgt->eof_out[ofp->index]) {
2328  return 0;
2329  }
2330 
2332 
2333  if (debug_ts)
2334  av_log(fgp, AV_LOG_INFO, "filter_raw -> pts:%s pts_time:%s time_base:%d/%d\n",
2337 
2338  // Choose the output timebase the first time we get a frame.
2339  if (!ofp->tb_out_locked) {
2340  ret = choose_out_timebase(ofp, frame);
2341  if (ret < 0) {
2342  av_log(ost, AV_LOG_ERROR, "Could not choose an output time base\n");
2344  return ret;
2345  }
2346  }
2347 
2348  fd = frame_data(frame);
2349  if (!fd) {
2351  return AVERROR(ENOMEM);
2352  }
2353 
2355 
2356  // only use bits_per_raw_sample passed through from the decoder
2357  // if the filtergraph did not touch the frame data
2358  if (!fgp->is_meta)
2359  fd->bits_per_raw_sample = 0;
2360 
2361  if (ost->type == AVMEDIA_TYPE_VIDEO) {
2362  if (!frame->duration) {
2364  if (fr.num > 0 && fr.den > 0)
2366  }
2367 
2368  fd->frame_rate_filter = ofp->fps.framerate;
2369  }
2370 
2371  ret = fg_output_frame(ofp, fgt, frame);
2373  if (ret < 0)
2374  return ret;
2375 
2376  return 0;
2377 }
2378 
2379 /* retrieve all frames available at filtergraph outputs
2380  * and send them to consumers */
2382  AVFrame *frame)
2383 {
2384  FilterGraphPriv *fgp = fgp_from_fg(fg);
2385  int did_step = 0;
2386 
2387  // graph not configured, just select the input to request
2388  if (!fgt->graph) {
2389  for (int i = 0; i < fg->nb_inputs; i++) {
2391  if (ifp->format < 0 && !fgt->eof_in[i]) {
2392  fgt->next_in = i;
2393  return 0;
2394  }
2395  }
2396 
2397  // This state - graph is not configured, but all inputs are either
2398  // initialized or EOF - should be unreachable because sending EOF to a
2399  // filter without even a fallback format should fail
2400  av_assert0(0);
2401  return AVERROR_BUG;
2402  }
2403 
2404  while (fgp->nb_outputs_done < fg->nb_outputs) {
2405  int ret;
2406 
2408  if (ret == AVERROR(EAGAIN)) {
2409  fgt->next_in = choose_input(fg, fgt);
2410  break;
2411  } else if (ret < 0) {
2412  if (ret == AVERROR_EOF)
2413  av_log(fg, AV_LOG_VERBOSE, "Filtergraph returned EOF, finishing\n");
2414  else
2415  av_log(fg, AV_LOG_ERROR,
2416  "Error requesting a frame from the filtergraph: %s\n",
2417  av_err2str(ret));
2418  return ret;
2419  }
2420  fgt->next_in = fg->nb_inputs;
2421 
2422  // return after one iteration, so that scheduler can rate-control us
2423  if (did_step && fgp->have_sources)
2424  return 0;
2425 
2426  /* Reap all buffers present in the buffer sinks */
2427  for (int i = 0; i < fg->nb_outputs; i++) {
2429 
2430  ret = 0;
2431  while (!ret) {
2432  ret = fg_output_step(ofp, fgt, frame);
2433  if (ret < 0)
2434  return ret;
2435  }
2436  }
2437  did_step = 1;
2438  }
2439 
2440  return (fgp->nb_outputs_done == fg->nb_outputs) ? AVERROR_EOF : 0;
2441 }
2442 
/**
 * Advance the sub2video state to a timestamp coming from another stream.
 *
 * Converts pts from tb into the input's own time base and, unless the
 * current subtitle picture is already ahead of that point, either refreshes
 * the overlaid subpicture (when past its end time, or on first use) or
 * simply re-pushes the existing one so downstream filters keep receiving
 * frames.
 */
2443 static void sub2video_heartbeat(InputFilter *ifilter, int64_t pts, AVRational tb)
2444 {
2445  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2446  int64_t pts2;
2447 
2448  /* subtitles seem to be usually muxed ahead of other streams;
2449  if not, subtracting a larger time here is necessary */
2450  pts2 = av_rescale_q(pts, tb, ifp->time_base) - 1;
2451 
2452  /* do not send the heartbeat frame if the subtitle is already ahead */
2453  if (pts2 <= ifp->sub2video.last_pts)
2454  return;
2455 
2456  if (pts2 >= ifp->sub2video.end_pts || ifp->sub2video.initialize)
2457  /* if we have hit the end of the current displayed subpicture,
2458  or if we need to initialize the system, update the
2459  overlaid subpicture and its start/end times */
2460  sub2video_update(ifp, pts2 + 1, NULL);
2461  else
2462  sub2video_push_ref(ifp, pts2);
2463 }
2464 
/**
 * Feed a decoded subtitle frame (or EOF, or a heartbeat) into a sub2video
 * input.
 *
 * @param buffer when nonzero the graph is not yet configured, so the frame
 *               is queued in ifp->frame_queue instead of being rendered
 * @return 0 on success, a negative AVERROR code on failure
 */
2465 static int sub2video_frame(InputFilter *ifilter, AVFrame *frame, int buffer)
2466 {
2467  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2468  int ret;
2469 
2470  if (buffer) {
2471  AVFrame *tmp;
2472 
2473  if (!frame)
2474  return 0;
2475 
2476  tmp = av_frame_alloc();
2477  if (!tmp)
2478  return AVERROR(ENOMEM);
2479 
// NOTE(review): line 2480 missing from this listing — presumably the call
// moving the frame's contents into tmp before queueing; TODO confirm.
2481 
 // queue takes ownership of tmp on success
2482  ret = av_fifo_write(ifp->frame_queue, &tmp, 1);
2483  if (ret < 0) {
2484  av_frame_free(&tmp);
2485  return ret;
2486  }
2487 
2488  return 0;
2489  }
2490 
2491  // heartbeat frame
2492  if (frame && !frame->buf[0]) {
// NOTE(review): line 2493 missing — presumably the sub2video_heartbeat()
// call for this input; TODO confirm.
2494  return 0;
2495  }
2496 
 // NULL frame means EOF: flush the last subpicture and close the source
2497  if (!frame) {
2498  if (ifp->sub2video.end_pts < INT64_MAX)
2499  sub2video_update(ifp, INT64_MAX, NULL);
2500 
2501  return av_buffersrc_add_frame(ifp->filter, NULL);
2502  }
2503 
 // keep previous dimensions when the frame does not carry any
2504  ifp->width = frame->width ? frame->width : ifp->width;
2505  ifp->height = frame->height ? frame->height : ifp->height;
2506 
 // buf[0] holds the decoded AVSubtitle; render it into the video overlay
2507  sub2video_update(ifp, INT64_MIN, (const AVSubtitle*)frame->buf[0]->data);
2508 
2509  return 0;
2510 }
2511 
/**
 * Propagate EOF on one filtergraph input.
 *
 * If the buffersrc already exists, the EOF pts is rescaled and forwarded to
 * it; otherwise the input falls back to the decoder-provided parameters so
 * the graph can still be configured, failing with AVERROR_INVALIDDATA when
 * no usable format is known. Idempotent per input (guarded by eof_in[]).
 */
2512 static int send_eof(FilterGraphThread *fgt, InputFilter *ifilter,
2513  int64_t pts, AVRational tb)
2514 {
2515  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2516  int ret;
2517 
2518  if (fgt->eof_in[ifp->index])
2519  return 0;
2520 
2521  fgt->eof_in[ifp->index] = 1;
2522 
2523  if (ifp->filter) {
2524  pts = av_rescale_q_rnd(pts, tb, ifp->time_base,
// NOTE(review): lines 2525 and 2527 missing from this listing — presumably
// the rounding flags ending the av_rescale_q_rnd() call and the call that
// actually signals EOF to the buffersrc and assigns ret; TODO confirm.
2526 
2528  if (ret < 0)
2529  return ret;
2530  } else {
2531  if (ifp->format < 0) {
2532  // the filtergraph was never configured, use the fallback parameters
2533  ifp->format = ifp->fallback.format;
2534  ifp->sample_rate = ifp->fallback.sample_rate;
2535  ifp->width = ifp->fallback.width;
2536  ifp->height = ifp->fallback.height;
// NOTE(review): line 2537 missing — presumably copying another fallback
// video field (between height and color_space); TODO confirm.
2538  ifp->color_space = ifp->fallback.color_space;
2539  ifp->color_range = ifp->fallback.color_range;
2540 
// NOTE(review): line 2541 missing — presumably the channel-layout copy call
// whose argument list continues on the next line; TODO confirm.
2542  &ifp->fallback.ch_layout);
2543  if (ret < 0)
2544  return ret;
2545 
 // with the fallback in place the graph may now be configurable
2546  if (ifilter_has_all_input_formats(ifilter->graph)) {
2547  ret = configure_filtergraph(ifilter->graph, fgt);
2548  if (ret < 0) {
2549  av_log(NULL, AV_LOG_ERROR, "Error initializing filters!\n");
2550  return ret;
2551  }
2552  }
2553  }
2554 
2555  if (ifp->format < 0) {
// NOTE(review): line 2556 missing — presumably the av_log() call whose
// message continues on the following lines; TODO confirm.
2557  "Cannot determine format of input %s after EOF\n",
2558  ifp->opts.name);
2559  return AVERROR_INVALIDDATA;
2560  }
2561  }
2562 
2563  return 0;
2564 }
2565 
/* Bit flags describing which input parameters changed, accumulated into
 * need_reinit in send_frame() to decide whether the graph must be
 * reconfigured.
 * NOTE(review): line 2566 — the enum's opening line — is missing from this
 * listing; TODO confirm the enum tag against the real ffmpeg_filter.c. */
2567  VIDEO_CHANGED = (1 << 0),
2568  AUDIO_CHANGED = (1 << 1),
2569  MATRIX_CHANGED = (1 << 2),
2570  HWACCEL_CHANGED = (1 << 3)
2571 };
2572 
/* Substitute the literal "unknown" for a NULL string, so *_name() lookups
 * that failed can be printed safely in log messages. */
2573 static const char *unknown_if_null(const char *str)
2574 {
2575  return str ? str : "unknown";
2576 }
2577 
/* NOTE(review): doxygen listing dropped line 2578 here — presumably the
 * start of the signature, static int send_frame(FilterGraph *fg,
 * FilterGraphThread *fgt, ...) judging by the call in filter_thread();
 * TODO confirm against the real ffmpeg_filter.c.
 *
 * Sends one decoded frame to a filtergraph input, reconfiguring the graph
 * first when the frame's parameters differ from what the input was set up
 * with (and IFILTER_FLAG_REINIT allows it), or buffering the frame when the
 * graph cannot be configured yet. */
2579  InputFilter *ifilter, AVFrame *frame)
2580 {
2581  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2582  FrameData *fd;
2583  AVFrameSideData *sd;
2584  int need_reinit = 0, ret;
2585 
2586  /* determine if the parameters for this input changed */
2587  switch (ifp->type) {
2588  case AVMEDIA_TYPE_AUDIO:
2589  if (ifp->format != frame->format ||
2590  ifp->sample_rate != frame->sample_rate ||
// NOTE(review): line 2591 missing — presumably the channel-layout
// comparison closing this condition; TODO confirm.
2592  need_reinit |= AUDIO_CHANGED;
2593  break;
2594  case AVMEDIA_TYPE_VIDEO:
2595  if (ifp->format != frame->format ||
2596  ifp->width != frame->width ||
2597  ifp->height != frame->height ||
2598  ifp->color_space != frame->colorspace ||
2599  ifp->color_range != frame->color_range)
2600  need_reinit |= VIDEO_CHANGED;
2601  break;
2602  }
2603 
// NOTE(review): line 2604 missing — presumably the side-data lookup
// assigning sd and opening the if whose body follows; TODO confirm.
2605  if (!ifp->displaymatrix_present ||
2606  memcmp(sd->data, ifp->displaymatrix, sizeof(ifp->displaymatrix)))
2607  need_reinit |= MATRIX_CHANGED;
2608  } else if (ifp->displaymatrix_present)
2609  need_reinit |= MATRIX_CHANGED;
2610 
 // reinit only allowed before first configuration, or when opted in
2611  if (!(ifp->opts.flags & IFILTER_FLAG_REINIT) && fgt->graph)
2612  need_reinit = 0;
2613 
2614  if (!!ifp->hw_frames_ctx != !!frame->hw_frames_ctx ||
2615  (ifp->hw_frames_ctx && ifp->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2616  need_reinit |= HWACCEL_CHANGED;
2617 
2618  if (need_reinit) {
// NOTE(review): line 2619 missing — presumably re-reading the input
// parameters from the frame and assigning ret; TODO confirm.
2620  if (ret < 0)
2621  return ret;
2622  }
2623 
2624  /* (re)init the graph if possible, otherwise buffer the frame and return */
2625  if (need_reinit || !fgt->graph) {
2626  AVFrame *tmp = av_frame_alloc();
2627 
2628  if (!tmp)
2629  return AVERROR(ENOMEM);
2630 
2631  if (!ifilter_has_all_input_formats(fg)) {
// NOTE(review): line 2632 missing — presumably moving the frame into tmp
// before queueing; TODO confirm.
2633 
 // queue takes ownership of tmp on success
2634  ret = av_fifo_write(ifp->frame_queue, &tmp, 1);
2635  if (ret < 0)
2636  av_frame_free(&tmp);
2637 
2638  return ret;
2639  }
2640 
 // drain the existing graph before tearing it down and reconfiguring
2641  ret = fgt->graph ? read_frames(fg, fgt, tmp) : 0;
2642  av_frame_free(&tmp);
2643  if (ret < 0)
2644  return ret;
2645 
 // log a human-readable reason for the reconfiguration
2646  if (fgt->graph) {
2647  AVBPrint reason;
// NOTE(review): line 2648 missing — presumably the av_bprint_init() call
// for reason; TODO confirm.
2649  if (need_reinit & AUDIO_CHANGED) {
2650  const char *sample_format_name = av_get_sample_fmt_name(frame->format);
2651  av_bprintf(&reason, "audio parameters changed to %d Hz, ", frame->sample_rate);
// NOTE(review): line 2652 missing — presumably printing the channel layout
// into reason; TODO confirm.
2653  av_bprintf(&reason, ", %s, ", unknown_if_null(sample_format_name));
2654  }
2655  if (need_reinit & VIDEO_CHANGED) {
2656  const char *pixel_format_name = av_get_pix_fmt_name(frame->format);
2657  const char *color_space_name = av_color_space_name(frame->colorspace);
2658  const char *color_range_name = av_color_range_name(frame->color_range);
2659  av_bprintf(&reason, "video parameters changed to %s(%s, %s), %dx%d, ",
2660  unknown_if_null(pixel_format_name), unknown_if_null(color_range_name),
2661  unknown_if_null(color_space_name), frame->width, frame->height);
2662  }
2663  if (need_reinit & MATRIX_CHANGED)
2664  av_bprintf(&reason, "display matrix changed, ");
2665  if (need_reinit & HWACCEL_CHANGED)
2666  av_bprintf(&reason, "hwaccel changed, ");
2667  if (reason.len > 1)
2668  reason.str[reason.len - 2] = '\0'; // remove last comma
2669  av_log(fg, AV_LOG_INFO, "Reconfiguring filter graph%s%s\n", reason.len ? " because " : "", reason.str);
2670  }
2671 
2672  ret = configure_filtergraph(fg, fgt);
2673  if (ret < 0) {
2674  av_log(fg, AV_LOG_ERROR, "Error reinitializing filters!\n");
2675  return ret;
2676  }
2677  }
2678 
// NOTE(review): lines 2679-2680 missing — presumably rescaling the frame's
// pts/duration into the input's time base; TODO confirm.
2681  frame->time_base = ifp->time_base;
2682 #if LIBAVUTIL_VERSION_MAJOR < 59
// NOTE(review): lines 2683-2684 missing — presumably a deprecated-field
// assignment wrapped in AV_NOWARN_DEPRECATED, closed by the lone ')'; TODO
// confirm.
2685  )
2686 #endif
2687 
 // attach per-frame metadata used for latency probing
2688  fd = frame_data(frame);
2689  if (!fd)
2690  return AVERROR(ENOMEM);
2691  fd->wallclock[LATENCY_PROBE_FILTER_PRE] = av_gettime_relative();
2692 
// NOTE(review): lines 2693-2694 and 2696 missing — presumably the
// buffersrc call submitting the frame (assigning ret) and part of the
// error path; TODO confirm.
2695  if (ret < 0) {
2697  if (ret != AVERROR_EOF)
2698  av_log(fg, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2699  return ret;
2700  }
2701 
2702  return 0;
2703 }
2704 
/* Build a short name for the filtering thread: "<media>f#<file>:<stream>"
 * for a simple (per-output-stream) graph, "fc<index>" for a complex one. */
2705 static void fg_thread_set_name(const FilterGraph *fg)
2706 {
2707  char name[16];
2708  if (filtergraph_is_simple(fg)) {
2709  OutputStream *ost = fg->outputs[0]->ost;
2710  snprintf(name, sizeof(name), "%cf#%d:%d",
2711  av_get_media_type_string(ost->type)[0],
2712  ost->file->index, ost->index);
2713  } else {
2714  snprintf(name, sizeof(name), "fc%d", fg->index);
2715  }
2716 
// NOTE(review): line 2717 missing from this listing — presumably the call
// that applies `name` to the current thread; TODO confirm.
2718 }
2719 
/* Free all per-thread filtering state: queued output frames, the scratch
 * frame, the EOF bookkeeping arrays and the graph itself, then zero the
 * struct so it can be reused.
 * NOTE(review): line 2720 — the signature,
 * static void fg_thread_uninit(FilterGraphThread *fgt) per the doxygen
 * index — is missing from this listing. */
2721 {
2722  if (fgt->frame_queue_out) {
2723  AVFrame *frame;
 // drain and free every frame still queued for output
2724  while (av_fifo_read(fgt->frame_queue_out, &frame, 1) >= 0)
2725  av_frame_free(&frame);
// NOTE(review): line 2726 missing — presumably freeing the FIFO itself;
// TODO confirm.
2727  }
2728 
2729  av_frame_free(&fgt->frame);
2730  av_freep(&fgt->eof_in);
2731  av_freep(&fgt->eof_out);
2732 
2733  avfilter_graph_free(&fgt->graph);
2734 
2735  memset(fgt, 0, sizeof(*fgt));
2736 }
2737 
/* Allocate all per-thread filtering state (scratch frame, per-input and
 * per-output EOF flags, output frame queue). On any failure everything is
 * released via fg_thread_uninit() and AVERROR(ENOMEM) is returned. */
2738 static int fg_thread_init(FilterGraphThread *fgt, const FilterGraph *fg)
2739 {
2740  memset(fgt, 0, sizeof(*fgt));
2741 
2742  fgt->frame = av_frame_alloc();
2743  if (!fgt->frame)
2744  goto fail;
2745 
2746  fgt->eof_in = av_calloc(fg->nb_inputs, sizeof(*fgt->eof_in));
2747  if (!fgt->eof_in)
2748  goto fail;
2749 
2750  fgt->eof_out = av_calloc(fg->nb_outputs, sizeof(*fgt->eof_out));
2751  if (!fgt->eof_out)
2752  goto fail;
2753 
// NOTE(review): line 2754 missing from this listing — presumably the FIFO
// allocation for fgt->frame_queue_out, which is NULL-checked below; TODO
// confirm.
2755  if (!fgt->frame_queue_out)
2756  goto fail;
2757 
2758  return 0;
2759 
2760 fail:
2761  fg_thread_uninit(fgt);
2762  return AVERROR(ENOMEM);
2763 }
2764 
/**
 * Main loop of a filtering thread: receives frames, EOFs and control
 * messages from the scheduler, routes them to the right filtergraph input
 * (sub2video, send_frame or send_eof), and forwards produced frames to the
 * consumers via read_frames(). Returns the final status code cast to
 * void* (0 on normal EOF termination).
 */
2765 static void *filter_thread(void *arg)
2766 {
2767  FilterGraphPriv *fgp = arg;
2768  FilterGraph *fg = &fgp->fg;
2769 
2770  FilterGraphThread fgt;
2771  int ret = 0, input_status = 0;
2772 
2773  ret = fg_thread_init(&fgt, fg);
2774  if (ret < 0)
2775  goto finish;
2776 
2777  fg_thread_set_name(fg);
2778 
2779  // if we have all input parameters the graph can now be configured
// NOTE(review): line 2780 missing from this listing — presumably the
// condition opening this block (an ifilter_has_all_input_formats() check,
// given the closing brace at 2787); TODO confirm.
2781  ret = configure_filtergraph(fg, &fgt);
2782  if (ret < 0) {
2783  av_log(fg, AV_LOG_ERROR, "Error configuring filter graph: %s\n",
2784  av_err2str(ret));
2785  goto finish;
2786  }
2787  }
2788 
2789  while (1) {
2790  InputFilter *ifilter;
2791  InputFilterPriv *ifp;
2792  enum FrameOpaque o;
2793  unsigned input_idx = fgt.next_in;
2794 
2795  input_status = sch_filter_receive(fgp->sch, fgp->sch_idx,
2796  &input_idx, fgt.frame);
2797  if (input_status == AVERROR_EOF) {
2798  av_log(fg, AV_LOG_VERBOSE, "Filtering thread received EOF\n");
2799  break;
2800  } else if (input_status == AVERROR(EAGAIN)) {
2801  // should only happen when we didn't request any input
2802  av_assert0(input_idx == fg->nb_inputs);
2803  goto read_frames;
2804  }
2805  av_assert0(input_status >= 0);
2806 
2807  o = (intptr_t)fgt.frame->opaque;
2808 
 // NOTE(review): exact duplicate of the previous statement as shown in
 // this listing — redundant but harmless; TODO confirm against the real
 // ffmpeg_filter.c.
2809  o = (intptr_t)fgt.frame->opaque;
2810 
2811  // message on the control stream
2812  if (input_idx == fg->nb_inputs) {
2813  FilterCommand *fc;
2814 
2815  av_assert0(o == FRAME_OPAQUE_SEND_COMMAND && fgt.frame->buf[0]);
2816 
2817  fc = (FilterCommand*)fgt.frame->buf[0]->data;
2818  send_command(fg, fgt.graph, fc->time, fc->target, fc->command, fc->arg,
2819  fc->all_filters);
2820  av_frame_unref(fgt.frame);
2821  continue;
2822  }
2823 
2824  // we received an input frame or EOF
2825  ifilter = fg->inputs[input_idx];
2826  ifp = ifp_from_ifilter(ifilter);
2827 
2828  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE) {
2829  int hb_frame = input_status >= 0 && o == FRAME_OPAQUE_SUB_HEARTBEAT;
2830  ret = sub2video_frame(ifilter, (fgt.frame->buf[0] || hb_frame) ? fgt.frame : NULL,
2831  !fgt.graph);
2832  } else if (fgt.frame->buf[0]) {
2833  ret = send_frame(fg, &fgt, ifilter, fgt.frame);
2834  } else {
// NOTE(review): line 2835 missing from this listing — presumably a comment
// or assertion on the empty-frame (EOF) case; TODO confirm.
2836  ret = send_eof(&fgt, ifilter, fgt.frame->pts, fgt.frame->time_base);
2837  }
2838  av_frame_unref(fgt.frame);
2839  if (ret == AVERROR_EOF) {
 // this input is done; tell the scheduler to stop feeding it
2840  av_log(fg, AV_LOG_VERBOSE, "Input %u no longer accepts new data\n",
2841  input_idx);
2842  sch_filter_receive_finish(fgp->sch, fgp->sch_idx, input_idx);
2843  continue;
2844  }
2845  if (ret < 0)
2846  goto finish;
2847 
2848 read_frames:
2849  // retrieve all newly available frames
2850  ret = read_frames(fg, &fgt, fgt.frame);
2851  if (ret == AVERROR_EOF) {
2852  av_log(fg, AV_LOG_VERBOSE, "All consumers returned EOF\n");
2853  break;
2854  } else if (ret < 0) {
2855  av_log(fg, AV_LOG_ERROR, "Error sending frames to consumers: %s\n",
2856  av_err2str(ret));
2857  goto finish;
2858  }
2859  }
2860 
 // flush any outputs that have not yet seen EOF
2861  for (unsigned i = 0; i < fg->nb_outputs; i++) {
// NOTE(review): line 2862 missing from this listing — presumably the
// OutputFilterPriv lookup for output i, used by fg_output_frame(); TODO
// confirm.
2863 
2864  if (fgt.eof_out[i] || !fgt.graph)
2865  continue;
2866 
2867  ret = fg_output_frame(ofp, &fgt, NULL);
2868  if (ret < 0)
2869  goto finish;
2870  }
2871 
2872 finish:
2873  // EOF is normal termination
2874  if (ret == AVERROR_EOF)
2875  ret = 0;
2876 
2877  fg_thread_uninit(&fgt);
2878 
2879  return (void*)(intptr_t)ret;
2880 }
2881 
/**
 * Queue a filter command to be executed on the filtering thread.
 *
 * The FilterCommand is heap-allocated, wrapped in an AVBufferRef attached
 * to fgp->frame, tagged with FRAME_OPAQUE_SEND_COMMAND, and delivered via
 * the scheduler's control stream (unwrapped in filter_thread()).
 *
 * Returns nothing: any allocation failure silently drops the command —
 * intermediate allocations are released and the command is simply not sent.
 */
2882 void fg_send_command(FilterGraph *fg, double time, const char *target,
2883  const char *command, const char *arg, int all_filters)
2884 {
2885  FilterGraphPriv *fgp = fgp_from_fg(fg);
2886  AVBufferRef *buf;
2887  FilterCommand *fc;
2888 
2889  fc = av_mallocz(sizeof(*fc));
2890  if (!fc)
2891  return;
2892 
 // buf takes ownership of fc; filter_command_free releases its strings
2893  buf = av_buffer_create((uint8_t*)fc, sizeof(*fc), filter_command_free, NULL, 0);
2894  if (!buf) {
2895  av_freep(&fc);
2896  return;
2897  }
2898 
2899  fc->target = av_strdup(target);
2900  fc->command = av_strdup(command);
2901  fc->arg = av_strdup(arg);
2902  if (!fc->target || !fc->command || !fc->arg) {
 // unreffing buf frees fc and any strings that were duplicated
2903  av_buffer_unref(&buf);
2904  return;
2905  }
2906 
2907  fc->time = time;
2908  fc->all_filters = all_filters;
2909 
2910  fgp->frame->buf[0] = buf;
2911  fgp->frame->opaque = (void*)(intptr_t)FRAME_OPAQUE_SEND_COMMAND;
2912 
2913  sch_filter_command(fgp->sch, fgp->sch_idx, fgp->frame);
2914 }
SCH_FILTER_OUT
#define SCH_FILTER_OUT(filter, output)
Definition: ffmpeg_sched.h:122
AVSubtitle
Definition: avcodec.h:2273
formats
formats
Definition: signature.h:48
set_channel_layout
static int set_channel_layout(OutputFilterPriv *f, OutputStream *ost)
Definition: ffmpeg_filter.c:706
configure_input_filter
static int configure_input_filter(FilterGraph *fg, AVFilterGraph *graph, InputFilter *ifilter, AVFilterInOut *in)
Definition: ffmpeg_filter.c:1624
init_complex_filtergraph
int init_complex_filtergraph(FilterGraph *fg)
Definition: ffmpeg_filter.c:1162
FilterGraphThread::next_in
unsigned next_in
Definition: ffmpeg_filter.c:94
AVCodec
AVCodec.
Definition: codec.h:187
AVMEDIA_TYPE_SUBTITLE
@ AVMEDIA_TYPE_SUBTITLE
Definition: avutil.h:204
av_gettime_relative
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
Definition: time.c:56
AVFILTER_CMD_FLAG_ONE
#define AVFILTER_CMD_FLAG_ONE
Stop once a filter understood the command (for target=all for example); fast filters are favored automatically.
Definition: avfilter.h:708
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
AVFrame::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: frame.h:656
AV_BPRINT_SIZE_UNLIMITED
#define AV_BPRINT_SIZE_UNLIMITED
av_buffersink_get_ch_layout
int av_buffersink_get_ch_layout(const AVFilterContext *ctx, AVChannelLayout *out)
Definition: buffersink.c:234
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
av_buffersink_get_sample_aspect_ratio
AVRational av_buffersink_get_sample_aspect_ratio(const AVFilterContext *ctx)
name
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option name
Definition: writing_filters.txt:88
avfilter_filter_pad_count
unsigned avfilter_filter_pad_count(const AVFilter *filter, int is_output)
Get the number of elements in an AVFilter's inputs or outputs array.
Definition: avfilter.c:604
extra_bits
#define extra_bits(eb)
Definition: intrax8.c:119
OutputFilter::graph
struct FilterGraph * graph
Definition: ffmpeg.h:286
av_clip
#define av_clip
Definition: common.h:98
InputFilterPriv::type
enum AVMediaType type
Definition: ffmpeg_filter.c:122
sch_filter_send
int sch_filter_send(Scheduler *sch, unsigned fg_idx, unsigned out_idx, AVFrame *frame)
Called by filtergraph tasks to send a filtered frame or EOF to consumers.
Definition: ffmpeg_sched.c:2175
VSYNC_VFR
@ VSYNC_VFR
Definition: ffmpeg.h:73
OutputFilterPriv::filter
AVFilterContext * filter
Definition: ffmpeg_filter.c:204
av_bprint_is_complete
static int av_bprint_is_complete(const AVBPrint *buf)
Test if the print buffer is complete (not truncated).
Definition: bprint.h:218
r
const char * r
Definition: vf_curves.c:126
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
nb_input_files
int nb_input_files
Definition: ffmpeg.c:126
AVSubtitle::rects
AVSubtitleRect ** rects
Definition: avcodec.h:2278
opt.h
choose_input
static int choose_input(const FilterGraph *fg, const FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:1903
get_rotation
double get_rotation(const int32_t *displaymatrix)
Definition: cmdutils.c:1118
FilterGraphPriv::frame
AVFrame * frame
Definition: ffmpeg_filter.c:64
FilterGraphPriv::sch
Scheduler * sch
Definition: ffmpeg_filter.c:68
AVCodecParameters::codec_type
enum AVMediaType codec_type
General type of the encoded data.
Definition: codec_par.h:51
FilterGraphThread::got_frame
int got_frame
Definition: ffmpeg_filter.c:96
AVFilterGraph::nb_threads
int nb_threads
Maximum number of threads used by filters in this graph.
Definition: avfilter.h:856
InputFilterPriv::ch_layout
AVChannelLayout ch_layout
Definition: ffmpeg_filter.c:138
avfilter_pad_get_name
const char * avfilter_pad_get_name(const AVFilterPad *pads, int pad_idx)
Get the name of an AVFilterPad.
Definition: avfilter.c:961
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:1033
FrameData
Definition: ffmpeg.h:616
send_command
static void send_command(FilterGraph *fg, AVFilterGraph *graph, double time, const char *target, const char *command, const char *arg, int all_filters)
Definition: ffmpeg_filter.c:1878
InputFilterPriv::last_pts
int64_t last_pts
Definition: ffmpeg_filter.c:166
avfilter_graph_segment_create_filters
int avfilter_graph_segment_create_filters(AVFilterGraphSegment *seg, int flags)
Create filters specified in a graph segment.
Definition: graphparser.c:516
avio_close
int avio_close(AVIOContext *s)
Close the resource accessed by the AVIOContext s and free it.
Definition: aviobuf.c:1335
out
FILE * out
Definition: movenc.c:54
av_frame_get_buffer
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:243
AVCodecContext::sample_rate
int sample_rate
samples per second
Definition: avcodec.h:1068
av_bprint_init
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:69
av_frame_get_side_data
AVFrameSideData * av_frame_get_side_data(const AVFrame *frame, enum AVFrameSideDataType type)
Definition: frame.c:824
InputFilterPriv::filter
AVFilterContext * filter
Definition: ffmpeg_filter.c:110
init_simple_filtergraph
int init_simple_filtergraph(InputStream *ist, OutputStream *ost, char *graph_desc, Scheduler *sch, unsigned sched_idx_enc)
Definition: ffmpeg_filter.c:1055
sample_fmts
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:947
AVFrame::duration
int64_t duration
Duration of the frame, in the same units as pts.
Definition: frame.h:807
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2968
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:90
FilterGraph::inputs
InputFilter ** inputs
Definition: ffmpeg.h:303
av_buffersink_get_frame_flags
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
Definition: buffersink.c:151
av_dict_count
int av_dict_count(const AVDictionary *m)
Get number of entries in dictionary.
Definition: dict.c:39
ifilter_parameters_from_frame
static int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame)
Definition: ffmpeg_filter.c:1841
HWACCEL_CHANGED
@ HWACCEL_CHANGED
Definition: ffmpeg_filter.c:2570
frame_drop_threshold
float frame_drop_threshold
Definition: ffmpeg_opt.c:68
AV_TIME_BASE_Q
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:264
InputFilterPriv::time_base
AVRational time_base
Definition: ffmpeg_filter.c:140
output
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce output
Definition: filter_design.txt:225
FilterCommand::arg
char * arg
Definition: ffmpeg_filter.c:242
AVSubtitleRect
Definition: avcodec.h:2245
av_asprintf
char * av_asprintf(const char *fmt,...)
Definition: avstring.c:115
AVSubtitle::num_rects
unsigned num_rects
Definition: avcodec.h:2277
fg_free
void fg_free(FilterGraph **pfg)
Definition: ffmpeg_filter.c:874
FPSConvContext::frames_prev_hist
int64_t frames_prev_hist[3]
Definition: ffmpeg_filter.c:186
AV_CODEC_ID_MPEG4
@ AV_CODEC_ID_MPEG4
Definition: codec_id.h:64
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:100
OutputFile::start_time
int64_t start_time
start time in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:609
AVFrame::opaque
void * opaque
Frame owner's private data.
Definition: frame.h:501
AVFrame::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: frame.h:667
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:340
tmp
static uint8_t tmp[11]
Definition: aes_ctr.c:28
AVFilterInOut::next
struct AVFilterInOut * next
next input/output in the list, NULL if this is the last
Definition: avfilter.h:989
pixdesc.h
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:452
AVFrame::width
int width
Definition: frame.h:412
FilterGraphPriv::log_name
char log_name[32]
Definition: ffmpeg_filter.c:49
w
uint8_t w
Definition: llviddspenc.c:38
FilterGraphPriv::have_sources
int have_sources
Definition: ffmpeg_filter.c:56
check_filter_outputs
int check_filter_outputs(void)
Definition: ffmpeg_filter.c:1449
AVOption
AVOption.
Definition: opt.h:251
fg_output_frame
static int fg_output_frame(OutputFilterPriv *ofp, FilterGraphThread *fgt, AVFrame *frame)
Definition: ffmpeg_filter.c:2218
av_buffersrc_add_frame
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
Definition: buffersrc.c:184
FilterGraph::index
int index
Definition: ffmpeg.h:301
InputFilterPriv::sample_rate
int sample_rate
Definition: ffmpeg_filter.c:137
data
const char data[16]
Definition: mxf.c:148
FPSConvContext::last_dropped
int last_dropped
Definition: ffmpeg_filter.c:190
OutputFilterPriv::ts_offset
int64_t ts_offset
Definition: ffmpeg_filter.c:229
cleanup_filtergraph
static void cleanup_filtergraph(FilterGraph *fg, FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:1634
ifilter_parameters_from_dec
int ifilter_parameters_from_dec(InputFilter *ifilter, const AVCodecContext *dec)
Set up fallback filtering parameters from a decoder context.
Definition: ffmpeg_filter.c:1816
ffmpeg.h
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:196
AV_NOWARN_DEPRECATED
#define AV_NOWARN_DEPRECATED(code)
Disable warnings about deprecated features This is useful for sections of code kept for backward comp...
Definition: attributes.h:126
fc
#define fc(width, name, range_min, range_max)
Definition: cbs_av1.c:464
FilterGraph::nb_inputs
int nb_inputs
Definition: ffmpeg.h:304
VIDEO_CHANGED
@ VIDEO_CHANGED
Definition: ffmpeg_filter.c:2567
AV_FRAME_DATA_DISPLAYMATRIX
@ AV_FRAME_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: frame.h:85
filter
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce then the filter should push the output frames on the output link immediately As an exception to the previous rule if the input frame is enough to produce several output frames then the filter needs output only at least one per link The additional frames can be left buffered in the filter
Definition: filter_design.txt:228
AVFrame::flags
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:649
ofp_from_ofilter
static OutputFilterPriv * ofp_from_ofilter(OutputFilter *ofilter)
Definition: ffmpeg_filter.c:234
AVChannelLayout::order
enum AVChannelOrder order
Channel order used in this layout.
Definition: channel_layout.h:318
av_buffer_ref
AVBufferRef * av_buffer_ref(const AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:103
IFILTER_FLAG_AUTOROTATE
@ IFILTER_FLAG_AUTOROTATE
Definition: ffmpeg.h:254
AVChannelLayout::nb_channels
int nb_channels
Number of channels in this layout.
Definition: channel_layout.h:323
AVFrame::buf
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:590
avio_size
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:370
av_strlcatf
size_t av_strlcatf(char *dst, size_t size, const char *fmt,...)
Definition: avstring.c:103
ost
static AVStream * ost
Definition: vaapi_transcode.c:42
sample_rate
sample_rate
Definition: ffmpeg_filter.c:424
fg_output_step
static int fg_output_step(OutputFilterPriv *ofp, FilterGraphThread *fgt, AVFrame *frame)
Definition: ffmpeg_filter.c:2303
FilterGraphPriv
Definition: ffmpeg_filter.c:45
av_channel_layout_describe_bprint
int av_channel_layout_describe_bprint(const AVChannelLayout *channel_layout, AVBPrint *bp)
bprint variant of av_channel_layout_describe().
Definition: channel_layout.c:793
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:361
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:30
FilterGraphThread::eof_in
uint8_t * eof_in
Definition: ffmpeg_filter.c:99
avfilter_graph_free
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
Definition: avfiltergraph.c:116
configure_filtergraph
static int configure_filtergraph(FilterGraph *fg, FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:1669
choose_pix_fmts
static int choose_pix_fmts(OutputFilter *ofilter, AVBPrint *bprint, const char **dst)
Definition: ffmpeg_filter.c:369
OutputFile::nb_streams
int nb_streams
Definition: ffmpeg.h:606
AUTO_INSERT_FILTER
#define AUTO_INSERT_FILTER(opt_name, filter_name, arg)
InputStream
Definition: ffmpeg.h:356
filter_nbthreads
char * filter_nbthreads
Definition: ffmpeg_opt.c:82
debug_ts
int debug_ts
Definition: ffmpeg_opt.c:76
InputFilterOptions::trim_start_us
int64_t trim_start_us
Definition: ffmpeg.h:260
InputFilterOptions::flags
unsigned flags
Definition: ffmpeg.h:276
avfilter_graph_create_filter
int avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt, const char *name, const char *args, void *opaque, AVFilterGraph *graph_ctx)
Create and add a filter instance into an existing graph.
Definition: avfiltergraph.c:137
avfilter_graph_alloc_filter
AVFilterContext * avfilter_graph_alloc_filter(AVFilterGraph *graph, const AVFilter *filter, const char *name)
Create a new filter instance in a filter graph.
Definition: avfiltergraph.c:164
avio_open2
int avio_open2(AVIOContext **s, const char *url, int flags, const AVIOInterruptCB *int_cb, AVDictionary **options)
Create and initialize a AVIOContext for accessing the resource indicated by url.
Definition: aviobuf.c:1329
finish
static void finish(void)
Definition: movenc.c:342
AV_OPT_TYPE_BINARY
@ AV_OPT_TYPE_BINARY
offset must point to a pointer immediately followed by an int for the length
Definition: opt.h:231
av_color_space_name
const char * av_color_space_name(enum AVColorSpace space)
Definition: pixdesc.c:3344
FRAME_OPAQUE_SUB_HEARTBEAT
@ FRAME_OPAQUE_SUB_HEARTBEAT
Definition: ffmpeg.h:92
OutputFilterPriv
Definition: ffmpeg_filter.c:199
fg_thread_uninit
static void fg_thread_uninit(FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:2720
AVCodecContext::ch_layout
AVChannelLayout ch_layout
Audio channel layout.
Definition: avcodec.h:2111
fail
#define fail()
Definition: checkasm.h:179
av_fifo_write
int av_fifo_write(AVFifo *f, const void *buf, size_t nb_elems)
Write data into a FIFO.
Definition: fifo.c:188
sub2video_push_ref
static void sub2video_push_ref(InputFilterPriv *ifp, int64_t pts)
Definition: ffmpeg_filter.c:312
avfilter_graph_alloc
AVFilterGraph * avfilter_graph_alloc(void)
Allocate a filter graph.
Definition: avfiltergraph.c:82
AV_PIX_FMT_FLAG_HWACCEL
#define AV_PIX_FMT_FLAG_HWACCEL
Pixel format is an HW accelerated format.
Definition: pixdesc.h:128
FFSIGN
#define FFSIGN(a)
Definition: common.h:73
samplefmt.h
AVERROR_OPTION_NOT_FOUND
#define AVERROR_OPTION_NOT_FOUND
Option not found.
Definition: error.h:63
avfilter_graph_segment_free
void avfilter_graph_segment_free(AVFilterGraphSegment **seg)
Free the provided AVFilterGraphSegment and everything associated with it.
Definition: graphparser.c:276
sub2video_get_blank_frame
static int sub2video_get_blank_frame(InputFilterPriv *ifp)
Definition: ffmpeg_filter.c:259
AV_BPRINT_SIZE_AUTOMATIC
#define AV_BPRINT_SIZE_AUTOMATIC
ifilter_has_all_input_formats
static int ifilter_has_all_input_formats(FilterGraph *fg)
Definition: ffmpeg_filter.c:618
val
static double val(void *priv, double ch)
Definition: aeval.c:78
OutputFilterPriv::index
int index
Definition: ffmpeg_filter.c:202
AVFrame::ch_layout
AVChannelLayout ch_layout
Channel layout of the audio data.
Definition: frame.h:802
SCH_ENC
#define SCH_ENC(encoder)
Definition: ffmpeg_sched.h:116
configure_input_video_filter
static int configure_input_video_filter(FilterGraph *fg, AVFilterGraph *graph, InputFilter *ifilter, AVFilterInOut *in)
Definition: ffmpeg_filter.c:1477
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
avfilter_graph_segment_parse
int avfilter_graph_segment_parse(AVFilterGraph *graph, const char *graph_str, int flags, AVFilterGraphSegment **seg)
Parse a textual filtergraph description into an intermediate form.
Definition: graphparser.c:460
pts
static int64_t pts
Definition: transcode_aac.c:643
av_opt_set
int av_opt_set(void *obj, const char *name, const char *val, int search_flags)
Definition: opt.c:497
graph_is_meta
static int graph_is_meta(AVFilterGraph *graph)
Definition: ffmpeg_filter.c:1650
FilterGraphThread::frame
AVFrame * frame
Definition: ffmpeg_filter.c:86
AVFILTER_FLAG_DYNAMIC_INPUTS
#define AVFILTER_FLAG_DYNAMIC_INPUTS
The number of the filter inputs is not determined just by AVFilter.inputs.
Definition: avfilter.h:106
FrameData::tb
AVRational tb
Definition: ffmpeg.h:626
fgp_from_fg
static FilterGraphPriv * fgp_from_fg(FilterGraph *fg)
Definition: ffmpeg_filter.c:72
OutputFilterPriv::sample_rate
int sample_rate
Definition: ffmpeg_filter.c:209
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
FPSConvContext::dropped_keyframe
int dropped_keyframe
Definition: ffmpeg_filter.c:191
AVRational::num
int num
Numerator.
Definition: rational.h:59
LATENCY_PROBE_FILTER_PRE
@ LATENCY_PROBE_FILTER_PRE
Definition: ffmpeg.h:106
InputFilterOptions::trim_end_us
int64_t trim_end_us
Definition: ffmpeg.h:261
AVFilterPad
A filter pad used for either input or output.
Definition: internal.h:33
sch_add_filtergraph
int sch_add_filtergraph(Scheduler *sch, unsigned nb_inputs, unsigned nb_outputs, SchThreadFunc func, void *ctx)
Add a filtergraph to the scheduler.
Definition: ffmpeg_sched.c:835
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:88
sub2video_heartbeat
static void sub2video_heartbeat(InputFilter *ifilter, int64_t pts, AVRational tb)
Definition: ffmpeg_filter.c:2443
check_stream_specifier
int check_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec)
Check if the given stream matches a stream specifier.
Definition: cmdutils.c:986
OutputFile::shortest
int shortest
Definition: ffmpeg.h:611
avfilter_inout_free
void avfilter_inout_free(AVFilterInOut **inout)
Free the supplied list of AVFilterInOut and set *inout to NULL.
Definition: graphparser.c:76
avassert.h
FrameData::frame_rate_filter
AVRational frame_rate_filter
Definition: ffmpeg.h:629
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
send_eof
static int send_eof(FilterGraphThread *fgt, InputFilter *ifilter, int64_t pts, AVRational tb)
Definition: ffmpeg_filter.c:2512
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
InputFilterPriv
Definition: ffmpeg_filter.c:103
AV_BUFFERSRC_FLAG_KEEP_REF
@ AV_BUFFERSRC_FLAG_KEEP_REF
Keep a reference to the frame.
Definition: buffersrc.h:53
av_fifo_read
int av_fifo_read(AVFifo *f, void *buf, size_t nb_elems)
Read data from a FIFO.
Definition: fifo.c:240
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:628
duration
int64_t duration
Definition: movenc.c:64
av_buffersink_get_frame_rate
AVRational av_buffersink_get_frame_rate(const AVFilterContext *ctx)
ifilter_alloc
static InputFilter * ifilter_alloc(FilterGraph *fg)
Definition: ffmpeg_filter.c:845
AVFilterChain::filters
AVFilterParams ** filters
Definition: avfilter.h:1165
av_dict_get
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:62
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:86
filter_command_free
static void filter_command_free(void *opaque, uint8_t *data)
Definition: ffmpeg_filter.c:248
VSYNC_VSCFR
@ VSYNC_VSCFR
Definition: ffmpeg.h:74
llrintf
#define llrintf(x)
Definition: libm.h:399
s
#define s(width, name)
Definition: cbs_vp9.c:198
FilterGraphPriv::frame_enc
AVFrame * frame_enc
Definition: ffmpeg_filter.c:66
InputFilterPriv::frame
AVFrame * frame
Definition: ffmpeg_filter.c:115
FilterGraph::outputs
OutputFilter ** outputs
Definition: ffmpeg.h:305
format
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample format(the sample packing is implied by the sample format) and sample rate. The lists are not just lists
ofilter_alloc
static OutputFilter * ofilter_alloc(FilterGraph *fg)
Definition: ffmpeg_filter.c:642
AVDictionaryEntry::key
char * key
Definition: dict.h:90
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
configure_output_video_filter
static int configure_output_video_filter(FilterGraph *fg, AVFilterGraph *graph, OutputFilter *ofilter, AVFilterInOut *out)
Definition: ffmpeg_filter.c:1246
AV_CHANNEL_ORDER_UNSPEC
@ AV_CHANNEL_ORDER_UNSPEC
Only the channel count is specified, without any further information about the channel order.
Definition: channel_layout.h:112
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
OutputFilter::linklabel
uint8_t * linklabel
Definition: ffmpeg.h:291
InputFilter
Definition: ffmpeg.h:279
FilterGraphPriv::nb_outputs_done
unsigned nb_outputs_done
Definition: ffmpeg_filter.c:59
av_buffersink_get_format
int av_buffersink_get_format(const AVFilterContext *ctx)
av_buffersink_get_time_base
AVRational av_buffersink_get_time_base(const AVFilterContext *ctx)
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:304
OutputFilter::ost
struct OutputStream * ost
Definition: ffmpeg.h:285
ist_filter_add
int ist_filter_add(InputStream *ist, InputFilter *ifilter, int is_simple, InputFilterOptions *opts)
Definition: ffmpeg_demux.c:973
AV_BUFFERSRC_FLAG_PUSH
@ AV_BUFFERSRC_FLAG_PUSH
Immediately push the frame to the output.
Definition: buffersrc.h:46
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
FrameData::dec
struct FrameData::@4 dec
ctx
AVFormatContext * ctx
Definition: movenc.c:48
nb_streams
static int nb_streams
Definition: ffprobe.c:330
av_rescale_q
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
AVSubtitle::pts
int64_t pts
Same as packet pts, in AV_TIME_BASE.
Definition: avcodec.h:2279
fg_thread_init
static int fg_thread_init(FilterGraphThread *fgt, const FilterGraph *fg)
Definition: ffmpeg_filter.c:2738
InputFilterOptions::name
uint8_t * name
Definition: ffmpeg.h:263
graph_opts_apply
static int graph_opts_apply(AVFilterGraphSegment *seg)
Definition: ffmpeg_filter.c:550
InputFilter::graph
struct FilterGraph * graph
Definition: ffmpeg.h:280
av_get_sample_fmt_name
const char * av_get_sample_fmt_name(enum AVSampleFormat sample_fmt)
Return the name of sample_fmt, or NULL if sample_fmt is not recognized.
Definition: samplefmt.c:51
key
const char * key
Definition: hwcontext_opencl.c:174
fsize
static int64_t fsize(FILE *f)
Definition: audiomatch.c:29
OutputFilterPriv::fps
FPSConvContext fps
Definition: ffmpeg_filter.c:231
fg_item_name
static const char * fg_item_name(void *obj)
Definition: ffmpeg_filter.c:926
command
static int command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
Definition: vf_drawtext.c:1191
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:87
frame
static AVFrame * frame
Definition: demux_decode.c:54
av_opt_find
const AVOption * av_opt_find(void *obj, const char *name, const char *unit, int opt_flags, int search_flags)
Look for an option in an object.
Definition: opt.c:1782
arg
const char * arg
Definition: jacosubdec.c:67
OutputFilterPriv::ch_layouts
const AVChannelLayout * ch_layouts
Definition: ffmpeg_filter.c:224
OutputFilterPriv::width
int width
Definition: ffmpeg_filter.c:208
av_color_range_name
const char * av_color_range_name(enum AVColorRange range)
Definition: pixdesc.c:3284
AVFormatContext
Format I/O context.
Definition: avformat.h:1363
avfilter_get_by_name
const AVFilter * avfilter_get_by_name(const char *name)
Get a filter definition matching the given name.
Definition: allfilters.c:629
AVStream::codecpar
AVCodecParameters * codecpar
Codec parameters associated with this stream.
Definition: avformat.h:864
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
OutputFilter::name
uint8_t * name
Definition: ffmpeg.h:287
fabs
static __device__ float fabs(float a)
Definition: cuda_runtime.h:182
avfilter_graph_config
int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx)
Check validity and configure all the links and formats in the graph.
Definition: avfiltergraph.c:1325
OutputFilterPriv::enc_timebase
AVRational enc_timebase
Definition: ffmpeg_filter.c:227
AVStream::time_base
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Definition: avformat.h:880
avfilter_graph_segment_apply
int avfilter_graph_segment_apply(AVFilterGraphSegment *seg, int flags, AVFilterInOut **inputs, AVFilterInOut **outputs)
Apply all filter/link descriptions from a graph segment to the associated filtergraph.
Definition: graphparser.c:881
InputFilterPriv::color_space
enum AVColorSpace color_space
Definition: ffmpeg_filter.c:134
NULL
#define NULL
Definition: coverity.c:32
av_opt_set_bin
int av_opt_set_bin(void *obj, const char *name, const uint8_t *val, int len, int search_flags)
Definition: opt.c:649
OutputFilterPriv::ch_layout
AVChannelLayout ch_layout
Definition: ffmpeg_filter.c:210
AVFilterParams
Parameters describing a filter to be created in a filtergraph.
Definition: avfilter.h:1097
FPSConvContext::dup_warning
uint64_t dup_warning
Definition: ffmpeg_filter.c:188
AVCodecContext::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:1043
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:139
InputStream::st
AVStream * st
Definition: ffmpeg.h:364
avfilter_graph_set_auto_convert
void avfilter_graph_set_auto_convert(AVFilterGraph *graph, unsigned flags)
Enable or disable automatic format conversion inside the graph.
Definition: avfiltergraph.c:159
InputFilterPriv::displaymatrix_present
int displaymatrix_present
Definition: ffmpeg_filter.c:146
AVFilterParams::filter
AVFilterContext * filter
The filter context.
Definition: avfilter.h:1108
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
AVFilterChain::nb_filters
size_t nb_filters
Definition: avfilter.h:1166
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:85
AVFilterGraph::filters
AVFilterContext ** filters
Definition: avfilter.h:832
InputFilterPriv::linklabel
uint8_t * linklabel
Definition: ffmpeg_filter.c:119
InputFilterPriv::ist
InputStream * ist
Definition: ffmpeg_filter.c:112
filter_opt_apply
static int filter_opt_apply(AVFilterContext *f, const char *key, const char *val)
Definition: ffmpeg_filter.c:495
OutputFilterPriv::sample_aspect_ratio
AVRational sample_aspect_ratio
Definition: ffmpeg_filter.c:219
close_output
static int close_output(OutputFilterPriv *ofp, FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:2169
FilterGraphThread::frame_queue_out
AVFifo * frame_queue_out
Definition: ffmpeg_filter.c:91
ifilter_bind_ist
static int ifilter_bind_ist(InputFilter *ifilter, InputStream *ist)
Definition: ffmpeg_filter.c:659
mathops.h
configure_output_audio_filter
static int configure_output_audio_filter(FilterGraph *fg, AVFilterGraph *graph, OutputFilter *ofilter, AVFilterInOut *out)
Definition: ffmpeg_filter.c:1326
AV_ROUND_NEAR_INF
@ AV_ROUND_NEAR_INF
Round to nearest and halfway cases away from zero.
Definition: mathematics.h:135
FilterGraphPriv::sch_idx
unsigned sch_idx
Definition: ffmpeg_filter.c:69
FrameData::wallclock
int64_t wallclock[LATENCY_PROBE_NB]
Definition: ffmpeg.h:633
avfilter_graph_request_oldest
int avfilter_graph_request_oldest(AVFilterGraph *graph)
Request a frame on the oldest sink link.
Definition: avfiltergraph.c:1455
time.h
AVFilterGraphSegment::chains
AVFilterChain ** chains
A list of filter chain contained in this segment.
Definition: avfilter.h:1189
AVFilterGraph
Definition: avfilter.h:830
inputs
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
Definition: filter_design.txt:243
InputFilterOptions
Definition: ffmpeg.h:259
InputFilterPriv::sample_aspect_ratio
AVRational sample_aspect_ratio
Definition: ffmpeg_filter.c:133
AVCOL_RANGE_UNSPECIFIED
@ AVCOL_RANGE_UNSPECIFIED
Definition: pixfmt.h:653
FilterGraph::nb_outputs
int nb_outputs
Definition: ffmpeg.h:306
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
OutputFilterPriv::formats
const int * formats
Definition: ffmpeg_filter.c:223
av_opt_set_int
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
Definition: opt.c:634
InputStream::par
AVCodecParameters * par
Codec parameters - to be used by the decoding/streamcopy code.
Definition: ffmpeg.h:372
av_buffer_create
AVBufferRef * av_buffer_create(uint8_t *data, size_t size, void(*free)(void *opaque, uint8_t *data), void *opaque, int flags)
Create an AVBuffer from an existing array.
Definition: buffer.c:55
input_files
InputFile ** input_files
Definition: ffmpeg.c:125
OutputFile::streams
OutputStream ** streams
Definition: ffmpeg.h:605
AV_CLASS_CATEGORY_FILTER
@ AV_CLASS_CATEGORY_FILTER
Definition: log.h:36
Scheduler
Definition: ffmpeg_sched.c:259
FilterGraphPriv::fg
FilterGraph fg
Definition: ffmpeg_filter.c:46
OutputFilterPriv::ofilter
OutputFilter ofilter
Definition: ffmpeg_filter.c:200
FilterGraph
Definition: ffmpeg.h:299
AVFilterGraphSegment
A parsed representation of a filtergraph segment.
Definition: avfilter.h:1178
file_read
char * file_read(const char *filename)
Definition: cmdutils.c:1136
ENC_TIME_BASE_DEMUX
@ ENC_TIME_BASE_DEMUX
Definition: ffmpeg.h:81
InputFilterOptions::sub2video_width
int sub2video_width
Definition: ffmpeg.h:272
AVFilterInOut::pad_idx
int pad_idx
index of the filt_ctx pad to use for linking
Definition: avfilter.h:986
av_buffersrc_close
int av_buffersrc_close(AVFilterContext *ctx, int64_t pts, unsigned flags)
Close the buffer source after EOF.
Definition: buffersrc.c:312
AVFilterGraph::scale_sws_opts
char * scale_sws_opts
sws options to use for the auto-inserted scale filters
Definition: avfilter.h:835
filtergraph_is_simple
int filtergraph_is_simple(const FilterGraph *fg)
Definition: ffmpeg_filter.c:1872
IFILTER_FLAG_REINIT
@ IFILTER_FLAG_REINIT
Definition: ffmpeg.h:255
f
f
Definition: af_crystalizer.c:121
AVIOContext
Bytestream IO Context.
Definition: avio.h:166
configure_output_filter
static int configure_output_filter(FilterGraph *fg, AVFilterGraph *graph, OutputFilter *ofilter, AVFilterInOut *out)
Definition: ffmpeg_filter.c:1434
av_ts2timestr
#define av_ts2timestr(ts, tb)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:76
AVMediaType
AVMediaType
Definition: avutil.h:199
InputFilterPriv::hw_frames_ctx
AVBufferRef * hw_frames_ctx
Definition: ffmpeg_filter.c:144
AVFifo
Definition: fifo.c:35
FRAME_OPAQUE_SEND_COMMAND
@ FRAME_OPAQUE_SEND_COMMAND
Definition: ffmpeg.h:94
FilterGraphThread
Definition: ffmpeg_filter.c:83
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:361
av_bprint_finalize
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:240
InputFilterPriv::displaymatrix
int32_t displaymatrix[9]
Definition: ffmpeg_filter.c:147
AVChannelLayout
An AVChannelLayout holds information about the channel layout of audio data.
Definition: channel_layout.h:313
FilterGraphThread::graph
AVFilterGraph * graph
Definition: ffmpeg_filter.c:84
init_input_filter
static int init_input_filter(FilterGraph *fg, InputFilter *ifilter)
Definition: ffmpeg_filter.c:1096
av_buffersrc_parameters_alloc
AVBufferSrcParameters * av_buffersrc_parameters_alloc(void)
Allocate a new AVBufferSrcParameters instance.
Definition: buffersrc.c:105
AVFilterInOut::filter_ctx
AVFilterContext * filter_ctx
filter context associated to this input/output
Definition: avfilter.h:983
av_err2str
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:121
AVFrame::sample_rate
int sample_rate
Sample rate of the audio data.
Definition: frame.h:567
OutputFilterPriv::tb_out_locked
int tb_out_locked
Definition: ffmpeg_filter.c:217
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
avfilter_link
int avfilter_link(AVFilterContext *src, unsigned srcpad, AVFilterContext *dst, unsigned dstpad)
Link two filters together.
Definition: avfilter.c:148
AVBufferSrcParameters::hw_frames_ctx
AVBufferRef * hw_frames_ctx
Video with a hwaccel pixel format only.
Definition: buffersrc.h:106
start_time
static int64_t start_time
Definition: ffplay.c:329
AVFILTER_FLAG_HWDEVICE
#define AVFILTER_FLAG_HWDEVICE
The filter can create hardware frames using AVFilterContext.hw_device_ctx.
Definition: avfilter.h:138
adjust_frame_pts_to_encoder_tb
static double adjust_frame_pts_to_encoder_tb(AVFrame *frame, AVRational tb_dst, int64_t start_time)
Definition: ffmpeg_filter.c:2004
InputFilterPriv::color_range
enum AVColorRange color_range
Definition: ffmpeg_filter.c:135
AVCodecContext::sample_fmt
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:1084
AV_SAMPLE_FMT_NONE
@ AV_SAMPLE_FMT_NONE
Definition: samplefmt.h:56
MATRIX_CHANGED
@ MATRIX_CHANGED
Definition: ffmpeg_filter.c:2569
FilterCommand::time
double time
Definition: ffmpeg_filter.c:244
insert_trim
static int insert_trim(int64_t start_time, int64_t duration, AVFilterContext **last_filter, int *pad_idx, const char *filter_name)
Definition: ffmpeg_filter.c:1173
InputFilterPriv::initialize
unsigned int initialize
Definition: ffmpeg_filter.c:170
graph_parse
static int graph_parse(AVFilterGraph *graph, const char *desc, AVFilterInOut **inputs, AVFilterInOut **outputs, AVBufferRef *hw_device)
Definition: ffmpeg_filter.c:574
avfilter_graph_queue_command
int avfilter_graph_queue_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, int flags, double ts)
Queue a command for one or more filter instances.
Definition: avfiltergraph.c:1373
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
AVFrame::time_base
AVRational time_base
Time base for the timestamps in this frame.
Definition: frame.h:467
AVFrameSideData::data
uint8_t * data
Definition: frame.h:248
read_binary
static int read_binary(const char *path, uint8_t **data, int *len)
Definition: ffmpeg_filter.c:447
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:427
FilterGraphPriv::disable_conversions
int disable_conversions
Definition: ffmpeg_filter.c:57
frame_data
FrameData * frame_data(AVFrame *frame)
Get our axiliary frame data attached to the frame, allocating it if needed.
Definition: ffmpeg.c:426
AVSubtitle::end_display_time
uint32_t end_display_time
Definition: avcodec.h:2276
FilterGraphThread::eof_out
uint8_t * eof_out
Definition: ffmpeg_filter.c:100
FilterGraphPriv::graph_desc
const char * graph_desc
Definition: ffmpeg_filter.c:61
allocate_array_elem
void * allocate_array_elem(void *ptr, size_t elem_size, int *nb_elems)
Atomically add a new element to an array of pointers, i.e.
Definition: cmdutils.c:1108
AV_OPT_SEARCH_CHILDREN
#define AV_OPT_SEARCH_CHILDREN
Search in possible children of the given object first.
Definition: opt.h:563
InputFilterPriv::width
int width
Definition: ffmpeg_filter.c:132
AV_PIX_FMT_RGB32
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:455
filter_is_buffersrc
static int filter_is_buffersrc(const AVFilterContext *f)
Definition: ffmpeg_filter.c:1643
AUDIO_CHANGED
@ AUDIO_CHANGED
Definition: ffmpeg_filter.c:2568
SCH_DEC
#define SCH_DEC(decoder)
Definition: ffmpeg_sched.h:113
InputFilterPriv::fallback
struct InputFilterPriv::@5 fallback
sch_filter_receive
int sch_filter_receive(Scheduler *sch, unsigned fg_idx, unsigned *in_idx, AVFrame *frame)
Called by filtergraph tasks to obtain frames for filtering.
Definition: ffmpeg_sched.c:2110
av_dict_free
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
Definition: dict.c:225
unknown_if_null
static const char * unknown_if_null(const char *str)
Definition: ffmpeg_filter.c:2573
InputFilterOptions::sub2video_height
int sub2video_height
Definition: ffmpeg.h:273
FF_COMPLIANCE_UNOFFICIAL
#define FF_COMPLIANCE_UNOFFICIAL
Allow unofficial extensions.
Definition: defs.h:61
OutputFilter::type
enum AVMediaType type
Definition: ffmpeg.h:293
read_frames
static int read_frames(FilterGraph *fg, FilterGraphThread *fgt, AVFrame *frame)
Definition: ffmpeg_filter.c:2381
av_channel_layout_compare
int av_channel_layout_compare(const AVChannelLayout *chl, const AVChannelLayout *chl1)
Check whether two channel layouts are semantically the same, i.e.
Definition: channel_layout.c:1003
SUBTITLE_BITMAP
@ SUBTITLE_BITMAP
A bitmap, pict will be set.
Definition: avcodec.h:2228
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:191
sample_rates
sample_rates
Definition: ffmpeg_filter.c:424
send_frame
static int send_frame(FilterGraph *fg, FilterGraphThread *fgt, InputFilter *ifilter, AVFrame *frame)
Definition: ffmpeg_filter.c:2578
avfilter_init_str
int avfilter_init_str(AVFilterContext *filter, const char *args)
Initialize a filter with the supplied parameters.
Definition: avfilter.c:933
buffersink.h
av_channel_layout_default
void av_channel_layout_default(AVChannelLayout *ch_layout, int nb_channels)
Get the default channel layout for a given number of channels.
Definition: channel_layout.c:1033
AVFILTER_AUTO_CONVERT_NONE
@ AVFILTER_AUTO_CONVERT_NONE
all automatic conversions disabled
Definition: avfilter.h:951
av_find_nearest_q_idx
int av_find_nearest_q_idx(AVRational q, const AVRational *q_list)
Find the value in a list of rationals nearest a given reference rational.
Definition: rational.c:142
av_buffersink_get_w
int av_buffersink_get_w(const AVFilterContext *ctx)
FilterCommand::all_filters
int all_filters
Definition: ffmpeg_filter.c:245
AVFrame::nb_samples
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:420
FPSConvContext::framerate_clip
int framerate_clip
Definition: ffmpeg_filter.c:196
bprint.h
FPSConvContext::frame_number
int64_t frame_number
Definition: ffmpeg_filter.c:182
av_buffersrc_parameters_set
int av_buffersrc_parameters_set(AVFilterContext *ctx, AVBufferSrcParameters *param)
Initialize the buffersrc or abuffersrc filter with the provided parameters.
Definition: buffersrc.c:118
FPSConvContext
Definition: ffmpeg_filter.c:179
lrintf
#define lrintf(x)
Definition: libm_mips.h:72
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:255
InputFilterPriv::index
int index
Definition: ffmpeg_filter.c:108
FrameData::bits_per_raw_sample
int bits_per_raw_sample
Definition: ffmpeg.h:631
fg_send_command
void fg_send_command(FilterGraph *fg, double time, const char *target, const char *command, const char *arg, int all_filters)
Definition: ffmpeg_filter.c:2882
FilterGraphPriv::is_simple
int is_simple
Definition: ffmpeg_filter.c:51
av_buffersrc_add_frame_flags
int attribute_align_arg av_buffersrc_add_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Add a frame to the buffer source.
Definition: buffersrc.c:203
VSYNC_CFR
@ VSYNC_CFR
Definition: ffmpeg.h:72
src2
const pixel * src2
Definition: h264pred_template.c:422
configure_input_audio_filter
static int configure_input_audio_filter(FilterGraph *fg, AVFilterGraph *graph, InputFilter *ifilter, AVFilterInOut *in)
Definition: ffmpeg_filter.c:1581
AVColorSpace
AVColorSpace
YUV colorspace type.
Definition: pixfmt.h:613
display.h
FPSConvContext::framerate_max
AVRational framerate_max
Definition: ffmpeg_filter.c:194
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:56
InputFilterPriv::sub2video
struct InputFilterPriv::@6 sub2video
AVSampleFormat
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:55
delta
float delta
Definition: vorbis_enc_data.h:430
FRAME_OPAQUE_EOF
@ FRAME_OPAQUE_EOF
Definition: ffmpeg.h:93
InputFile::ctx
AVFormatContext * ctx
Definition: ffmpeg.h:405
av_frame_move_ref
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:649
cfgp_from_cfg
static const FilterGraphPriv * cfgp_from_cfg(const FilterGraph *fg)
Definition: ffmpeg_filter.c:77
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:622
InputFilterPriv::eof
int eof
Definition: ffmpeg_filter.c:127
tb
#define tb
Definition: regdef.h:68
AVFrame::pkt_duration
attribute_deprecated int64_t pkt_duration
duration of the corresponding packet, expressed in AVStream->time_base units, 0 if unknown.
Definition: frame.h:700
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:254
av_inv_q
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
av_buffer_replace
int av_buffer_replace(AVBufferRef **pdst, const AVBufferRef *src)
Ensure dst refers to the same data as src.
Definition: buffer.c:233
len
int len
Definition: vorbis_enc_data.h:426
AVCOL_SPC_UNSPECIFIED
@ AVCOL_SPC_UNSPECIFIED
Definition: pixfmt.h:616
ofilter_bind_ost
int ofilter_bind_ost(OutputFilter *ofilter, OutputStream *ost, unsigned sched_idx_enc)
Definition: ffmpeg_filter.c:746
filtergraphs
FilterGraph ** filtergraphs
Definition: ffmpeg.c:131
int_cb
const AVIOInterruptCB int_cb
Definition: ffmpeg.c:324
AVCodecContext::height
int height
Definition: avcodec.h:625
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:662
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:262
outputs
static const AVFilterPad outputs[]
Definition: af_aap.c:310
sch_connect
int sch_connect(Scheduler *sch, SchedulerNode src, SchedulerNode dst)
Definition: ffmpeg_sched.c:933
FFMPEG_OPT_VSYNC_DROP
#define FFMPEG_OPT_VSYNC_DROP
Definition: ffmpeg.h:63
av_buffersink_get_h
int av_buffersink_get_h(const AVFilterContext *ctx)
sch_filter_command
int sch_filter_command(Scheduler *sch, unsigned fg_idx, AVFrame *frame)
Definition: ffmpeg_sched.c:2212
AVFilter
Filter definition.
Definition: avfilter.h:166
video_sync_process
static void video_sync_process(OutputFilterPriv *ofp, AVFrame *frame, int64_t *nb_frames, int64_t *nb_frames_prev)
Definition: ffmpeg_filter.c:2046
ifp_from_ifilter
static InputFilterPriv * ifp_from_ifilter(InputFilter *ifilter)
Definition: ffmpeg_filter.c:174
fg_create
int fg_create(FilterGraph **pfg, char *graph_desc, Scheduler *sch)
Create a new filtergraph in the global filtergraph list.
Definition: ffmpeg_filter.c:940
mid_pred
#define mid_pred
Definition: mathops.h:98
AV_BUFFERSINK_FLAG_NO_REQUEST
#define AV_BUFFERSINK_FLAG_NO_REQUEST
Tell av_buffersink_get_buffer_ref() not to request a frame from its input.
Definition: buffersink.h:97
ret
ret
Definition: filter_design.txt:187
AVStream
Stream structure.
Definition: avformat.h:841
AV_LOG_FATAL
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:174
pixfmt.h
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:71
InputFilter::name
uint8_t * name
Definition: ffmpeg.h:281
VSYNC_DROP
@ VSYNC_DROP
Definition: ffmpeg.h:76
av_opt_eval_int
int av_opt_eval_int(void *obj, const AVOption *o, const char *val, int *int_out)
av_channel_layout_check
int av_channel_layout_check(const AVChannelLayout *channel_layout)
Check whether a channel layout is valid, i.e.
Definition: channel_layout.c:977
FPSConvContext::last_frame
AVFrame * last_frame
Definition: ffmpeg_filter.c:180
InputFile::streams
InputStream ** streams
Definition: ffmpeg.h:419
insert_filter
static int insert_filter(AVFilterContext **last_filter, int *pad_idx, const char *filter_name, const char *args)
Definition: ffmpeg_filter.c:1224
AVFilterParams::opts
AVDictionary * opts
Options to be apllied to the filter.
Definition: avfilter.h:1149
OutputFilterPriv::next_pts
int64_t next_pts
Definition: ffmpeg_filter.c:230
choose_channel_layouts
static void choose_channel_layouts(OutputFilterPriv *ofp, AVBPrint *bprint)
Definition: ffmpeg_filter.c:427
av_bprintf
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:99
ReinitReason
ReinitReason
Definition: ffmpeg_filter.c:2566
av_fifo_alloc2
AVFifo * av_fifo_alloc2(size_t nb_elems, size_t elem_size, unsigned int flags)
Allocate and initialize an AVFifo with a given element size.
Definition: fifo.c:47
AVOption::type
enum AVOptionType type
Definition: opt.h:265
AVFrame::sample_aspect_ratio
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:447
avfilter_pad_get_type
enum AVMediaType avfilter_pad_get_type(const AVFilterPad *pads, int pad_idx)
Get the type of an AVFilterPad.
Definition: avfilter.c:966
AVFrame::hw_frames_ctx
AVBufferRef * hw_frames_ctx
For hwaccel-format frames, this should be a reference to the AVHWFramesContext describing the frame.
Definition: frame.h:752
FrameOpaque
FrameOpaque
Definition: ffmpeg.h:91
av_get_media_type_string
const char * av_get_media_type_string(enum AVMediaType media_type)
Return a string describing the media_type enum, NULL if media_type is unknown.
Definition: utils.c:28
AVCodecContext
main external API structure.
Definition: avcodec.h:445
AVFrame::height
int height
Definition: frame.h:412
AVStream::index
int index
stream index in AVFormatContext
Definition: avformat.h:847
DEF_CHOOSE_FORMAT
#define DEF_CHOOSE_FORMAT(name, type, var, supported_list, none, printf_format, get_name)
Definition: ffmpeg_filter.c:398
channel_layout.h
AVBufferSrcParameters
This structure contains the parameters describing the frames that will be passed to this filter.
Definition: buffersrc.h:73
av_buffersink_get_sample_rate
int av_buffersink_get_sample_rate(const AVFilterContext *ctx)
describe_filter_link
static char * describe_filter_link(FilterGraph *fg, AVFilterInOut *inout, int in)
Definition: ffmpeg_filter.c:630
buffer
the frame and frame reference mechanism is intended to as much as expensive copies of that data while still allowing the filters to produce correct results The data is stored in buffers represented by AVFrame structures Several references can point to the same frame buffer
Definition: filter_design.txt:49
AVRational::den
int den
Denominator.
Definition: rational.h:60
AVFilterChain
A filterchain is a list of filter specifications.
Definition: avfilter.h:1164
InputFilterPriv::frame_queue
AVFifo * frame_queue
Definition: ffmpeg_filter.c:142
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
atomic_fetch_add
#define atomic_fetch_add(object, operand)
Definition: stdatomic.h:131
avfilter.h
InputFilterPriv::type_src
enum AVMediaType type_src
Definition: ffmpeg_filter.c:125
av_bprint_clear
void av_bprint_clear(AVBPrint *buf)
Reset the string to "" but keep internal allocated data.
Definition: bprint.c:232
av_channel_layout_uninit
void av_channel_layout_uninit(AVChannelLayout *channel_layout)
Free any allocated data in the channel layout and reset the channel count to 0.
Definition: channel_layout.c:662
FilterGraphPriv::is_meta
int is_meta
Definition: ffmpeg_filter.c:54
IFILTER_FLAG_CFR
@ IFILTER_FLAG_CFR
Definition: ffmpeg.h:256
AVFILTER_FLAG_METADATA_ONLY
#define AVFILTER_FLAG_METADATA_ONLY
The filter is a "metadata" filter - it does not modify the frame data in any way.
Definition: avfilter.h:133
avio_read
int avio_read(AVIOContext *s, unsigned char *buf, int size)
Read size bytes from AVIOContext into buf.
Definition: aviobuf.c:659
choose_out_timebase
static int choose_out_timebase(OutputFilterPriv *ofp, AVFrame *frame)
Definition: ffmpeg_filter.c:1927
OutputFilterPriv::sample_rates
const int * sample_rates
Definition: ffmpeg_filter.c:225
AVERROR_FILTER_NOT_FOUND
#define AVERROR_FILTER_NOT_FOUND
Filter not found.
Definition: error.h:60
sub2video_copy_rect
static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h, AVSubtitleRect *r)
Definition: ffmpeg_filter.c:281
AVFilterGraphSegment::nb_chains
size_t nb_chains
Definition: avfilter.h:1190
AVFilterContext
An instance of a filter.
Definition: avfilter.h:409
FilterGraph::class
const AVClass * class
Definition: ffmpeg.h:300
av_channel_layout_copy
int av_channel_layout_copy(AVChannelLayout *dst, const AVChannelLayout *src)
Make a copy of a channel layout.
Definition: channel_layout.c:669
OutputFilter
Definition: ffmpeg.h:284
sub2video_frame
static int sub2video_frame(InputFilter *ifilter, AVFrame *frame, int buffer)
Definition: ffmpeg_filter.c:2465
InputFilterPriv::ifilter
InputFilter ifilter
Definition: ffmpeg_filter.c:104
AVIO_FLAG_READ
#define AVIO_FLAG_READ
read-only
Definition: avio.h:636
AVCodecContext::codec_type
enum AVMediaType codec_type
Definition: avcodec.h:453
av_strdup
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:270
desc
const char * desc
Definition: libsvtav1.c:83
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
av_buffersrc_get_nb_failed_requests
unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src)
Get the number of failed requests.
Definition: buffersrc.c:347
AVBufferRef
A reference to a data buffer.
Definition: buffer.h:82
OutputFilter::nb_frames_drop
atomic_uint_least64_t nb_frames_drop
Definition: ffmpeg.h:296
auto_conversion_filters
int auto_conversion_filters
Definition: ffmpeg_opt.c:85
llrint
#define llrint(x)
Definition: libm.h:394
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:246
OutputStream::is_cfr
int is_cfr
Definition: ffmpeg.h:538
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
OutputStream::autoscale
int autoscale
Definition: ffmpeg.h:546
sch_filter_receive_finish
void sch_filter_receive_finish(Scheduler *sch, unsigned fg_idx, unsigned in_idx)
Called by filter tasks to signal that a filter input will no longer accept input.
Definition: ffmpeg_sched.c:2154
av_free
#define av_free(p)
Definition: tableprint_vlc.h:33
AVDictionaryEntry
Definition: dict.h:89
ENC_TIME_BASE_FILTER
@ ENC_TIME_BASE_FILTER
Definition: ffmpeg.h:82
FilterCommand::target
char * target
Definition: ffmpeg_filter.c:240
AV_ROUND_PASS_MINMAX
@ AV_ROUND_PASS_MINMAX
Flag telling rescaling functions to pass INT64_MIN/MAX through unchanged, avoiding special cases for ...
Definition: mathematics.h:159
fg_class
static const AVClass fg_class
Definition: ffmpeg_filter.c:933
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
av_dict_get_string
int av_dict_get_string(const AVDictionary *m, char **buffer, const char key_val_sep, const char pairs_sep)
Get dictionary entries as a string.
Definition: dict.c:252
InputFilterPriv::format
int format
Definition: ffmpeg_filter.c:130
InputFilterPriv::end_pts
int64_t end_pts
marks if sub2video_update should force an initialization
Definition: ffmpeg_filter.c:167
nb_filtergraphs
int nb_filtergraphs
Definition: ffmpeg.c:132
d
d
Definition: ffmpeg_filter.c:424
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:625
int32_t
int32_t
Definition: audioconvert.c:56
sub2video_update
static void sub2video_update(InputFilterPriv *ifp, int64_t heartbeat_pts, const AVSubtitle *sub)
Definition: ffmpeg_filter.c:327
imgutils.h
timestamp.h
filter_thread
static void * filter_thread(void *arg)
Definition: ffmpeg_filter.c:2765
OutputStream
Definition: mux.c:53
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
OutputStream::st
AVStream * st
Definition: mux.c:54
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:385
OutputFilterPriv::format
int format
Definition: ffmpeg_filter.c:207
avfilter_graph_send_command
int avfilter_graph_send_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, char *res, int res_len, int flags)
Send a command to one or more filter instances.
Definition: avfiltergraph.c:1343
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
InputFilterPriv::opts
InputFilterOptions opts
Definition: ffmpeg_filter.c:106
dts_error_threshold
float dts_error_threshold
Definition: ffmpeg_opt.c:63
av_fifo_freep2
void av_fifo_freep2(AVFifo **f)
Free an AVFifo and reset pointer to NULL.
Definition: fifo.c:286
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
av_ts2str
#define av_ts2str(ts)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:54
h
h
Definition: vp9dsp_template.c:2038
av_bprint_chars
void av_bprint_chars(AVBPrint *buf, char c, unsigned n)
Append char c n times to a print buffer.
Definition: bprint.c:145
hw_device_for_filter
AVBufferRef * hw_device_for_filter(void)
Get a hardware device to be used with this filtergraph.
Definition: ffmpeg_hw.c:300
AVDictionaryEntry::value
char * value
Definition: dict.h:91
AVFilterGraph::nb_filters
unsigned nb_filters
Definition: avfilter.h:833
avstring.h
AVFilterContext::filter
const AVFilter * filter
the AVFilter of which this is an instance
Definition: avfilter.h:412
AVColorRange
AVColorRange
Visual content value range.
Definition: pixfmt.h:652
OutputFile::recording_time
int64_t recording_time
desired length of the resulting file in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:608
frame_data_c
const FrameData * frame_data_c(AVFrame *frame)
Definition: ffmpeg.c:432
OutputFilterPriv::tb_out
AVRational tb_out
Definition: ffmpeg_filter.c:214
AVFilterInOut
A linked-list of the inputs/outputs of the filter chain.
Definition: avfilter.h:978
VSYNC_PASSTHROUGH
@ VSYNC_PASSTHROUGH
Definition: ffmpeg.h:71
OutputFilterPriv::height
int height
Definition: ffmpeg_filter.c:208
snprintf
#define snprintf
Definition: snprintf.h:34
SCH_FILTER_IN
#define SCH_FILTER_IN(filter, input)
Definition: ffmpeg_sched.h:119
FPSConvContext::framerate
AVRational framerate
Definition: ffmpeg_filter.c:193
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
buffersrc.h
fg_thread_set_name
static void fg_thread_set_name(const FilterGraph *fg)
Definition: ffmpeg_filter.c:2705
ist_find_unused
InputStream * ist_find_unused(enum AVMediaType type)
Find an unused input stream of given type.
Definition: ffmpeg_demux.c:151
sub2video_prepare
static void sub2video_prepare(InputFilterPriv *ifp)
Definition: ffmpeg_filter.c:1466
AVCodecContext::sample_aspect_ratio
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel.
Definition: avcodec.h:826
av_rescale_q_rnd
int64_t av_rescale_q_rnd(int64_t a, AVRational bq, AVRational cq, enum AVRounding rnd)
Rescale a 64-bit integer by 2 rational numbers with specified rounding.
Definition: mathematics.c:134
av_dict_iterate
const AVDictionaryEntry * av_dict_iterate(const AVDictionary *m, const AVDictionaryEntry *prev)
Iterate over a dictionary.
Definition: dict.c:44
AVSubtitle::start_display_time
uint32_t start_display_time
Definition: avcodec.h:2275
FilterCommand::command
char * command
Definition: ffmpeg_filter.c:241
FilterCommand
Definition: ffmpeg_filter.c:239
AV_FIFO_FLAG_AUTO_GROW
#define AV_FIFO_FLAG_AUTO_GROW
Automatically resize the FIFO on writes, so that the data fits.
Definition: fifo.h:67
InputFilterPriv::height
int height
Definition: ffmpeg_filter.c:132
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2888
OutputFilter::nb_frames_dup
atomic_uint_least64_t nb_frames_dup
Definition: ffmpeg.h:295
filter_complex_nbthreads
int filter_complex_nbthreads
Definition: ffmpeg_opt.c:83
OutputFile
Definition: ffmpeg.h:597
InputFilterOptions::framerate
AVRational framerate
Definition: ffmpeg.h:270
ff_thread_setname
static int ff_thread_setname(const char *name)
Definition: thread.h:216
LATENCY_PROBE_FILTER_POST
@ LATENCY_PROBE_FILTER_POST
Definition: ffmpeg.h:107
FPSConvContext::framerate_supported
const AVRational * framerate_supported
Definition: ffmpeg_filter.c:195