FFmpeg
ffmpeg_filter.c
1 /*
2  * ffmpeg filter configuration
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <stdint.h>
22 
23 #include "ffmpeg.h"
24 
25 #include "libavfilter/avfilter.h"
26 #include "libavfilter/buffersink.h"
27 #include "libavfilter/buffersrc.h"
28 
29 #include "libavutil/avassert.h"
30 #include "libavutil/avstring.h"
31 #include "libavutil/bprint.h"
32 #include "libavutil/channel_layout.h"
33 #include "libavutil/display.h"
34 #include "libavutil/opt.h"
35 #include "libavutil/pixdesc.h"
36 #include "libavutil/pixfmt.h"
37 #include "libavutil/imgutils.h"
38 #include "libavutil/samplefmt.h"
39 #include "libavutil/timestamp.h"
40 
41 typedef struct FilterGraphPriv {
42  FilterGraph fg;
43 
44  // name used for logging
45  char log_name[32];
46 
47  int is_simple;
48  // true when the filtergraph contains only meta filters
49  // that do not modify the frame data
50  int is_meta;
51  int disable_conversions;
52 
53  const char *graph_desc;
54 
55  // frame for temporarily holding output from the filtergraph
56  AVFrame *frame;
57 } FilterGraphPriv;
58 
59 static FilterGraphPriv *fgp_from_fg(FilterGraph *fg)
60 {
61  return (FilterGraphPriv*)fg;
62 }
63 
64 static const FilterGraphPriv *cfgp_from_cfg(const FilterGraph *fg)
65 {
66  return (const FilterGraphPriv*)fg;
67 }
68 
69 typedef struct InputFilterPriv {
71 
73 
75 
76  // used to hold submitted input
78 
79  /* for filters that are not yet bound to an input stream,
80  * this stores the input linklabel, if any */
81  uint8_t *linklabel;
82 
83  // filter data type
85  // source data type: AVMEDIA_TYPE_SUBTITLE for sub2video,
86  // same as type otherwise
88 
89  int eof;
90 
91  // parameters configured for this input
92  int format;
93 
94  int width, height;
96 
99 
101 
103 
105 
108 
109  // fallback parameters to use when no input is ever sent
110  struct {
111  int format;
112 
113  int width;
114  int height;
116 
117  int sample_rate;
119  } fallback;
120 
121  struct {
122  AVFrame *frame;
123 
124  int64_t last_pts;
125  int64_t end_pts;
126 
127  ///< marks if sub2video_update should force an initialization
128  unsigned int initialize;
129  } sub2video;
131 
132 static InputFilterPriv *ifp_from_ifilter(InputFilter *ifilter)
133 {
134  return (InputFilterPriv*)ifilter;
135 }
136 
137 typedef struct OutputFilterPriv {
139 
141 
142  /* desired output stream properties */
143  int format;
144  int width, height;
147 
150 
151  // These are only set if no format is specified and the encoder gives us multiple options.
152  // They point directly to the relevant lists of the encoder.
153  const int *formats;
155  const int *sample_rates;
156 
157  // set to 1 after at least one frame passed through this output
160 
161 static OutputFilterPriv *ofp_from_ofilter(OutputFilter *ofilter)
162 {
163  return (OutputFilterPriv*)ofilter;
164 }
165 
166 static int configure_filtergraph(FilterGraph *fg);
167 
168 static int sub2video_get_blank_frame(InputFilterPriv *ifp)
169 {
170  AVFrame *frame = ifp->sub2video.frame;
171  int ret;
172 
174 
175  frame->width = ifp->width;
176  frame->height = ifp->height;
177  frame->format = ifp->format;
178 
179  ret = av_frame_get_buffer(frame, 0);
180  if (ret < 0)
181  return ret;
182 
183  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
184 
185  return 0;
186 }
187 
188 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
189  AVSubtitleRect *r)
190 {
191  uint32_t *pal, *dst2;
192  uint8_t *src, *src2;
193  int x, y;
194 
195  if (r->type != SUBTITLE_BITMAP) {
196  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
197  return;
198  }
199  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
200  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
201  r->x, r->y, r->w, r->h, w, h
202  );
203  return;
204  }
205 
206  dst += r->y * dst_linesize + r->x * 4;
207  src = r->data[0];
208  pal = (uint32_t *)r->data[1];
209  for (y = 0; y < r->h; y++) {
210  dst2 = (uint32_t *)dst;
211  src2 = src;
212  for (x = 0; x < r->w; x++)
213  *(dst2++) = pal[*(src2++)];
214  dst += dst_linesize;
215  src += r->linesize[0];
216  }
217 }
218 
219 static void sub2video_push_ref(InputFilterPriv *ifp, int64_t pts)
220 {
221  AVFrame *frame = ifp->sub2video.frame;
222  int ret;
223 
224  av_assert1(frame->data[0]);
225  ifp->sub2video.last_pts = frame->pts = pts;
226  ret = av_buffersrc_add_frame_flags(ifp->filter, frame,
227  AV_BUFFERSRC_FLAG_KEEP_REF |
228  AV_BUFFERSRC_FLAG_PUSH);
229  if (ret != AVERROR_EOF && ret < 0)
230  av_log(NULL, AV_LOG_WARNING, "Error while adding the frame to the buffer source (%s).\n",
231  av_err2str(ret));
232 }
233 
234 static void sub2video_update(InputFilterPriv *ifp, int64_t heartbeat_pts,
235  const AVSubtitle *sub)
236 {
237  AVFrame *frame = ifp->sub2video.frame;
238  uint8_t *dst;
239  int dst_linesize;
240  int num_rects, i;
241  int64_t pts, end_pts;
242 
243  if (sub) {
244  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
245  AV_TIME_BASE_Q, ifp->time_base);
246  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
247  AV_TIME_BASE_Q, ifp->time_base);
248  num_rects = sub->num_rects;
249  } else {
250  /* If we are initializing the system, utilize current heartbeat
251  PTS as the start time, and show until the following subpicture
252  is received. Otherwise, utilize the previous subpicture's end time
253  as the fall-back value. */
254  pts = ifp->sub2video.initialize ?
255  heartbeat_pts : ifp->sub2video.end_pts;
256  end_pts = INT64_MAX;
257  num_rects = 0;
258  }
259  if (sub2video_get_blank_frame(ifp) < 0) {
260  av_log(NULL, AV_LOG_ERROR,
261  "Impossible to get a blank canvas.\n");
262  return;
263  }
264  dst = frame->data[0];
265  dst_linesize = frame->linesize[0];
266  for (i = 0; i < num_rects; i++)
267  sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
268  sub2video_push_ref(ifp, pts);
269  ifp->sub2video.end_pts = end_pts;
270  ifp->sub2video.initialize = 0;
271 }
272 
273 /* *dst may be set to NULL (no pixel format found), to a static string, or to
274  * a string backed by the bprint. Nothing has been written to the AVBPrint if
275  * NULL is returned. The AVBPrint provided should be clean. */
276 static int choose_pix_fmts(OutputFilter *ofilter, AVBPrint *bprint,
277  const char **dst)
278 {
279  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
280  OutputStream *ost = ofilter->ost;
281 
282  *dst = NULL;
283 
284  if (ost->keep_pix_fmt || ofp->format != AV_PIX_FMT_NONE) {
285  *dst = ofp->format == AV_PIX_FMT_NONE ? NULL :
286  av_get_pix_fmt_name(ofp->format);
287  } else if (ofp->formats) {
288  const enum AVPixelFormat *p = ofp->formats;
289 
290  for (; *p != AV_PIX_FMT_NONE; p++) {
291  const char *name = av_get_pix_fmt_name(*p);
292  av_bprintf(bprint, "%s%c", name, p[1] == AV_PIX_FMT_NONE ? '\0' : '|');
293  }
294  if (!av_bprint_is_complete(bprint))
295  return AVERROR(ENOMEM);
296 
297  *dst = bprint->str;
298  }
299 
300  return 0;
301 }
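/* Illustrative sketch, not part of the original file: how a caller is expected
 * to use choose_pix_fmts() given the contract described above. The AVBPrint
 * must stay alive for as long as the returned string is used, since *dst may
 * point into it. */
static int choose_pix_fmts_usage_sketch(OutputFilter *ofilter)
{
    AVBPrint bprint;
    const char *pix_fmts = NULL;
    int ret;

    av_bprint_init(&bprint, 0, AV_BPRINT_SIZE_UNLIMITED);

    ret = choose_pix_fmts(ofilter, &bprint, &pix_fmts);
    if (ret >= 0 && pix_fmts)
        av_log(NULL, AV_LOG_VERBOSE, "allowed pixel formats: %s\n", pix_fmts);

    av_bprint_finalize(&bprint, NULL);
    return ret;
}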
302 
303 /* Define a function for appending a list of allowed formats
304  * to an AVBPrint. If nonempty, the list will have a header. */
305 #define DEF_CHOOSE_FORMAT(name, type, var, supported_list, none, printf_format, get_name) \
306 static void choose_ ## name (OutputFilterPriv *ofp, AVBPrint *bprint) \
307 { \
308  if (ofp->var == none && !ofp->supported_list) \
309  return; \
310  av_bprintf(bprint, #name "="); \
311  if (ofp->var != none) { \
312  av_bprintf(bprint, printf_format, get_name(ofp->var)); \
313  } else { \
314  const type *p; \
315  \
316  for (p = ofp->supported_list; *p != none; p++) { \
317  av_bprintf(bprint, printf_format "|", get_name(*p)); \
318  } \
319  if (bprint->len > 0) \
320  bprint->str[--bprint->len] = '\0'; \
321  } \
322  av_bprint_chars(bprint, ':', 1); \
323 }
324 
325 //DEF_CHOOSE_FORMAT(pix_fmts, enum AVPixelFormat, format, formats, AV_PIX_FMT_NONE,
326 // GET_PIX_FMT_NAME)
327 
328 DEF_CHOOSE_FORMAT(sample_fmts, enum AVSampleFormat, format, formats,
329  AV_SAMPLE_FMT_NONE, "%s", av_get_sample_fmt_name)
330 
331 DEF_CHOOSE_FORMAT(sample_rates, int, sample_rate, sample_rates, 0,
332  "%d", )
333 
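/* Illustrative sketch, not part of the original file: roughly what the
 * DEF_CHOOSE_FORMAT(sample_rates, ...) invocation above expands to, i.e. either
 * the single configured sample rate or a '|'-separated list of the
 * encoder-supported ones, terminated by ':'. Disabled so it does not clash with
 * the macro-generated definition. */
#if 0
static void choose_sample_rates(OutputFilterPriv *ofp, AVBPrint *bprint)
{
    if (ofp->sample_rate == 0 && !ofp->sample_rates)
        return;
    av_bprintf(bprint, "sample_rates=");
    if (ofp->sample_rate != 0) {
        av_bprintf(bprint, "%d", ofp->sample_rate);
    } else {
        const int *p;

        for (p = ofp->sample_rates; *p != 0; p++)
            av_bprintf(bprint, "%d|", *p);
        if (bprint->len > 0)
            bprint->str[--bprint->len] = '\0';
    }
    av_bprint_chars(bprint, ':', 1);
}
#endif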
334 static void choose_channel_layouts(OutputFilterPriv *ofp, AVBPrint *bprint)
335 {
336  if (av_channel_layout_check(&ofp->ch_layout)) {
337  av_bprintf(bprint, "channel_layouts=");
338  av_channel_layout_describe_bprint(&ofp->ch_layout, bprint);
339  } else if (ofp->ch_layouts) {
340  const AVChannelLayout *p;
341 
342  av_bprintf(bprint, "channel_layouts=");
343  for (p = ofp->ch_layouts; p->nb_channels; p++) {
344  av_channel_layout_describe_bprint(p, bprint);
345  av_bprintf(bprint, "|");
346  }
347  if (bprint->len > 0)
348  bprint->str[--bprint->len] = '\0';
349  } else
350  return;
351  av_bprint_chars(bprint, ':', 1);
352 }
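/* Illustrative sketch, not part of the original file: the choose_* helpers
 * above are combined further below (configure_output_audio_filter) into the
 * option string of an aformat filter, e.g. something of the shape
 * "sample_fmts=fltp:sample_rates=44100|48000:channel_layouts=stereo:". */
static void aformat_args_sketch(OutputFilterPriv *ofp, AVBPrint *args)
{
    av_bprint_init(args, 0, AV_BPRINT_SIZE_UNLIMITED);
    choose_sample_fmts(ofp, args);
    choose_sample_rates(ofp, args);
    choose_channel_layouts(ofp, args);
    /* args->str is then passed verbatim as the argument string of an
     * "aformat" filter instance; sections with nothing to constrain are
     * simply not appended. */
}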
353 
354 static int read_binary(const char *path, uint8_t **data, int *len)
355 {
356  AVIOContext *io = NULL;
357  int64_t fsize;
358  int ret;
359 
360  *data = NULL;
361  *len = 0;
362 
363  ret = avio_open2(&io, path, AVIO_FLAG_READ, &int_cb, NULL);
364  if (ret < 0) {
365  av_log(NULL, AV_LOG_ERROR, "Cannot open file '%s': %s\n",
366  path, av_err2str(ret));
367  return ret;
368  }
369 
370  fsize = avio_size(io);
371  if (fsize < 0 || fsize > INT_MAX) {
372  av_log(NULL, AV_LOG_ERROR, "Cannot obtain size of file %s\n", path);
373  ret = AVERROR(EIO);
374  goto fail;
375  }
376 
377  *data = av_malloc(fsize);
378  if (!*data) {
379  ret = AVERROR(ENOMEM);
380  goto fail;
381  }
382 
383  ret = avio_read(io, *data, fsize);
384  if (ret != fsize) {
385  av_log(NULL, AV_LOG_ERROR, "Error reading file %s\n", path);
386  ret = ret < 0 ? ret : AVERROR(EIO);
387  goto fail;
388  }
389 
390  *len = fsize;
391 
392  ret = 0;
393 fail:
394  avio_close(io);
395  if (ret < 0) {
396  av_freep(data);
397  *len = 0;
398  }
399  return ret;
400 }
401 
402 static int filter_opt_apply(AVFilterContext *f, const char *key, const char *val)
403 {
404  const AVOption *o = NULL;
405  int ret;
406 
407  ret = av_opt_set(f, key, val, AV_OPT_SEARCH_CHILDREN);
408  if (ret >= 0)
409  return 0;
410 
411  if (ret == AVERROR_OPTION_NOT_FOUND && key[0] == '/')
412  o = av_opt_find(f, key + 1, NULL, 0, AV_OPT_SEARCH_CHILDREN);
413  if (!o)
414  goto err_apply;
415 
416  // key is a valid option name prefixed with '/'
417  // interpret value as a path from which to load the actual option value
418  key++;
419 
420  if (o->type == AV_OPT_TYPE_BINARY) {
421  uint8_t *data;
422  int len;
423 
424  ret = read_binary(val, &data, &len);
425  if (ret < 0)
426  goto err_load;
427 
428  ret = av_opt_set_bin(f, key, data, len, AV_OPT_SEARCH_CHILDREN);
429  av_freep(&data);
430  } else {
431  char *data = file_read(val);
432  if (!data) {
433  ret = AVERROR(EIO);
434  goto err_load;
435  }
436 
437  ret = av_opt_set(f, key, data, AV_OPT_SEARCH_CHILDREN);
438  av_freep(&data);
439  }
440  if (ret < 0)
441  goto err_apply;
442 
443  return 0;
444 
445 err_apply:
447  "Error applying option '%s' to filter '%s': %s\n",
448  key, f->filter->name, av_err2str(ret));
449  return ret;
450 err_load:
452  "Error loading value for option '%s' from file '%s'\n",
453  key, val);
454  return ret;
455 }
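/* Illustrative sketch, not part of the original file: the '/' prefix handled
 * above lets a filtergraph description load an option value from a file, e.g.
 * an option entry equivalent to drawtext=/text=greeting.txt would read the
 * drawtext text from greeting.txt. The filter and file names here are
 * hypothetical, and drawtext is assumed to be present in the build. */
static int filter_opt_apply_sketch(AVFilterGraph *graph)
{
    const AVFilter *f = avfilter_get_by_name("drawtext");
    AVFilterContext *ctx;

    if (!f)
        return AVERROR_FILTER_NOT_FOUND;

    ctx = avfilter_graph_alloc_filter(graph, f, "dt_sketch");
    if (!ctx)
        return AVERROR(ENOMEM);

    /* load the value of the "text" option from the file "greeting.txt" */
    return filter_opt_apply(ctx, "/text", "greeting.txt");
}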
456 
457 static int graph_opts_apply(AVFilterGraphSegment *seg)
458 {
459  for (size_t i = 0; i < seg->nb_chains; i++) {
460  AVFilterChain *ch = seg->chains[i];
461 
462  for (size_t j = 0; j < ch->nb_filters; j++) {
463  AVFilterParams *p = ch->filters[j];
464  const AVDictionaryEntry *e = NULL;
465 
466  av_assert0(p->filter);
467 
468  while ((e = av_dict_iterate(p->opts, e))) {
469  int ret = filter_opt_apply(p->filter, e->key, e->value);
470  if (ret < 0)
471  return ret;
472  }
473 
474  av_dict_free(&p->opts);
475  }
476  }
477 
478  return 0;
479 }
480 
481 static int graph_parse(AVFilterGraph *graph, const char *desc,
482  AVFilterInOut **inputs, AVFilterInOut **outputs,
483  AVBufferRef *hw_device)
484 {
485  AVFilterGraphSegment *seg;
486  int ret;
487 
488  *inputs = NULL;
489  *outputs = NULL;
490 
491  ret = avfilter_graph_segment_parse(graph, desc, 0, &seg);
492  if (ret < 0)
493  return ret;
494 
495  ret = avfilter_graph_segment_create_filters(seg, 0);
496  if (ret < 0)
497  goto fail;
498 
499  if (hw_device) {
500  for (int i = 0; i < graph->nb_filters; i++) {
501  AVFilterContext *f = graph->filters[i];
502 
503  if (!(f->filter->flags & AVFILTER_FLAG_HWDEVICE))
504  continue;
505  f->hw_device_ctx = av_buffer_ref(hw_device);
506  if (!f->hw_device_ctx) {
507  ret = AVERROR(ENOMEM);
508  goto fail;
509  }
510  }
511  }
512 
513  ret = graph_opts_apply(seg);
514  if (ret < 0)
515  goto fail;
516 
517  ret = avfilter_graph_segment_apply(seg, 0, inputs, outputs);
518 
519 fail:
520  avfilter_graph_segment_free(&seg);
521  return ret;
522 }
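/* Illustrative sketch, not part of the original file: minimal use of the same
 * AVFilterGraphSegment API for a caller that needs no hardware device or
 * per-filter option handling. */
static int graph_parse_minimal_sketch(AVFilterGraph *graph, const char *desc)
{
    AVFilterGraphSegment *seg = NULL;
    AVFilterInOut *in = NULL, *out = NULL;
    int ret;

    ret = avfilter_graph_segment_parse(graph, desc, 0, &seg);
    if (ret < 0)
        return ret;

    /* create, initialize and link all filters in one call */
    ret = avfilter_graph_segment_apply(seg, 0, &in, &out);
    avfilter_graph_segment_free(&seg);

    avfilter_inout_free(&in);
    avfilter_inout_free(&out);
    return ret;
}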
523 
524 // Filters can be configured only if the formats of all inputs are known.
525 static int ifilter_has_all_input_formats(FilterGraph *fg)
526 {
527  int i;
528  for (i = 0; i < fg->nb_inputs; i++) {
529  InputFilterPriv *ifp = ifp_from_ifilter(fg->inputs[i]);
530  if (ifp->format < 0)
531  return 0;
532  }
533  return 1;
534 }
535 
536 static char *describe_filter_link(FilterGraph *fg, AVFilterInOut *inout, int in)
537 {
538  AVFilterContext *ctx = inout->filter_ctx;
539  AVFilterPad *pads = in ? ctx->input_pads : ctx->output_pads;
540  int nb_pads = in ? ctx->nb_inputs : ctx->nb_outputs;
541 
542  if (nb_pads > 1)
543  return av_strdup(ctx->filter->name);
544  return av_asprintf("%s:%s", ctx->filter->name,
545  avfilter_pad_get_name(pads, inout->pad_idx));
546 }
547 
548 static OutputFilter *ofilter_alloc(FilterGraph *fg)
549 {
550  OutputFilterPriv *ofp;
551  OutputFilter *ofilter;
552 
553  ofp = allocate_array_elem(&fg->outputs, sizeof(*ofp), &fg->nb_outputs);
554  if (!ofp)
555  return NULL;
556 
557  ofilter = &ofp->ofilter;
558  ofilter->graph = fg;
559  ofp->format = -1;
560  ofilter->last_pts = AV_NOPTS_VALUE;
561 
562  return ofilter;
563 }
564 
565 static int ifilter_bind_ist(InputFilter *ifilter, InputStream *ist)
566 {
567  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
568  int ret;
569 
570  av_assert0(!ifp->ist);
571 
572  ifp->ist = ist;
573  ifp->type_src = ist->st->codecpar->codec_type;
574 
575  ret = ist_filter_add(ist, ifilter, filtergraph_is_simple(ifilter->graph));
576  if (ret < 0)
577  return ret;
578 
579  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE) {
580  ifp->sub2video.frame = av_frame_alloc();
581  if (!ifp->sub2video.frame)
582  return AVERROR(ENOMEM);
583  }
584 
585  return 0;
586 }
587 
588 static int set_channel_layout(OutputFilterPriv *f, OutputStream *ost)
589 {
590  const AVCodec *c = ost->enc_ctx->codec;
591  int i, err;
592 
593  if (ost->enc_ctx->ch_layout.order != AV_CHANNEL_ORDER_UNSPEC) {
594  /* Pass the layout through for all orders but UNSPEC */
595  err = av_channel_layout_copy(&f->ch_layout, &ost->enc_ctx->ch_layout);
596  if (err < 0)
597  return err;
598  return 0;
599  }
600 
601  /* Requested layout is of order UNSPEC */
602  if (!c->ch_layouts) {
603  /* Use the default native layout for the requested amount of channels when the
604  encoder doesn't have a list of supported layouts */
605  av_channel_layout_default(&f->ch_layout, ost->enc_ctx->ch_layout.nb_channels);
606  return 0;
607  }
608  /* Encoder has a list of supported layouts. Pick the first layout in it with the
609  same amount of channels as the requested layout */
610  for (i = 0; c->ch_layouts[i].nb_channels; i++) {
611  if (c->ch_layouts[i].nb_channels == ost->enc_ctx->ch_layout.nb_channels)
612  break;
613  }
614  if (c->ch_layouts[i].nb_channels) {
615  /* Use it if one is found */
616  err = av_channel_layout_copy(&f->ch_layout, &c->ch_layouts[i]);
617  if (err < 0)
618  return err;
619  return 0;
620  }
621  /* If no layout for the amount of channels requested was found, use the default
622  native layout for it. */
623  av_channel_layout_default(&f->ch_layout, ost->enc_ctx->ch_layout.nb_channels);
624 
625  return 0;
626 }
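/* Illustrative sketch, not part of the original file: av_channel_layout_default()
 * used above picks the "native" default layout for a given channel count. */
static void channel_layout_default_sketch(void)
{
    AVChannelLayout ch = { 0 };
    char buf[64];

    av_channel_layout_default(&ch, 2);
    av_channel_layout_describe(&ch, buf, sizeof(buf)); /* buf == "stereo" */
    av_channel_layout_uninit(&ch);
}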
627 
628 static int ofilter_bind_ost(OutputFilter *ofilter, OutputStream *ost)
629 {
630  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
631  FilterGraph *fg = ofilter->graph;
632  FilterGraphPriv *fgp = fgp_from_fg(fg);
633  const AVCodec *c = ost->enc_ctx->codec;
634 
635  av_assert0(!ofilter->ost);
636 
637  ofilter->ost = ost;
638  av_freep(&ofilter->linklabel);
639 
640  switch (ost->enc_ctx->codec_type) {
641  case AVMEDIA_TYPE_VIDEO:
642  ofp->width = ost->enc_ctx->width;
643  ofp->height = ost->enc_ctx->height;
644  if (ost->enc_ctx->pix_fmt != AV_PIX_FMT_NONE) {
645  ofp->format = ost->enc_ctx->pix_fmt;
646  } else {
647  ofp->formats = c->pix_fmts;
648 
649  // MJPEG encoder exports a full list of supported pixel formats,
650  // but the full-range ones are experimental-only.
651  // Restrict the auto-conversion list unless -strict experimental
652  // has been specified.
653  if (!strcmp(c->name, "mjpeg")) {
654  // FIXME: YUV420P etc. are actually supported with full color range,
655  // yet the latter information isn't available here.
656  static const enum AVPixelFormat mjpeg_formats[] =
657  { AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P,
658  AV_PIX_FMT_NONE };
659 
660  const AVDictionaryEntry *strict = av_dict_get(ost->encoder_opts, "strict", NULL, 0);
661  int strict_val = ost->enc_ctx->strict_std_compliance;
662 
663  if (strict) {
664  const AVOption *o = av_opt_find(ost->enc_ctx, strict->key, NULL, 0, 0);
665  av_assert0(o);
666  av_opt_eval_int(ost->enc_ctx, o, strict->value, &strict_val);
667  }
668 
669  if (strict_val > FF_COMPLIANCE_UNOFFICIAL)
670  ofp->formats = mjpeg_formats;
671  }
672  }
673 
674  fgp->disable_conversions |= ost->keep_pix_fmt;
675 
676  break;
677  case AVMEDIA_TYPE_AUDIO:
678  if (ost->enc_ctx->sample_fmt != AV_SAMPLE_FMT_NONE) {
679  ofp->format = ost->enc_ctx->sample_fmt;
680  } else {
681  ofp->formats = c->sample_fmts;
682  }
683  if (ost->enc_ctx->sample_rate) {
684  ofp->sample_rate = ost->enc_ctx->sample_rate;
685  } else {
686  ofp->sample_rates = c->supported_samplerates;
687  }
688  if (ost->enc_ctx->ch_layout.nb_channels) {
689  int ret = set_channel_layout(ofp, ost);
690  if (ret < 0)
691  return ret;
692  } else if (c->ch_layouts) {
693  ofp->ch_layouts = c->ch_layouts;
694  }
695  break;
696  }
697 
698  // if we have all input parameters and all outputs are bound,
699  // the graph can now be configured
700  if (ifilter_has_all_input_formats(fg)) {
701  int ret;
702 
703  for (int i = 0; i < fg->nb_outputs; i++)
704  if (!fg->outputs[i]->ost)
705  return 0;
706 
707  ret = configure_filtergraph(fg);
708  if (ret < 0) {
709  av_log(fg, AV_LOG_ERROR, "Error configuring filter graph: %s\n",
710  av_err2str(ret));
711  return ret;
712  }
713  }
714 
715  return 0;
716 }
717 
718 static InputFilter *ifilter_alloc(FilterGraph *fg)
719 {
720  InputFilterPriv *ifp;
721  InputFilter *ifilter;
722 
723  ifp = allocate_array_elem(&fg->inputs, sizeof(*ifp), &fg->nb_inputs);
724  if (!ifp)
725  return NULL;
726 
727  ifilter = &ifp->ifilter;
728  ifilter->graph = fg;
729 
730  ifp->frame = av_frame_alloc();
731  if (!ifp->frame)
732  return NULL;
733 
734  ifp->format = -1;
735  ifp->fallback.format = -1;
736 
738  if (!ifp->frame_queue)
739  return NULL;
740 
741  return ifilter;
742 }
743 
744 void fg_free(FilterGraph **pfg)
745 {
746  FilterGraph *fg = *pfg;
747  FilterGraphPriv *fgp;
748 
749  if (!fg)
750  return;
751  fgp = fgp_from_fg(fg);
752 
754  for (int j = 0; j < fg->nb_inputs; j++) {
755  InputFilter *ifilter = fg->inputs[j];
756  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
757 
758  if (ifp->frame_queue) {
759  AVFrame *frame;
760  while (av_fifo_read(ifp->frame_queue, &frame, 1) >= 0)
761  av_frame_free(&frame);
762  av_fifo_freep2(&ifp->frame_queue);
763  }
765 
767 
768  av_frame_free(&ifp->frame);
769 
771  av_freep(&ifp->linklabel);
772  av_freep(&ifilter->name);
773  av_freep(&fg->inputs[j]);
774  }
775  av_freep(&fg->inputs);
776  for (int j = 0; j < fg->nb_outputs; j++) {
777  OutputFilter *ofilter = fg->outputs[j];
778  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
779 
780  av_freep(&ofilter->linklabel);
781  av_freep(&ofilter->name);
783  av_freep(&fg->outputs[j]);
784  }
785  av_freep(&fg->outputs);
786  av_freep(&fgp->graph_desc);
787 
788  av_frame_free(&fgp->frame);
789 
790  av_freep(pfg);
791 }
792 
793 static const char *fg_item_name(void *obj)
794 {
795  const FilterGraphPriv *fgp = obj;
796 
797  return fgp->log_name;
798 }
799 
800 static const AVClass fg_class = {
801  .class_name = "FilterGraph",
802  .version = LIBAVUTIL_VERSION_INT,
803  .item_name = fg_item_name,
804  .category = AV_CLASS_CATEGORY_FILTER,
805 };
806 
807 int fg_create(FilterGraph **pfg, char *graph_desc)
808 {
809  FilterGraphPriv *fgp;
810  FilterGraph *fg;
811 
812  AVFilterInOut *inputs, *outputs;
813  AVFilterGraph *graph;
814  int ret = 0;
815 
816  fgp = allocate_array_elem(&filtergraphs, sizeof(*fgp), &nb_filtergraphs);
817  if (!fgp)
818  return AVERROR(ENOMEM);
819  fg = &fgp->fg;
820 
821  if (pfg)
822  *pfg = fg;
823 
824  fg->class = &fg_class;
825  fg->index = nb_filtergraphs - 1;
826  fgp->graph_desc = graph_desc;
828 
829  snprintf(fgp->log_name, sizeof(fgp->log_name), "fc#%d", fg->index);
830 
831  fgp->frame = av_frame_alloc();
832  if (!fgp->frame)
833  return AVERROR(ENOMEM);
834 
835  /* this graph is only used for determining the kinds of inputs
836  * and outputs we have, and is discarded on exit from this function */
837  graph = avfilter_graph_alloc();
838  if (!graph)
839  return AVERROR(ENOMEM);
840  graph->nb_threads = 1;
841 
842  ret = graph_parse(graph, fgp->graph_desc, &inputs, &outputs, NULL);
843  if (ret < 0)
844  goto fail;
845 
846  for (AVFilterInOut *cur = inputs; cur; cur = cur->next) {
847  InputFilter *const ifilter = ifilter_alloc(fg);
848  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
849 
850  ifp->linklabel = cur->name;
851  cur->name = NULL;
852 
853  ifp->type = avfilter_pad_get_type(cur->filter_ctx->input_pads,
854  cur->pad_idx);
855  ifilter->name = describe_filter_link(fg, cur, 1);
856  if (!ifilter->name) {
857  ret = AVERROR(ENOMEM);
858  goto fail;
859  }
860  }
861 
862  for (AVFilterInOut *cur = outputs; cur; cur = cur->next) {
863  OutputFilter *const ofilter = ofilter_alloc(fg);
864 
865  if (!ofilter)
866  goto fail;
867 
868  ofilter->linklabel = cur->name;
869  cur->name = NULL;
870 
871  ofilter->type = avfilter_pad_get_type(cur->filter_ctx->output_pads,
872  cur->pad_idx);
873  ofilter->name = describe_filter_link(fg, cur, 0);
874  if (!ofilter->name) {
875  ret = AVERROR(ENOMEM);
876  goto fail;
877  }
878  }
879 
880  if (!fg->nb_outputs) {
881  av_log(fg, AV_LOG_FATAL, "A filtergraph has zero outputs, this is not supported\n");
882  ret = AVERROR(ENOSYS);
883  goto fail;
884  }
885 
886 fail:
887  avfilter_inout_free(&inputs);
888  avfilter_inout_free(&outputs);
889  avfilter_graph_free(&graph);
890 
891  if (ret < 0)
892  return ret;
893 
894  return 0;
895 }
896 
897 int init_simple_filtergraph(InputStream *ist, OutputStream *ost,
898  char *graph_desc)
899 {
900  FilterGraph *fg;
901  FilterGraphPriv *fgp;
902  int ret;
903 
904  ret = fg_create(&fg, graph_desc);
905  if (ret < 0)
906  return ret;
907  fgp = fgp_from_fg(fg);
908 
909  fgp->is_simple = 1;
910 
911  snprintf(fgp->log_name, sizeof(fgp->log_name), "%cf#%d:%d",
912  av_get_media_type_string(ost->type)[0],
913  ost->file_index, ost->index);
914 
915  if (fg->nb_inputs != 1 || fg->nb_outputs != 1) {
916  av_log(fg, AV_LOG_ERROR, "Simple filtergraph '%s' was expected "
917  "to have exactly 1 input and 1 output. "
918  "However, it had %d input(s) and %d output(s). Please adjust, "
919  "or use a complex filtergraph (-filter_complex) instead.\n",
920  graph_desc, fg->nb_inputs, fg->nb_outputs);
921  return AVERROR(EINVAL);
922  }
923 
924  ost->filter = fg->outputs[0];
925 
926  ret = ifilter_bind_ist(fg->inputs[0], ist);
927  if (ret < 0)
928  return ret;
929 
930  ret = ofilter_bind_ost(fg->outputs[0], ost);
931  if (ret < 0)
932  return ret;
933 
934  return 0;
935 }
936 
937 static int init_input_filter(FilterGraph *fg, InputFilter *ifilter)
938 {
939  FilterGraphPriv *fgp = fgp_from_fg(fg);
940  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
941  InputStream *ist = NULL;
942  enum AVMediaType type = ifp->type;
943  int i, ret;
944 
945  // TODO: support other filter types
946  if (type != AVMEDIA_TYPE_VIDEO && type != AVMEDIA_TYPE_AUDIO) {
947  av_log(fg, AV_LOG_FATAL, "Only video and audio filters supported "
948  "currently.\n");
949  return AVERROR(ENOSYS);
950  }
951 
952  if (ifp->linklabel) {
953  AVFormatContext *s;
954  AVStream *st = NULL;
955  char *p;
956  int file_idx = strtol(ifp->linklabel, &p, 0);
957 
958  if (file_idx < 0 || file_idx >= nb_input_files) {
959  av_log(fg, AV_LOG_FATAL, "Invalid file index %d in filtergraph description %s.\n",
960  file_idx, fgp->graph_desc);
961  return AVERROR(EINVAL);
962  }
963  s = input_files[file_idx]->ctx;
964 
965  for (i = 0; i < s->nb_streams; i++) {
966  enum AVMediaType stream_type = s->streams[i]->codecpar->codec_type;
967  if (stream_type != type &&
968  !(stream_type == AVMEDIA_TYPE_SUBTITLE &&
969  type == AVMEDIA_TYPE_VIDEO /* sub2video hack */))
970  continue;
971  if (check_stream_specifier(s, s->streams[i], *p == ':' ? p + 1 : p) == 1) {
972  st = s->streams[i];
973  break;
974  }
975  }
976  if (!st) {
977  av_log(fg, AV_LOG_FATAL, "Stream specifier '%s' in filtergraph description %s "
978  "matches no streams.\n", p, fgp->graph_desc);
979  return AVERROR(EINVAL);
980  }
981  ist = input_files[file_idx]->streams[st->index];
982  } else {
983  ist = ist_find_unused(type);
984  if (!ist) {
985  av_log(fg, AV_LOG_FATAL, "Cannot find a matching stream for "
986  "unlabeled input pad %s\n", ifilter->name);
987  return AVERROR(EINVAL);
988  }
989  }
990  av_assert0(ist);
991 
992  ret = ifilter_bind_ist(ifilter, ist);
993  if (ret < 0) {
994  av_log(fg, AV_LOG_ERROR,
995  "Error binding an input stream to complex filtergraph input %s.\n",
996  ifilter->name);
997  return ret;
998  }
999 
1000  return 0;
1001 }
1002 
1003 int init_complex_filtergraph(FilterGraph *fg)
1004 {
1005  // bind filtergraph inputs to input streams
1006  for (int i = 0; i < fg->nb_inputs; i++) {
1007  int ret = init_input_filter(fg, fg->inputs[i]);
1008  if (ret < 0)
1009  return ret;
1010  }
1011  return 0;
1012 }
1013 
1014 static int insert_trim(int64_t start_time, int64_t duration,
1015  AVFilterContext **last_filter, int *pad_idx,
1016  const char *filter_name)
1017 {
1018  AVFilterGraph *graph = (*last_filter)->graph;
1019  AVFilterContext *ctx;
1020  const AVFilter *trim;
1021  enum AVMediaType type = avfilter_pad_get_type((*last_filter)->output_pads, *pad_idx);
1022  const char *name = (type == AVMEDIA_TYPE_VIDEO) ? "trim" : "atrim";
1023  int ret = 0;
1024 
1025  if (duration == INT64_MAX && start_time == AV_NOPTS_VALUE)
1026  return 0;
1027 
1028  trim = avfilter_get_by_name(name);
1029  if (!trim) {
1030  av_log(NULL, AV_LOG_ERROR, "%s filter not present, cannot limit "
1031  "recording time.\n", name);
1032  return AVERROR_FILTER_NOT_FOUND;
1033  }
1034 
1035  ctx = avfilter_graph_alloc_filter(graph, trim, filter_name);
1036  if (!ctx)
1037  return AVERROR(ENOMEM);
1038 
1039  if (duration != INT64_MAX) {
1040  ret = av_opt_set_int(ctx, "durationi", duration,
1041  AV_OPT_SEARCH_CHILDREN);
1042  }
1043  if (ret >= 0 && start_time != AV_NOPTS_VALUE) {
1044  ret = av_opt_set_int(ctx, "starti", start_time,
1045  AV_OPT_SEARCH_CHILDREN);
1046  }
1047  if (ret < 0) {
1048  av_log(ctx, AV_LOG_ERROR, "Error configuring the %s filter", name);
1049  return ret;
1050  }
1051 
1052  ret = avfilter_init_str(ctx, NULL);
1053  if (ret < 0)
1054  return ret;
1055 
1056  ret = avfilter_link(*last_filter, *pad_idx, ctx, 0);
1057  if (ret < 0)
1058  return ret;
1059 
1060  *last_filter = ctx;
1061  *pad_idx = 0;
1062  return 0;
1063 }
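/* Illustrative sketch, not part of the original file: a standalone equivalent
 * of what insert_trim() does for video, using the trim filter's integer-time
 * options ("starti"/"durationi", in AV_TIME_BASE units). `graph` and `src` are
 * assumed to be an existing graph and the filter to append to. */
static int insert_trim_sketch(AVFilterGraph *graph, AVFilterContext *src)
{
    const AVFilter *trim = avfilter_get_by_name("trim");
    AVFilterContext *ctx;
    int ret;

    if (!trim)
        return AVERROR_FILTER_NOT_FOUND;

    ctx = avfilter_graph_alloc_filter(graph, trim, "trim_sketch");
    if (!ctx)
        return AVERROR(ENOMEM);

    /* keep 5 seconds starting from the beginning of the stream */
    ret = av_opt_set_int(ctx, "starti", 0, AV_OPT_SEARCH_CHILDREN);
    if (ret >= 0)
        ret = av_opt_set_int(ctx, "durationi", 5LL * AV_TIME_BASE,
                             AV_OPT_SEARCH_CHILDREN);
    if (ret < 0)
        return ret;

    ret = avfilter_init_str(ctx, NULL);
    if (ret < 0)
        return ret;

    return avfilter_link(src, 0, ctx, 0);
}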
1064 
1065 static int insert_filter(AVFilterContext **last_filter, int *pad_idx,
1066  const char *filter_name, const char *args)
1067 {
1068  AVFilterGraph *graph = (*last_filter)->graph;
1069  AVFilterContext *ctx;
1070  int ret;
1071 
1072  ret = avfilter_graph_create_filter(&ctx,
1073  avfilter_get_by_name(filter_name),
1074  filter_name, args, NULL, graph);
1075  if (ret < 0)
1076  return ret;
1077 
1078  ret = avfilter_link(*last_filter, *pad_idx, ctx, 0);
1079  if (ret < 0)
1080  return ret;
1081 
1082  *last_filter = ctx;
1083  *pad_idx = 0;
1084  return 0;
1085 }
1086 
1087 static int configure_output_video_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
1088 {
1089  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
1090  OutputStream *ost = ofilter->ost;
1091  OutputFile *of = output_files[ost->file_index];
1092  AVFilterContext *last_filter = out->filter_ctx;
1093  AVBPrint bprint;
1094  int pad_idx = out->pad_idx;
1095  int ret;
1096  const char *pix_fmts;
1097  char name[255];
1098 
1099  snprintf(name, sizeof(name), "out_%d_%d", ost->file_index, ost->index);
1100  ret = avfilter_graph_create_filter(&ofp->filter,
1101  avfilter_get_by_name("buffersink"),
1102  name, NULL, NULL, fg->graph);
1103 
1104  if (ret < 0)
1105  return ret;
1106 
1107  if ((ofp->width || ofp->height) && ofilter->ost->autoscale) {
1108  char args[255];
1109  AVFilterContext *filter;
1110  const AVDictionaryEntry *e = NULL;
1111 
1112  snprintf(args, sizeof(args), "%d:%d",
1113  ofp->width, ofp->height);
1114 
1115  while ((e = av_dict_iterate(ost->sws_dict, e))) {
1116  av_strlcatf(args, sizeof(args), ":%s=%s", e->key, e->value);
1117  }
1118 
1119  snprintf(name, sizeof(name), "scaler_out_%d_%d",
1120  ost->file_index, ost->index);
1121  if ((ret = avfilter_graph_create_filter(&filter, avfilter_get_by_name("scale"),
1122  name, args, NULL, fg->graph)) < 0)
1123  return ret;
1124  if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
1125  return ret;
1126 
1127  last_filter = filter;
1128  pad_idx = 0;
1129  }
1130 
1131  av_bprint_init(&bprint, 0, AV_BPRINT_SIZE_UNLIMITED);
1132  ret = choose_pix_fmts(ofilter, &bprint, &pix_fmts);
1133  if (ret < 0)
1134  return ret;
1135 
1136  if (pix_fmts) {
1137  AVFilterContext *filter;
1138 
1139  ret = avfilter_graph_create_filter(&filter,
1140  avfilter_get_by_name("format"),
1141  "format", pix_fmts, NULL, fg->graph);
1142  av_bprint_finalize(&bprint, NULL);
1143  if (ret < 0)
1144  return ret;
1145  if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
1146  return ret;
1147 
1148  last_filter = filter;
1149  pad_idx = 0;
1150  }
1151 
1152  snprintf(name, sizeof(name), "trim_out_%d_%d",
1153  ost->file_index, ost->index);
1154  ret = insert_trim(of->start_time, of->recording_time,
1155  &last_filter, &pad_idx, name);
1156  if (ret < 0)
1157  return ret;
1158 
1159 
1160  if ((ret = avfilter_link(last_filter, pad_idx, ofp->filter, 0)) < 0)
1161  return ret;
1162 
1163  return 0;
1164 }
1165 
1166 static int configure_output_audio_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
1167 {
1168  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
1169  OutputStream *ost = ofilter->ost;
1170  OutputFile *of = output_files[ost->file_index];
1171  AVFilterContext *last_filter = out->filter_ctx;
1172  int pad_idx = out->pad_idx;
1173  AVBPrint args;
1174  char name[255];
1175  int ret;
1176 
1177  snprintf(name, sizeof(name), "out_%d_%d", ost->file_index, ost->index);
1178  ret = avfilter_graph_create_filter(&ofp->filter,
1179  avfilter_get_by_name("abuffersink"),
1180  name, NULL, NULL, fg->graph);
1181  if (ret < 0)
1182  return ret;
1183  if ((ret = av_opt_set_int(ofp->filter, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
1184  return ret;
1185 
1186 #define AUTO_INSERT_FILTER(opt_name, filter_name, arg) do { \
1187  AVFilterContext *filt_ctx; \
1188  \
1189  av_log(fg, AV_LOG_INFO, opt_name " is forwarded to lavfi " \
1190  "similarly to -af " filter_name "=%s.\n", arg); \
1191  \
1192  ret = avfilter_graph_create_filter(&filt_ctx, \
1193  avfilter_get_by_name(filter_name), \
1194  filter_name, arg, NULL, fg->graph); \
1195  if (ret < 0) \
1196  goto fail; \
1197  \
1198  ret = avfilter_link(last_filter, pad_idx, filt_ctx, 0); \
1199  if (ret < 0) \
1200  goto fail; \
1201  \
1202  last_filter = filt_ctx; \
1203  pad_idx = 0; \
1204 } while (0)
1205  av_bprint_init(&args, 0, AV_BPRINT_SIZE_UNLIMITED);
1206 #if FFMPEG_OPT_MAP_CHANNEL
1207  if (ost->audio_channels_mapped) {
1208  AVChannelLayout mapped_layout = { 0 };
1209  int i;
1210  av_channel_layout_default(&mapped_layout, ost->audio_channels_mapped);
1211  av_channel_layout_describe_bprint(&mapped_layout, &args);
1212  for (i = 0; i < ost->audio_channels_mapped; i++)
1213  if (ost->audio_channels_map[i] != -1)
1214  av_bprintf(&args, "|c%d=c%d", i, ost->audio_channels_map[i]);
1215 
1216  AUTO_INSERT_FILTER("-map_channel", "pan", args.str);
1217  av_bprint_clear(&args);
1218  }
1219 #endif
1220 
1221  choose_sample_fmts(ofp, &args);
1222  choose_sample_rates(ofp, &args);
1223  choose_channel_layouts(ofp, &args);
1224  if (!av_bprint_is_complete(&args)) {
1225  ret = AVERROR(ENOMEM);
1226  goto fail;
1227  }
1228  if (args.len) {
1229  AVFilterContext *format;
1230 
1231  snprintf(name, sizeof(name), "format_out_%d_%d",
1232  ost->file_index, ost->index);
1233  ret = avfilter_graph_create_filter(&format,
1234  avfilter_get_by_name("aformat"),
1235  name, args.str, NULL, fg->graph);
1236  if (ret < 0)
1237  goto fail;
1238 
1239  ret = avfilter_link(last_filter, pad_idx, format, 0);
1240  if (ret < 0)
1241  goto fail;
1242 
1243  last_filter = format;
1244  pad_idx = 0;
1245  }
1246 
1247  if (ost->apad && of->shortest) {
1248  int i;
1249 
1250  for (i = 0; i < of->nb_streams; i++)
1252  break;
1253 
1254  if (i < of->nb_streams) {
1255  AUTO_INSERT_FILTER("-apad", "apad", ost->apad);
1256  }
1257  }
1258 
1259  snprintf(name, sizeof(name), "trim for output stream %d:%d",
1260  ost->file_index, ost->index);
1261  ret = insert_trim(of->start_time, of->recording_time,
1262  &last_filter, &pad_idx, name);
1263  if (ret < 0)
1264  goto fail;
1265 
1266  if ((ret = avfilter_link(last_filter, pad_idx, ofp->filter, 0)) < 0)
1267  goto fail;
1268 fail:
1269  av_bprint_finalize(&args, NULL);
1270 
1271  return ret;
1272 }
1273 
1274 static int configure_output_filter(FilterGraph *fg, OutputFilter *ofilter,
1275  AVFilterInOut *out)
1276 {
1277  if (!ofilter->ost) {
1278  av_log(fg, AV_LOG_FATAL, "Filter %s has an unconnected output\n", ofilter->name);
1279  return AVERROR(EINVAL);
1280  }
1281 
1282  switch (avfilter_pad_get_type(out->filter_ctx->output_pads, out->pad_idx)) {
1283  case AVMEDIA_TYPE_VIDEO: return configure_output_video_filter(fg, ofilter, out);
1284  case AVMEDIA_TYPE_AUDIO: return configure_output_audio_filter(fg, ofilter, out);
1285  default: av_assert0(0); return 0;
1286  }
1287 }
1288 
1289 int check_filter_outputs(void)
1290 {
1291  int i;
1292  for (i = 0; i < nb_filtergraphs; i++) {
1293  int n;
1294  for (n = 0; n < filtergraphs[i]->nb_outputs; n++) {
1295  OutputFilter *output = filtergraphs[i]->outputs[n];
1296  if (!output->ost) {
1298  "Filter %s has an unconnected output\n", output->name);
1299  return AVERROR(EINVAL);
1300  }
1301  }
1302  }
1303 
1304  return 0;
1305 }
1306 
1307 static void sub2video_prepare(InputFilterPriv *ifp)
1308 {
1309  ifp->sub2video.last_pts = INT64_MIN;
1310  ifp->sub2video.end_pts = INT64_MIN;
1311 
1312  /* sub2video structure has been (re-)initialized.
1313  Mark it as such so that the system will be
1314  initialized with the first received heartbeat. */
1315  ifp->sub2video.initialize = 1;
1316 }
1317 
1318 static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
1319  AVFilterInOut *in)
1320 {
1321  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1322 
1323  AVFilterContext *last_filter;
1324  const AVFilter *buffer_filt = avfilter_get_by_name("buffer");
1325  const AVPixFmtDescriptor *desc;
1326  InputStream *ist = ifp->ist;
1327  InputFile *f = input_files[ist->file_index];
1328  AVRational fr = ist->framerate;
1329  AVRational sar;
1330  AVBPrint args;
1331  char name[255];
1332  int ret, pad_idx = 0;
1333  int64_t tsoffset = 0;
1334  AVBufferSrcParameters *par = av_buffersrc_parameters_alloc();
1335 
1336  if (!par)
1337  return AVERROR(ENOMEM);
1338  memset(par, 0, sizeof(*par));
1339  par->format = AV_PIX_FMT_NONE;
1340 
1341  if (ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
1342  av_log(fg, AV_LOG_ERROR, "Cannot connect video filter to audio input\n");
1343  ret = AVERROR(EINVAL);
1344  goto fail;
1345  }
1346 
1347  if (!fr.num)
1348  fr = ist->framerate_guessed;
1349 
1350  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE)
1351  sub2video_prepare(ifp);
1352 
1353  ifp->time_base = ist->framerate.num ? av_inv_q(ist->framerate) :
1354  ist->st->time_base;
1355 
1356  sar = ifp->sample_aspect_ratio;
1357  if (!sar.den)
1358  sar = (AVRational){0,1};
1359  av_bprint_init(&args, 0, AV_BPRINT_SIZE_AUTOMATIC);
1360  av_bprintf(&args,
1361  "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:"
1362  "pixel_aspect=%d/%d",
1363  ifp->width, ifp->height, ifp->format,
1364  ifp->time_base.num, ifp->time_base.den, sar.num, sar.den);
1365  if (fr.num && fr.den)
1366  av_bprintf(&args, ":frame_rate=%d/%d", fr.num, fr.den);
1367  snprintf(name, sizeof(name), "graph %d input from stream %d:%d", fg->index,
1368  ist->file_index, ist->index);
1369 
1370 
1371  if ((ret = avfilter_graph_create_filter(&ifp->filter, buffer_filt, name,
1372  args.str, NULL, fg->graph)) < 0)
1373  goto fail;
1374  par->hw_frames_ctx = ifp->hw_frames_ctx;
1375  ret = av_buffersrc_parameters_set(ifp->filter, par);
1376  if (ret < 0)
1377  goto fail;
1378  av_freep(&par);
1379  last_filter = ifp->filter;
1380 
1381  desc = av_pix_fmt_desc_get(ifp->format);
1382  av_assert0(desc);
1383 
1384  // TODO: insert hwaccel enabled filters like transpose_vaapi into the graph
1385  if (ist->autorotate && !(desc->flags & AV_PIX_FMT_FLAG_HWACCEL)) {
1386  int32_t *displaymatrix = ifp->displaymatrix;
1387  double theta;
1388 
1389  if (!ifp->displaymatrix_present)
1391  theta = get_rotation(displaymatrix);
1392 
1393  if (fabs(theta - 90) < 1.0) {
1394  ret = insert_filter(&last_filter, &pad_idx, "transpose",
1395  displaymatrix[3] > 0 ? "cclock_flip" : "clock");
1396  } else if (fabs(theta - 180) < 1.0) {
1397  if (displaymatrix[0] < 0) {
1398  ret = insert_filter(&last_filter, &pad_idx, "hflip", NULL);
1399  if (ret < 0)
1400  return ret;
1401  }
1402  if (displaymatrix[4] < 0) {
1403  ret = insert_filter(&last_filter, &pad_idx, "vflip", NULL);
1404  }
1405  } else if (fabs(theta - 270) < 1.0) {
1406  ret = insert_filter(&last_filter, &pad_idx, "transpose",
1407  displaymatrix[3] < 0 ? "clock_flip" : "cclock");
1408  } else if (fabs(theta) > 1.0) {
1409  char rotate_buf[64];
1410  snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
1411  ret = insert_filter(&last_filter, &pad_idx, "rotate", rotate_buf);
1412  } else if (fabs(theta) < 1.0) {
1413  if (displaymatrix && displaymatrix[4] < 0) {
1414  ret = insert_filter(&last_filter, &pad_idx, "vflip", NULL);
1415  }
1416  }
1417  if (ret < 0)
1418  return ret;
1419  }
1420 
1421  snprintf(name, sizeof(name), "trim_in_%d_%d",
1422  ist->file_index, ist->index);
1423  if (copy_ts) {
1424  tsoffset = f->start_time == AV_NOPTS_VALUE ? 0 : f->start_time;
1425  if (!start_at_zero && f->ctx->start_time != AV_NOPTS_VALUE)
1426  tsoffset += f->ctx->start_time;
1427  }
1428  ret = insert_trim(((f->start_time == AV_NOPTS_VALUE) || !f->accurate_seek) ?
1429  AV_NOPTS_VALUE : tsoffset, f->recording_time,
1430  &last_filter, &pad_idx, name);
1431  if (ret < 0)
1432  return ret;
1433 
1434  if ((ret = avfilter_link(last_filter, 0, in->filter_ctx, in->pad_idx)) < 0)
1435  return ret;
1436  return 0;
1437 fail:
1438  av_freep(&par);
1439 
1440  return ret;
1441 }
1442 
1443 static int configure_input_audio_filter(FilterGraph *fg, InputFilter *ifilter,
1444  AVFilterInOut *in)
1445 {
1446  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1447  AVFilterContext *last_filter;
1448  const AVFilter *abuffer_filt = avfilter_get_by_name("abuffer");
1449  InputStream *ist = ifp->ist;
1450  InputFile *f = input_files[ist->file_index];
1451  AVBPrint args;
1452  char name[255];
1453  int ret, pad_idx = 0;
1454  int64_t tsoffset = 0;
1455 
1456  if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_AUDIO) {
1457  av_log(fg, AV_LOG_ERROR, "Cannot connect audio filter to non audio input\n");
1458  return AVERROR(EINVAL);
1459  }
1460 
1461  ifp->time_base = (AVRational){ 1, ifp->sample_rate };
1462 
1463  av_bprint_init(&args, 0, AV_BPRINT_SIZE_AUTOMATIC);
1464  av_bprintf(&args, "time_base=%d/%d:sample_rate=%d:sample_fmt=%s",
1465  ifp->time_base.num, ifp->time_base.den,
1466  ifp->sample_rate,
1467  av_get_sample_fmt_name(ifp->format));
1468  if (av_channel_layout_check(&ifp->ch_layout) &&
1469  ifp->ch_layout.order != AV_CHANNEL_ORDER_UNSPEC) {
1470  av_bprintf(&args, ":channel_layout=");
1471  av_channel_layout_describe_bprint(&ifp->ch_layout, &args);
1472  } else
1473  av_bprintf(&args, ":channels=%d", ifp->ch_layout.nb_channels);
1474  snprintf(name, sizeof(name), "graph_%d_in_%d_%d", fg->index,
1475  ist->file_index, ist->index);
1476 
1477  if ((ret = avfilter_graph_create_filter(&ifp->filter, abuffer_filt,
1478  name, args.str, NULL,
1479  fg->graph)) < 0)
1480  return ret;
1481  last_filter = ifp->filter;
1482 
1483  snprintf(name, sizeof(name), "trim for input stream %d:%d",
1484  ist->file_index, ist->index);
1485  if (copy_ts) {
1486  tsoffset = f->start_time == AV_NOPTS_VALUE ? 0 : f->start_time;
1487  if (!start_at_zero && f->ctx->start_time != AV_NOPTS_VALUE)
1488  tsoffset += f->ctx->start_time;
1489  }
1490  ret = insert_trim(((f->start_time == AV_NOPTS_VALUE) || !f->accurate_seek) ?
1491  AV_NOPTS_VALUE : tsoffset, f->recording_time,
1492  &last_filter, &pad_idx, name);
1493  if (ret < 0)
1494  return ret;
1495 
1496  if ((ret = avfilter_link(last_filter, 0, in->filter_ctx, in->pad_idx)) < 0)
1497  return ret;
1498 
1499  return 0;
1500 }
1501 
1502 static int configure_input_filter(FilterGraph *fg, InputFilter *ifilter,
1503  AVFilterInOut *in)
1504 {
1505  switch (ifp_from_ifilter(ifilter)->type) {
1506  case AVMEDIA_TYPE_VIDEO: return configure_input_video_filter(fg, ifilter, in);
1507  case AVMEDIA_TYPE_AUDIO: return configure_input_audio_filter(fg, ifilter, in);
1508  default: av_assert0(0); return 0;
1509  }
1510 }
1511 
1512 static void cleanup_filtergraph(FilterGraph *fg)
1513 {
1514  int i;
1515  for (i = 0; i < fg->nb_outputs; i++)
1516  ofp_from_ofilter(fg->outputs[i])->filter = NULL;
1517  for (i = 0; i < fg->nb_inputs; i++)
1518  ifp_from_ifilter(fg->inputs[i])->filter = NULL;
1519  avfilter_graph_free(&fg->graph);
1520 }
1521 
1522 static int filter_is_buffersrc(const AVFilterContext *f)
1523 {
1524  return f->nb_inputs == 0 &&
1525  (!strcmp(f->filter->name, "buffer") ||
1526  !strcmp(f->filter->name, "abuffer"));
1527 }
1528 
1529 static int graph_is_meta(AVFilterGraph *graph)
1530 {
1531  for (unsigned i = 0; i < graph->nb_filters; i++) {
1532  const AVFilterContext *f = graph->filters[i];
1533 
1534  /* in addition to filters flagged as meta, also
1535  * disregard sinks and buffersources (but not other sources,
1536  * since they introduce data we are not aware of)
1537  */
1538  if (!((f->filter->flags & AVFILTER_FLAG_METADATA_ONLY) ||
1539  f->nb_outputs == 0 ||
1540  filter_is_buffersrc(f)))
1541  return 0;
1542  }
1543  return 1;
1544 }
1545 
1546 static int configure_filtergraph(FilterGraph *fg)
1547 {
1548  FilterGraphPriv *fgp = fgp_from_fg(fg);
1549  AVBufferRef *hw_device;
1550  AVFilterInOut *inputs, *outputs, *cur;
1551  int ret, i, simple = filtergraph_is_simple(fg);
1552  const char *graph_desc = fgp->graph_desc;
1553 
1554  cleanup_filtergraph(fg);
1555  if (!(fg->graph = avfilter_graph_alloc()))
1556  return AVERROR(ENOMEM);
1557 
1558  if (simple) {
1559  OutputStream *ost = fg->outputs[0]->ost;
1560 
1561  if (filter_nbthreads) {
1562  ret = av_opt_set(fg->graph, "threads", filter_nbthreads, 0);
1563  if (ret < 0)
1564  goto fail;
1565  } else {
1566  const AVDictionaryEntry *e = NULL;
1567  e = av_dict_get(ost->encoder_opts, "threads", NULL, 0);
1568  if (e)
1569  av_opt_set(fg->graph, "threads", e->value, 0);
1570  }
1571 
1572  if (av_dict_count(ost->sws_dict)) {
1573  ret = av_dict_get_string(ost->sws_dict,
1574  &fg->graph->scale_sws_opts,
1575  '=', ':');
1576  if (ret < 0)
1577  goto fail;
1578  }
1579 
1580  if (av_dict_count(ost->swr_opts)) {
1581  char *args;
1582  ret = av_dict_get_string(ost->swr_opts, &args, '=', ':');
1583  if (ret < 0)
1584  goto fail;
1585  av_opt_set(fg->graph, "aresample_swr_opts", args, 0);
1586  av_free(args);
1587  }
1588  } else {
1589  fg->graph->nb_threads = filter_complex_nbthreads;
1590  }
1591 
1592  hw_device = hw_device_for_filter();
1593 
1594  if ((ret = graph_parse(fg->graph, graph_desc, &inputs, &outputs, hw_device)) < 0)
1595  goto fail;
1596 
1597  for (cur = inputs, i = 0; cur; cur = cur->next, i++)
1598  if ((ret = configure_input_filter(fg, fg->inputs[i], cur)) < 0) {
1599  avfilter_inout_free(&inputs);
1600  avfilter_inout_free(&outputs);
1601  goto fail;
1602  }
1603  avfilter_inout_free(&inputs);
1604 
1605  for (cur = outputs, i = 0; cur; cur = cur->next, i++) {
1606  ret = configure_output_filter(fg, fg->outputs[i], cur);
1607  if (ret < 0) {
1608  avfilter_inout_free(&outputs);
1609  goto fail;
1610  }
1611  }
1612  avfilter_inout_free(&outputs);
1613 
1614  if (fgp->disable_conversions)
1615  avfilter_graph_set_auto_convert(fg->graph, AVFILTER_AUTO_CONVERT_NONE);
1616  if ((ret = avfilter_graph_config(fg->graph, NULL)) < 0)
1617  goto fail;
1618 
1619  fgp->is_meta = graph_is_meta(fg->graph);
1620 
1621  /* limit the lists of allowed formats to the ones selected, to
1622  * make sure they stay the same if the filtergraph is reconfigured later */
1623  for (i = 0; i < fg->nb_outputs; i++) {
1624  OutputFilter *ofilter = fg->outputs[i];
1625  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
1626  AVFilterContext *sink = ofp->filter;
1627 
1628  ofp->format = av_buffersink_get_format(sink);
1629 
1630  ofp->width = av_buffersink_get_w(sink);
1631  ofp->height = av_buffersink_get_h(sink);
1632 
1635 
1638  ret = av_buffersink_get_ch_layout(sink, &ofp->ch_layout);
1639  if (ret < 0)
1640  goto fail;
1641  }
1642 
1643  for (i = 0; i < fg->nb_inputs; i++) {
1644  InputFilterPriv *ifp = ifp_from_ifilter(fg->inputs[i]);
1645  AVFrame *tmp;
1646  while (av_fifo_read(ifp->frame_queue, &tmp, 1) >= 0) {
1647  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE) {
1648  sub2video_update(ifp, INT64_MIN, (const AVSubtitle*)tmp->buf[0]->data);
1649  } else {
1650  ret = av_buffersrc_add_frame(ifp->filter, tmp);
1651  }
1652  av_frame_free(&tmp);
1653  if (ret < 0)
1654  goto fail;
1655  }
1656  }
1657 
1658  /* send the EOFs for the finished inputs */
1659  for (i = 0; i < fg->nb_inputs; i++) {
1660  InputFilterPriv *ifp = ifp_from_ifilter(fg->inputs[i]);
1661  if (ifp->eof) {
1662  ret = av_buffersrc_add_frame(ifp->filter, NULL);
1663  if (ret < 0)
1664  goto fail;
1665  }
1666  }
1667 
1668  return 0;
1669 
1670 fail:
1671  cleanup_filtergraph(fg);
1672  return ret;
1673 }
1674 
1675 int ifilter_parameters_from_dec(InputFilter *ifilter, const AVCodecContext *dec)
1676 {
1677  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1678 
1679  if (dec->codec_type == AVMEDIA_TYPE_VIDEO) {
1680  ifp->fallback.format = dec->pix_fmt;
1681  ifp->fallback.width = dec->width;
1682  ifp->fallback.height = dec->height;
1684  } else if (dec->codec_type == AVMEDIA_TYPE_AUDIO) {
1685  int ret;
1686 
1687  ifp->fallback.format = dec->sample_fmt;
1688  ifp->fallback.sample_rate = dec->sample_rate;
1689 
1690  ret = av_channel_layout_copy(&ifp->fallback.ch_layout, &dec->ch_layout);
1691  if (ret < 0)
1692  return ret;
1693  } else {
1694  // for subtitles (i.e. sub2video) we set the actual parameters,
1695  // rather than just fallback
1696  ifp->width = ifp->ist->sub2video.w;
1697  ifp->height = ifp->ist->sub2video.h;
1698 
1699  /* rectangles are AV_PIX_FMT_PAL8, but we have no guarantee that the
1700  palettes for all rectangles are identical or compatible */
1701  ifp->format = AV_PIX_FMT_RGB32;
1702 
1703  av_log(NULL, AV_LOG_VERBOSE, "sub2video: using %dx%d canvas\n", ifp->width, ifp->height);
1704  }
1705 
1706  return 0;
1707 }
1708 
1709 static int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame)
1710 {
1711  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1712  AVFrameSideData *sd;
1713  int ret;
1714 
1715  ret = av_buffer_replace(&ifp->hw_frames_ctx, frame->hw_frames_ctx);
1716  if (ret < 0)
1717  return ret;
1718 
1719  ifp->format = frame->format;
1720 
1721  ifp->width = frame->width;
1722  ifp->height = frame->height;
1723  ifp->sample_aspect_ratio = frame->sample_aspect_ratio;
1724 
1725  ifp->sample_rate = frame->sample_rate;
1726  ret = av_channel_layout_copy(&ifp->ch_layout, &frame->ch_layout);
1727  if (ret < 0)
1728  return ret;
1729 
1730  sd = av_frame_get_side_data(frame, AV_FRAME_DATA_DISPLAYMATRIX);
1731  if (sd)
1732  memcpy(ifp->displaymatrix, sd->data, sizeof(ifp->displaymatrix));
1733  ifp->displaymatrix_present = !!sd;
1734 
1735  return 0;
1736 }
1737 
1738 int filtergraph_is_simple(const FilterGraph *fg)
1739 {
1740  const FilterGraphPriv *fgp = cfgp_from_cfg(fg);
1741  return fgp->is_simple;
1742 }
1743 
1744 void fg_send_command(FilterGraph *fg, double time, const char *target,
1745  const char *command, const char *arg, int all_filters)
1746 {
1747  int ret;
1748 
1749  if (!fg->graph)
1750  return;
1751 
1752  if (time < 0) {
1753  char response[4096];
1754  ret = avfilter_graph_send_command(fg->graph, target, command, arg,
1755  response, sizeof(response),
1756  all_filters ? 0 : AVFILTER_CMD_FLAG_ONE);
1757  fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s",
1758  fg->index, ret, response);
1759  } else if (!all_filters) {
1760  fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
1761  } else {
1762  ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
1763  if (ret < 0)
1764  fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
1765  }
1766 }
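/* Illustrative sketch, not part of the original file: typical arguments a
 * caller might pass, here adjusting the gain of a volume filter instance
 * (assuming the graph contains one). */
static void fg_send_command_sketch(FilterGraph *fg)
{
    /* apply immediately, on every filter that understands the command */
    fg_send_command(fg, -1.0, "volume", "volume", "0.5", 1);
    /* or queue the same change to take effect at t=10s */
    fg_send_command(fg, 10.0, "volume", "volume", "0.5", 1);
}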
1767 
1768 static int fg_output_step(OutputFilterPriv *ofp, int flush)
1769 {
1770  FilterGraphPriv *fgp = fgp_from_fg(ofp->ofilter.graph);
1771  OutputStream *ost = ofp->ofilter.ost;
1772  AVFrame *frame = fgp->frame;
1773  AVFilterContext *filter = ofp->filter;
1774  FrameData *fd;
1775  int ret;
1776 
1777  ret = av_buffersink_get_frame_flags(filter, frame,
1778  AV_BUFFERSINK_FLAG_NO_REQUEST);
1779  if (ret < 0) {
1780  if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1781  av_log(fgp, AV_LOG_WARNING,
1782  "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1783  } else if (flush && ret == AVERROR_EOF && ofp->got_frame &&
1785  ret = enc_frame(ost, NULL);
1786  if (ret < 0)
1787  return ret;
1788  }
1789 
1790  return 1;
1791  }
1792  if (ost->finished) {
1793  av_frame_unref(frame);
1794  return 0;
1795  }
1796 
1797  if (frame->pts != AV_NOPTS_VALUE) {
1798  AVRational tb = av_buffersink_get_time_base(filter);
1799  ost->filter->last_pts = av_rescale_q(frame->pts, tb, AV_TIME_BASE_Q);
1800  frame->time_base = tb;
1801 
1802  if (debug_ts)
1803  av_log(fgp, AV_LOG_INFO, "filter_raw -> pts:%s pts_time:%s time_base:%d/%d\n",
1804  av_ts2str(frame->pts), av_ts2timestr(frame->pts, &tb), tb.num, tb.den);
1805  }
1806 
1807  fd = frame_data(frame);
1808  if (!fd) {
1809  av_frame_unref(frame);
1810  return AVERROR(ENOMEM);
1811  }
1812 
1813  // only use bits_per_raw_sample passed through from the decoder
1814  // if the filtergraph did not touch the frame data
1815  if (!fgp->is_meta)
1816  fd->bits_per_raw_sample = 0;
1817 
1818  if (ost->type == AVMEDIA_TYPE_VIDEO) {
1819  AVRational fr = av_buffersink_get_frame_rate(filter);
1820  if (fr.num > 0 && fr.den > 0) {
1821  fd->frame_rate_filter = fr;
1822 
1823  if (!frame->duration)
1824  frame->duration = av_rescale_q(1, av_inv_q(fr), frame->time_base);
1825  }
1826  }
1827 
1828  ret = enc_frame(ost, frame);
1829  av_frame_unref(frame);
1830  if (ret < 0)
1831  return ret;
1832 
1833  ofp->got_frame = 1;
1834 
1835  return 0;
1836 }
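/* Illustrative sketch, not part of the original file: the generic buffersink
 * drain pattern used above — pull whatever is already buffered in the sink
 * without requesting more input, then treat EAGAIN as "nothing left for now". */
static int buffersink_drain_sketch(AVFilterContext *sink, AVFrame *frame)
{
    int ret;

    while ((ret = av_buffersink_get_frame_flags(sink, frame,
                                                AV_BUFFERSINK_FLAG_NO_REQUEST)) >= 0) {
        /* ... hand the frame to the consumer here ... */
        av_frame_unref(frame);
    }

    return ret == AVERROR(EAGAIN) ? 0 : ret;
}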
1837 
1838 int reap_filters(FilterGraph *fg, int flush)
1839 {
1840  if (!fg->graph)
1841  return 0;
1842 
1843  /* Reap all buffers present in the buffer sinks */
1844  for (int i = 0; i < fg->nb_outputs; i++) {
1845  OutputFilterPriv *ofp = ofp_from_ofilter(fg->outputs[i]);
1846  int ret = 0;
1847 
1848  while (!ret) {
1849  ret = fg_output_step(ofp, flush);
1850  if (ret < 0)
1851  return ret;
1852  }
1853  }
1854 
1855  return 0;
1856 }
1857 
1858 void ifilter_sub2video_heartbeat(InputFilter *ifilter, int64_t pts, AVRational tb)
1859 {
1860  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1861  int64_t pts2;
1862 
1863  if (!ifilter->graph->graph)
1864  return;
1865 
1866  /* subtitles seem to be usually muxed ahead of other streams;
1867  if not, subtracting a larger time here is necessary */
1868  pts2 = av_rescale_q(pts, tb, ifp->time_base) - 1;
1869 
1870  /* do not send the heartbeat frame if the subtitle is already ahead */
1871  if (pts2 <= ifp->sub2video.last_pts)
1872  return;
1873 
1874  if (pts2 >= ifp->sub2video.end_pts || ifp->sub2video.initialize)
1875  /* if we have hit the end of the current displayed subpicture,
1876  or if we need to initialize the system, update the
1877  overlayed subpicture and its start/end times */
1878  sub2video_update(ifp, pts2 + 1, NULL);
1879 
1881  sub2video_push_ref(ifp, pts2);
1882 }
1883 
1884 int ifilter_sub2video(InputFilter *ifilter, const AVFrame *frame)
1885 {
1886  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1887  int ret;
1888 
1889  if (ifilter->graph->graph) {
1890  if (!frame) {
1891  if (ifp->sub2video.end_pts < INT64_MAX)
1892  sub2video_update(ifp, INT64_MAX, NULL);
1893 
1894  return av_buffersrc_add_frame(ifp->filter, NULL);
1895  }
1896 
1897  ifp->width = frame->width ? frame->width : ifp->width;
1898  ifp->height = frame->height ? frame->height : ifp->height;
1899 
1900  sub2video_update(ifp, INT64_MIN, (const AVSubtitle*)frame->buf[0]->data);
1901  } else if (frame) {
1902  AVFrame *tmp = av_frame_clone(frame);
1903 
1904  if (!tmp)
1905  return AVERROR(ENOMEM);
1906 
1907  ret = av_fifo_write(ifp->frame_queue, &tmp, 1);
1908  if (ret < 0) {
1909  av_frame_free(&tmp);
1910  return ret;
1911  }
1912  }
1913 
1914  return 0;
1915 }
1916 
1917 int ifilter_send_eof(InputFilter *ifilter, int64_t pts, AVRational tb)
1918 {
1919  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1920  int ret;
1921 
1922  ifp->eof = 1;
1923 
1924  if (ifp->filter) {
1925  pts = av_rescale_q_rnd(pts, tb, ifp->time_base,
1926  AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
1927 
1928  ret = av_buffersrc_close(ifp->filter, pts, AV_BUFFERSRC_FLAG_PUSH);
1929  if (ret < 0)
1930  return ret;
1931  } else {
1932  if (ifp->format < 0) {
1933  // the filtergraph was never configured, use the fallback parameters
1934  ifp->format = ifp->fallback.format;
1935  ifp->sample_rate = ifp->fallback.sample_rate;
1936  ifp->width = ifp->fallback.width;
1937  ifp->height = ifp->fallback.height;
1939 
1940  ret = av_channel_layout_copy(&ifp->ch_layout,
1941  &ifp->fallback.ch_layout);
1942  if (ret < 0)
1943  return ret;
1944 
1945  if (ifilter_has_all_input_formats(ifilter->graph)) {
1946  ret = configure_filtergraph(ifilter->graph);
1947  if (ret < 0) {
1948  av_log(NULL, AV_LOG_ERROR, "Error initializing filters!\n");
1949  return ret;
1950  }
1951  }
1952  }
1953 
1954  if (ifp->format < 0) {
1956  "Cannot determine format of input stream %d:%d after EOF\n",
1957  ifp->ist->file_index, ifp->ist->index);
1958  return AVERROR_INVALIDDATA;
1959  }
1960  }
1961 
1962  return 0;
1963 }
1964 
1965 int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame, int keep_reference)
1966 {
1967  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1968  FilterGraph *fg = ifilter->graph;
1969  AVFrameSideData *sd;
1970  int need_reinit, ret;
1971 
1972  /* determine if the parameters for this input changed */
1973  need_reinit = ifp->format != frame->format;
1974 
1975  switch (ifp->type) {
1976  case AVMEDIA_TYPE_AUDIO:
1977  need_reinit |= ifp->sample_rate != frame->sample_rate ||
1978  av_channel_layout_compare(&ifp->ch_layout, &frame->ch_layout);
1979  break;
1980  case AVMEDIA_TYPE_VIDEO:
1981  need_reinit |= ifp->width != frame->width ||
1982  ifp->height != frame->height;
1983  break;
1984  }
1985 
1986  if (!ifp->ist->reinit_filters && fg->graph)
1987  need_reinit = 0;
1988 
1989  if (!!ifp->hw_frames_ctx != !!frame->hw_frames_ctx ||
1990  (ifp->hw_frames_ctx && ifp->hw_frames_ctx->data != frame->hw_frames_ctx->data))
1991  need_reinit = 1;
1992 
1993  if ((sd = av_frame_get_side_data(frame, AV_FRAME_DATA_DISPLAYMATRIX))) {
1994  if (!ifp->displaymatrix_present ||
1995  memcmp(sd->data, ifp->displaymatrix, sizeof(ifp->displaymatrix)))
1996  need_reinit = 1;
1997  } else if (ifp->displaymatrix_present)
1998  need_reinit = 1;
1999 
2000  if (need_reinit) {
2001  ret = ifilter_parameters_from_frame(ifilter, frame);
2002  if (ret < 0)
2003  return ret;
2004  }
2005 
2006  /* (re)init the graph if possible, otherwise buffer the frame and return */
2007  if (need_reinit || !fg->graph) {
2008  if (!ifilter_has_all_input_formats(fg)) {
2009  AVFrame *tmp = av_frame_clone(frame);
2010  if (!tmp)
2011  return AVERROR(ENOMEM);
2012 
2013  ret = av_fifo_write(ifp->frame_queue, &tmp, 1);
2014  if (ret < 0)
2015  av_frame_free(&tmp);
2016 
2017  return ret;
2018  }
2019 
2020  ret = reap_filters(fg, 0);
2021  if (ret < 0 && ret != AVERROR_EOF) {
2022  av_log(fg, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2023  return ret;
2024  }
2025 
2026  ret = configure_filtergraph(fg);
2027  if (ret < 0) {
2028  av_log(fg, AV_LOG_ERROR, "Error reinitializing filters!\n");
2029  return ret;
2030  }
2031  }
2032 
2033  if (keep_reference) {
2034  ret = av_frame_ref(ifp->frame, frame);
2035  if (ret < 0)
2036  return ret;
2037  } else
2038  av_frame_move_ref(ifp->frame, frame);
2039  frame = ifp->frame;
2040 
2041  frame->pts = av_rescale_q(frame->pts, frame->time_base, ifp->time_base);
2042  frame->duration = av_rescale_q(frame->duration, frame->time_base, ifp->time_base);
2043  frame->time_base = ifp->time_base;
2044 #if LIBAVUTIL_VERSION_MAJOR < 59
2045  AV_NOWARN_DEPRECATED(
2046  frame->pkt_duration = frame->duration;
2047  )
2048 #endif
2049 
2050  ret = av_buffersrc_add_frame_flags(ifp->filter, frame,
2051  AV_BUFFERSRC_FLAG_PUSH);
2052  if (ret < 0) {
2053  av_frame_unref(frame);
2054  if (ret != AVERROR_EOF)
2055  av_log(fg, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2056  return ret;
2057  }
2058 
2059  return 0;
2060 }
2061 
2062 int fg_transcode_step(FilterGraph *graph, InputStream **best_ist)
2063 {
2064  FilterGraphPriv *fgp = fgp_from_fg(graph);
2065  int i, ret;
2066  int nb_requests, nb_requests_max = 0;
2067  InputStream *ist;
2068 
2069  if (!graph->graph) {
2070  for (int i = 0; i < graph->nb_inputs; i++) {
2071  InputFilter *ifilter = graph->inputs[i];
2072  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2073  if (ifp->format < 0 && !ifp->eof) {
2074  *best_ist = ifp->ist;
2075  return 0;
2076  }
2077  }
2078 
2079  // graph not configured, but all inputs are either initialized or EOF
2080  for (int i = 0; i < graph->nb_outputs; i++)
2081  graph->outputs[i]->ost->inputs_done = 1;
2082 
2083  return 0;
2084  }
2085 
2086  *best_ist = NULL;
2087  ret = avfilter_graph_request_oldest(graph->graph);
2088  if (ret >= 0)
2089  return reap_filters(graph, 0);
2090 
2091  if (ret == AVERROR_EOF) {
2092  reap_filters(graph, 1);
2093  for (int i = 0; i < graph->nb_outputs; i++) {
2094  OutputFilter *ofilter = graph->outputs[i];
2095  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
2096 
2097  // we are finished and no frames were ever seen at this output,
2098  // at least initialize the encoder with a dummy frame
2099  if (!ofp->got_frame) {
2100  AVFrame *frame = fgp->frame;
2101 
2102  frame->time_base = ofp->time_base;
2103  frame->format = ofp->format;
2104 
2105  frame->width = ofp->width;
2106  frame->height = ofp->height;
2107  frame->sample_aspect_ratio = ofp->sample_aspect_ratio;
2108 
2109  frame->sample_rate = ofp->sample_rate;
2110  if (ofp->ch_layout.nb_channels) {
2111  ret = av_channel_layout_copy(&frame->ch_layout, &ofp->ch_layout);
2112  if (ret < 0)
2113  return ret;
2114  }
2115 
2116  av_assert0(!frame->buf[0]);
2117 
2118  av_log(ofilter->ost, AV_LOG_WARNING,
2119  "No filtered frames for output stream, trying to "
2120  "initialize anyway.\n");
2121 
2122  enc_open(ofilter->ost, frame);
2124  }
2125 
2126  close_output_stream(ofilter->ost);
2127  }
2128  return 0;
2129  }
2130  if (ret != AVERROR(EAGAIN))
2131  return ret;
2132 
2133  for (i = 0; i < graph->nb_inputs; i++) {
2134  InputFilter *ifilter = graph->inputs[i];
2135  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2136 
2137  ist = ifp->ist;
2138  if (input_files[ist->file_index]->eagain || ifp->eof)
2139  continue;
2140  nb_requests = av_buffersrc_get_nb_failed_requests(ifp->filter);
2141  if (nb_requests > nb_requests_max) {
2142  nb_requests_max = nb_requests;
2143  *best_ist = ist;
2144  }
2145  }
2146 
2147  if (!*best_ist)
2148  for (i = 0; i < graph->nb_outputs; i++)
2149  graph->outputs[i]->ost->unavailable = 1;
2150 
2151  return 0;
2152 }
Definition: frame.c:824
InputFilterPriv::filter
AVFilterContext * filter
Definition: ffmpeg_filter.c:72
sample_fmts
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:947
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2964
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:90
FilterGraph::inputs
InputFilter ** inputs
Definition: ffmpeg.h:312
av_buffersink_get_frame_flags
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
Definition: buffersink.c:148
av_dict_count
int av_dict_count(const AVDictionary *m)
Get number of entries in dictionary.
Definition: dict.c:39
ifilter_parameters_from_frame
static int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame)
Definition: ffmpeg_filter.c:1709
AV_BUFFERSRC_FLAG_KEEP_REF
@ AV_BUFFERSRC_FLAG_KEEP_REF
Keep a reference to the frame.
Definition: buffersrc.h:53
InputStream::dec_ctx
AVCodecContext * dec_ctx
Definition: ffmpeg.h:340
AV_TIME_BASE_Q
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:264
InputFilterPriv::time_base
AVRational time_base
Definition: ffmpeg_filter.c:100
output
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce output
Definition: filter_design.txt:225
AVSubtitleRect
Definition: avcodec.h:2239
av_asprintf
char * av_asprintf(const char *fmt,...)
Definition: avstring.c:115
AVSubtitle::num_rects
unsigned num_rects
Definition: avcodec.h:2271
ofilter_bind_ost
int ofilter_bind_ost(OutputFilter *ofilter, OutputStream *ost)
Definition: ffmpeg_filter.c:628
fg_free
void fg_free(FilterGraph **pfg)
Definition: ffmpeg_filter.c:744
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:100
OutputFile::start_time
int64_t start_time
start time in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:629
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:340
tmp
static uint8_t tmp[11]
Definition: aes_ctr.c:28
AVFilterInOut::next
struct AVFilterInOut * next
next input/input in the list, NULL if this is the last
Definition: avfilter.h:1035
pixdesc.h
OutputStream::unavailable
int unavailable
Definition: ffmpeg.h:579
FilterGraphPriv::log_name
char log_name[32]
Definition: ffmpeg_filter.c:45
w
uint8_t w
Definition: llviddspenc.c:38
check_filter_outputs
int check_filter_outputs(void)
Definition: ffmpeg_filter.c:1289
AVOption
AVOption.
Definition: opt.h:251
av_buffersrc_add_frame
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
Definition: buffersrc.c:157
FilterGraph::index
int index
Definition: ffmpeg.h:308
InputFilterPriv::sample_rate
int sample_rate
Definition: ffmpeg_filter.c:97
data
const char data[16]
Definition: mxf.c:148
ifilter_parameters_from_dec
int ifilter_parameters_from_dec(InputFilter *ifilter, const AVCodecContext *dec)
Set up fallback filtering parameters from a decoder context.
Definition: ffmpeg_filter.c:1675
ffmpeg.h
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:196
AV_NOWARN_DEPRECATED
#define AV_NOWARN_DEPRECATED(code)
Disable warnings about deprecated features This is useful for sections of code kept for backward comp...
Definition: attributes.h:126
FilterGraph::nb_inputs
int nb_inputs
Definition: ffmpeg.h:313
AV_FRAME_DATA_DISPLAYMATRIX
@ AV_FRAME_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: frame.h:85
filter
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce then the filter should push the output frames on the output link immediately As an exception to the previous rule if the input frame is enough to produce several output frames then the filter needs output only at least one per link The additional frames can be left buffered in the filter
Definition: filter_design.txt:228
ofp_from_ofilter
static OutputFilterPriv * ofp_from_ofilter(OutputFilter *ofilter)
Definition: ffmpeg_filter.c:161
AVChannelLayout::order
enum AVChannelOrder order
Channel order used in this layout.
Definition: channel_layout.h:306
av_buffer_ref
AVBufferRef * av_buffer_ref(const AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:103
AVChannelLayout::nb_channels
int nb_channels
Number of channels in this layout.
Definition: channel_layout.h:311
avio_size
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:370
av_strlcatf
size_t av_strlcatf(char *dst, size_t size, const char *fmt,...)
Definition: avstring.c:103
ost
static AVStream * ost
Definition: vaapi_transcode.c:42
sample_rate
sample_rate
Definition: ffmpeg_filter.c:331
ist_filter_add
int ist_filter_add(InputStream *ist, InputFilter *ifilter, int is_simple)
Definition: ffmpeg_demux.c:892
FilterGraphPriv
Definition: ffmpeg_filter.c:41
InputFilterPriv::sub2video
struct InputFilterPriv::@5 sub2video
av_channel_layout_describe_bprint
int av_channel_layout_describe_bprint(const AVChannelLayout *channel_layout, AVBPrint *bp)
bprint variant of av_channel_layout_describe().
Definition: channel_layout.c:736
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:30
avfilter_graph_free
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
Definition: avfiltergraph.c:119
choose_pix_fmts
static int choose_pix_fmts(OutputFilter *ofilter, AVBPrint *bprint, const char **dst)
Definition: ffmpeg_filter.c:276
OutputFile::nb_streams
int nb_streams
Definition: ffmpeg.h:624
AUTO_INSERT_FILTER
#define AUTO_INSERT_FILTER(opt_name, filter_name, arg)
InputStream
Definition: ffmpeg.h:320
filter_nbthreads
char * filter_nbthreads
Definition: ffmpeg_opt.c:87
debug_ts
int debug_ts
Definition: ffmpeg_opt.c:81
avfilter_graph_create_filter
int avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt, const char *name, const char *args, void *opaque, AVFilterGraph *graph_ctx)
Create and add a filter instance into an existing graph.
Definition: avfiltergraph.c:138
avfilter_graph_alloc_filter
AVFilterContext * avfilter_graph_alloc_filter(AVFilterGraph *graph, const AVFilter *filter, const char *name)
Create a new filter instance in a filter graph.
Definition: avfiltergraph.c:165
avio_open2
int avio_open2(AVIOContext **s, const char *url, int flags, const AVIOInterruptCB *int_cb, AVDictionary **options)
Create and initialize a AVIOContext for accessing the resource indicated by url.
Definition: aviobuf.c:1265
AV_OPT_TYPE_BINARY
@ AV_OPT_TYPE_BINARY
offset must point to a pointer immediately followed by an int for the length
Definition: opt.h:231
OutputFilterPriv
Definition: ffmpeg_filter.c:137
AVCodecContext::ch_layout
AVChannelLayout ch_layout
Audio channel layout.
Definition: avcodec.h:2105
InputStream::sub2video
struct InputStream::sub2video sub2video
fail
#define fail()
Definition: checkasm.h:138
av_fifo_write
int av_fifo_write(AVFifo *f, const void *buf, size_t nb_elems)
Write data into a FIFO.
Definition: fifo.c:188
sub2video_push_ref
static void sub2video_push_ref(InputFilterPriv *ifp, int64_t pts)
Definition: ffmpeg_filter.c:219
avfilter_graph_alloc
AVFilterGraph * avfilter_graph_alloc(void)
Allocate a filter graph.
Definition: avfiltergraph.c:82
AV_PIX_FMT_FLAG_HWACCEL
#define AV_PIX_FMT_FLAG_HWACCEL
Pixel format is an HW accelerated format.
Definition: pixdesc.h:128
samplefmt.h
AVERROR_OPTION_NOT_FOUND
#define AVERROR_OPTION_NOT_FOUND
Option not found.
Definition: error.h:63
avfilter_graph_segment_free
void avfilter_graph_segment_free(AVFilterGraphSegment **seg)
Free the provided AVFilterGraphSegment and everything associated with it.
Definition: graphparser.c:275
sub2video_get_blank_frame
static int sub2video_get_blank_frame(InputFilterPriv *ifp)
Definition: ffmpeg_filter.c:168
AV_BPRINT_SIZE_AUTOMATIC
#define AV_BPRINT_SIZE_AUTOMATIC
ifilter_has_all_input_formats
static int ifilter_has_all_input_formats(FilterGraph *fg)
Definition: ffmpeg_filter.c:525
val
static double val(void *priv, double ch)
Definition: aeval.c:78
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
avfilter_graph_segment_parse
int avfilter_graph_segment_parse(AVFilterGraph *graph, const char *graph_str, int flags, AVFilterGraphSegment **seg)
Parse a textual filtergraph description into an intermediate form.
Definition: graphparser.c:459
pts
static int64_t pts
Definition: transcode_aac.c:643
AV_PKT_DATA_DISPLAYMATRIX
@ AV_PKT_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: packet.h:109
av_opt_set
int av_opt_set(void *obj, const char *name, const char *val, int search_flags)
Definition: opt.c:487
graph_is_meta
static int graph_is_meta(AVFilterGraph *graph)
Definition: ffmpeg_filter.c:1529
ifilter_sub2video_heartbeat
void ifilter_sub2video_heartbeat(InputFilter *ifilter, int64_t pts, AVRational tb)
Definition: ffmpeg_filter.c:1858
fgp_from_fg
static FilterGraphPriv * fgp_from_fg(FilterGraph *fg)
Definition: ffmpeg_filter.c:59
OutputFilterPriv::sample_rate
int sample_rate
Definition: ffmpeg_filter.c:145
OutputFilterPriv::time_base
AVRational time_base
Definition: ffmpeg_filter.c:148
AVRational::num
int num
Numerator.
Definition: rational.h:59
InputFile
Definition: ffmpeg.h:395
AVFilterPad
A filter pad used for either input or output.
Definition: internal.h:47
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:88
check_stream_specifier
int check_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec)
Check if the given stream matches a stream specifier.
Definition: cmdutils.c:917
OutputFile::shortest
int shortest
Definition: ffmpeg.h:631
avfilter_inout_free
void avfilter_inout_free(AVFilterInOut **inout)
Free the supplied list of AVFilterInOut and set *inout to NULL.
Definition: graphparser.c:75
avassert.h
FrameData::frame_rate_filter
AVRational frame_rate_filter
Definition: ffmpeg.h:645
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
InputFilterPriv
Definition: ffmpeg_filter.c:69
av_fifo_read
int av_fifo_read(AVFifo *f, void *buf, size_t nb_elems)
Read data from a FIFO.
Definition: fifo.c:240
duration
int64_t duration
Definition: movenc.c:64
av_buffersink_get_frame_rate
AVRational av_buffersink_get_frame_rate(const AVFilterContext *ctx)
ifilter_alloc
static InputFilter * ifilter_alloc(FilterGraph *fg)
Definition: ffmpeg_filter.c:718
AVFilterChain::filters
AVFilterParams ** filters
Definition: avfilter.h:1211
av_dict_get
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:62
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:79
s
#define s(width, name)
Definition: cbs_vp9.c:198
InputFilterPriv::frame
AVFrame * frame
Definition: ffmpeg_filter.c:77
FilterGraph::outputs
OutputFilter ** outputs
Definition: ffmpeg.h:314
InputStream::framerate
AVRational framerate
Definition: ffmpeg.h:349
configure_input_audio_filter
static int configure_input_audio_filter(FilterGraph *fg, InputFilter *ifilter, AVFilterInOut *in)
Definition: ffmpeg_filter.c:1443
format
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample format(the sample packing is implied by the sample format) and sample rate. The lists are not just lists
ofilter_alloc
static OutputFilter * ofilter_alloc(FilterGraph *fg)
Definition: ffmpeg_filter.c:548
AVDictionaryEntry::key
char * key
Definition: dict.h:90
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
AV_CHANNEL_ORDER_UNSPEC
@ AV_CHANNEL_ORDER_UNSPEC
Only the channel count is specified, without any further information about the channel order.
Definition: channel_layout.h:112
OutputFilter::linklabel
uint8_t * linklabel
Definition: ffmpeg.h:298
InputFilter
Definition: ffmpeg.h:286
av_buffersink_get_format
int av_buffersink_get_format(const AVFilterContext *ctx)
av_buffersink_get_time_base
AVRational av_buffersink_get_time_base(const AVFilterContext *ctx)
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:297
OutputFilter::ost
struct OutputStream * ost
Definition: ffmpeg.h:292
ctx
AVFormatContext * ctx
Definition: movenc.c:48
nb_streams
static int nb_streams
Definition: ffprobe.c:315
av_frame_clone
AVFrame * av_frame_clone(const AVFrame *src)
Create a new frame that references the same data as src.
Definition: frame.c:609
av_rescale_q
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
AVSubtitle::pts
int64_t pts
Same as packet pts, in AV_TIME_BASE.
Definition: avcodec.h:2273
graph_opts_apply
static int graph_opts_apply(AVFilterGraphSegment *seg)
Definition: ffmpeg_filter.c:457
fg_transcode_step
int fg_transcode_step(FilterGraph *graph, InputStream **best_ist)
Perform a step of transcoding for the specified filter graph.
Definition: ffmpeg_filter.c:2062
InputFilter::graph
struct FilterGraph * graph
Definition: ffmpeg.h:287
av_get_sample_fmt_name
const char * av_get_sample_fmt_name(enum AVSampleFormat sample_fmt)
Return the name of sample_fmt, or NULL if sample_fmt is not recognized.
Definition: samplefmt.c:51
key
const char * key
Definition: hwcontext_opencl.c:174
fsize
static int64_t fsize(FILE *f)
Definition: audiomatch.c:29
fg_item_name
static const char * fg_item_name(void *obj)
Definition: ffmpeg_filter.c:793
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:80
av_opt_find
const AVOption * av_opt_find(void *obj, const char *name, const char *unit, int opt_flags, int search_flags)
Look for an option in an object.
Definition: opt.c:1772
arg
const char * arg
Definition: jacosubdec.c:67
OutputFilterPriv::ch_layouts
const AVChannelLayout * ch_layouts
Definition: ffmpeg_filter.c:154
OutputFilterPriv::width
int width
Definition: ffmpeg_filter.c:144
AVFormatContext
Format I/O context.
Definition: avformat.h:1107
avfilter_get_by_name
const AVFilter * avfilter_get_by_name(const char *name)
Get a filter definition matching the given name.
Definition: allfilters.c:625
AVStream::codecpar
AVCodecParameters * codecpar
Codec parameters associated with this stream.
Definition: avformat.h:864
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
OutputFilter::name
uint8_t * name
Definition: ffmpeg.h:294
fabs
static __device__ float fabs(float a)
Definition: cuda_runtime.h:182
avfilter_graph_config
int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx)
Check validity and configure all the links and formats in the graph.
Definition: avfiltergraph.c:1169
AVStream::time_base
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Definition: avformat.h:880
avfilter_graph_segment_apply
int avfilter_graph_segment_apply(AVFilterGraphSegment *seg, int flags, AVFilterInOut **inputs, AVFilterInOut **outputs)
Apply all filter/link descriptions from a graph segment to the associated filtergraph.
Definition: graphparser.c:880
NULL
#define NULL
Definition: coverity.c:32
av_opt_set_bin
int av_opt_set_bin(void *obj, const char *name, const uint8_t *val, int len, int search_flags)
Definition: opt.c:639
InputStream::sub2video::w
int w
Definition: ffmpeg.h:359
OutputFilterPriv::ch_layout
AVChannelLayout ch_layout
Definition: ffmpeg_filter.c:146
AVFilterParams
Parameters describing a filter to be created in a filtergraph.
Definition: avfilter.h:1143
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:139
InputStream::st
AVStream * st
Definition: ffmpeg.h:326
avfilter_graph_set_auto_convert
void avfilter_graph_set_auto_convert(AVFilterGraph *graph, unsigned flags)
Enable or disable automatic format conversion inside the graph.
Definition: avfiltergraph.c:160
InputFilterPriv::displaymatrix_present
int displaymatrix_present
Definition: ffmpeg_filter.c:106
AVFilterParams::filter
AVFilterContext * filter
The filter context.
Definition: avfilter.h:1154
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
AVFilterChain::nb_filters
size_t nb_filters
Definition: avfilter.h:1212
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:78
AVFilterGraph::filters
AVFilterContext ** filters
Definition: avfilter.h:866
InputFilterPriv::linklabel
uint8_t * linklabel
Definition: ffmpeg_filter.c:81
InputFilterPriv::ist
InputStream * ist
Definition: ffmpeg_filter.c:74
filter_opt_apply
static int filter_opt_apply(AVFilterContext *f, const char *key, const char *val)
Definition: ffmpeg_filter.c:402
OutputFilterPriv::sample_aspect_ratio
AVRational sample_aspect_ratio
Definition: ffmpeg_filter.c:149
ifilter_bind_ist
static int ifilter_bind_ist(InputFilter *ifilter, InputStream *ist)
Definition: ffmpeg_filter.c:565
AV_ROUND_NEAR_INF
@ AV_ROUND_NEAR_INF
Round to nearest and halfway cases away from zero.
Definition: mathematics.h:135
avfilter_graph_request_oldest
int avfilter_graph_request_oldest(AVFilterGraph *graph)
Request a frame on the oldest sink link.
Definition: avfiltergraph.c:1297
AVFilterGraphSegment::chains
AVFilterChain ** chains
A list of filter chain contained in this segment.
Definition: avfilter.h:1235
AVFilterGraph
Definition: avfilter.h:864
inputs
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
Definition: filter_design.txt:243
InputFilterPriv::sample_aspect_ratio
AVRational sample_aspect_ratio
Definition: ffmpeg_filter.c:95
FilterGraph::nb_outputs
int nb_outputs
Definition: ffmpeg.h:315
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
OutputFilterPriv::formats
const int * formats
Definition: ffmpeg_filter.c:153
av_opt_set_int
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
Definition: opt.c:624
input_files
InputFile ** input_files
Definition: ffmpeg.c:123
OutputFile::streams
OutputStream ** streams
Definition: ffmpeg.h:623
AV_CLASS_CATEGORY_FILTER
@ AV_CLASS_CATEGORY_FILTER
Definition: log.h:36
FilterGraphPriv::fg
FilterGraph fg
Definition: ffmpeg_filter.c:42
OutputFilterPriv::ofilter
OutputFilter ofilter
Definition: ffmpeg_filter.c:138
FilterGraph
Definition: ffmpeg.h:306
AVFilterGraphSegment
A parsed representation of a filtergraph segment.
Definition: avfilter.h:1224
AVFilterInOut::pad_idx
int pad_idx
index of the filt_ctx pad to use for linking
Definition: avfilter.h:1032
av_buffersrc_close
int av_buffersrc_close(AVFilterContext *ctx, int64_t pts, unsigned flags)
Close the buffer source after EOF.
Definition: buffersrc.c:279
AVFilterGraph::scale_sws_opts
char * scale_sws_opts
sws options to use for the auto-inserted scale filters
Definition: avfilter.h:869
filtergraph_is_simple
int filtergraph_is_simple(const FilterGraph *fg)
Definition: ffmpeg_filter.c:1738
ifilter_sub2video
int ifilter_sub2video(InputFilter *ifilter, const AVFrame *frame)
Definition: ffmpeg_filter.c:1884
f
f
Definition: af_crystalizer.c:121
AVIOContext
Bytestream IO Context.
Definition: avio.h:166
av_ts2timestr
#define av_ts2timestr(ts, tb)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:76
AVMediaType
AVMediaType
Definition: avutil.h:199
InputFilterPriv::hw_frames_ctx
AVBufferRef * hw_frames_ctx
Definition: ffmpeg_filter.c:104
AVFifo
Definition: fifo.c:35
AVFILTER_AUTO_CONVERT_NONE
@ AVFILTER_AUTO_CONVERT_NONE
all automatic conversions disabled
Definition: avfilter.h:997
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:361
av_bprint_finalize
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:240
InputFilterPriv::displaymatrix
int32_t displaymatrix[9]
Definition: ffmpeg_filter.c:107
AVChannelLayout
An AVChannelLayout holds information about the channel layout of audio data.
Definition: channel_layout.h:301
init_input_filter
static int init_input_filter(FilterGraph *fg, InputFilter *ifilter)
Definition: ffmpeg_filter.c:937
av_buffersrc_parameters_alloc
AVBufferSrcParameters * av_buffersrc_parameters_alloc(void)
Allocate a new AVBufferSrcParameters instance.
Definition: buffersrc.c:84
InputStream::file_index
int file_index
Definition: ffmpeg.h:323
AVFilterInOut::filter_ctx
AVFilterContext * filter_ctx
filter context associated to this input/output
Definition: avfilter.h:1029
output_files
OutputFile ** output_files
Definition: ffmpeg.c:126
av_err2str
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:121
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
avfilter_link
int avfilter_link(AVFilterContext *src, unsigned srcpad, AVFilterContext *dst, unsigned dstpad)
Link two filters together.
Definition: avfilter.c:149
AVBufferSrcParameters::hw_frames_ctx
AVBufferRef * hw_frames_ctx
Video with a hwaccel pixel format only.
Definition: buffersrc.h:106
start_time
static int64_t start_time
Definition: ffplay.c:328
AVFILTER_FLAG_HWDEVICE
#define AVFILTER_FLAG_HWDEVICE
The filter can create hardware frames using AVFilterContext.hw_device_ctx.
Definition: avfilter.h:138
FilterGraph::graph
AVFilterGraph * graph
Definition: ffmpeg.h:310
AVCodecContext::sample_fmt
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:1080
AV_SAMPLE_FMT_NONE
@ AV_SAMPLE_FMT_NONE
Definition: samplefmt.h:56
insert_trim
static int insert_trim(int64_t start_time, int64_t duration, AVFilterContext **last_filter, int *pad_idx, const char *filter_name)
Definition: ffmpeg_filter.c:1014
InputFilterPriv::initialize
unsigned int initialize
Definition: ffmpeg_filter.c:128
copy_ts
int copy_ts
Definition: ffmpeg_opt.c:78
graph_parse
static int graph_parse(AVFilterGraph *graph, const char *desc, AVFilterInOut **inputs, AVFilterInOut **outputs, AVBufferRef *hw_device)
Definition: ffmpeg_filter.c:481
avfilter_graph_queue_command
int avfilter_graph_queue_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, int flags, double ts)
Queue a command for one or more filter instances.
Definition: avfiltergraph.c:1217
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
AVFrameSideData::data
uint8_t * data
Definition: frame.h:248
read_binary
static int read_binary(const char *path, uint8_t **data, int *len)
Definition: ffmpeg_filter.c:354
FilterGraphPriv::disable_conversions
int disable_conversions
Definition: ffmpeg_filter.c:51
frame_data
FrameData * frame_data(AVFrame *frame)
Get our axiliary frame data attached to the frame, allocating it if needed.
Definition: ffmpeg.c:429
AVSubtitle::end_display_time
uint32_t end_display_time
Definition: avcodec.h:2270
FilterGraphPriv::graph_desc
const char * graph_desc
Definition: ffmpeg_filter.c:53
allocate_array_elem
void * allocate_array_elem(void *ptr, size_t elem_size, int *nb_elems)
Atomically add a new element to an array of pointers, i.e.
Definition: cmdutils.c:1039
AV_OPT_SEARCH_CHILDREN
#define AV_OPT_SEARCH_CHILDREN
Search in possible children of the given object first.
Definition: opt.h:563
InputFilterPriv::width
int width
Definition: ffmpeg_filter.c:94
AV_PIX_FMT_RGB32
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:441
filter_is_buffersrc
static int filter_is_buffersrc(const AVFilterContext *f)
Definition: ffmpeg_filter.c:1522
av_dict_free
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
Definition: dict.c:225
FF_COMPLIANCE_UNOFFICIAL
#define FF_COMPLIANCE_UNOFFICIAL
Allow unofficial extensions.
Definition: defs.h:61
av_buffersink_get_type
enum AVMediaType av_buffersink_get_type(const AVFilterContext *ctx)
OutputFilter::type
enum AVMediaType type
Definition: ffmpeg.h:300
av_channel_layout_compare
int av_channel_layout_compare(const AVChannelLayout *chl, const AVChannelLayout *chl1)
Check whether two channel layouts are semantically the same, i.e.
Definition: channel_layout.c:938
SUBTITLE_BITMAP
@ SUBTITLE_BITMAP
A bitmap, pict will be set.
Definition: avcodec.h:2222
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:191
sample_rates
sample_rates
Definition: ffmpeg_filter.c:331
configure_filtergraph
static int configure_filtergraph(FilterGraph *fg)
Definition: ffmpeg_filter.c:1546
avfilter_init_str
int avfilter_init_str(AVFilterContext *filter, const char *args)
Initialize a filter with the supplied parameters.
Definition: avfilter.c:904
buffersink.h
av_channel_layout_default
void av_channel_layout_default(AVChannelLayout *ch_layout, int nb_channels)
Get the default channel layout for a given number of channels.
Definition: channel_layout.c:968
av_buffersink_get_w
int av_buffersink_get_w(const AVFilterContext *ctx)
InputFilterPriv::fallback
struct InputFilterPriv::@4 fallback
bprint.h
av_buffersrc_parameters_set
int av_buffersrc_parameters_set(AVFilterContext *ctx, AVBufferSrcParameters *param)
Initialize the buffersrc or abuffersrc filter with the provided parameters.
Definition: buffersrc.c:95
configure_input_filter
static int configure_input_filter(FilterGraph *fg, InputFilter *ifilter, AVFilterInOut *in)
Definition: ffmpeg_filter.c:1502
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:244
configure_output_filter
static int configure_output_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
Definition: ffmpeg_filter.c:1274
FrameData::bits_per_raw_sample
int bits_per_raw_sample
Definition: ffmpeg.h:647
fg_send_command
void fg_send_command(FilterGraph *fg, double time, const char *target, const char *command, const char *arg, int all_filters)
Definition: ffmpeg_filter.c:1744
FilterGraphPriv::is_simple
int is_simple
Definition: ffmpeg_filter.c:47
av_buffersrc_add_frame_flags
int attribute_align_arg av_buffersrc_add_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Add a frame to the buffer source.
Definition: buffersrc.c:176
src2
const pixel * src2
Definition: h264pred_template.c:422
display.h
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:56
AVSampleFormat
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:55
InputFile::ctx
AVFormatContext * ctx
Definition: ffmpeg.h:403
av_frame_move_ref
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:649
cfgp_from_cfg
static const FilterGraphPriv * cfgp_from_cfg(const FilterGraph *fg)
Definition: ffmpeg_filter.c:64
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:622
InputFilterPriv::eof
int eof
Definition: ffmpeg_filter.c:89
tb
#define tb
Definition: regdef.h:68
av_inv_q
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
ifilter_send_eof
int ifilter_send_eof(InputFilter *ifilter, int64_t pts, AVRational tb)
Definition: ffmpeg_filter.c:1917
av_buffer_replace
int av_buffer_replace(AVBufferRef **pdst, const AVBufferRef *src)
Ensure dst refers to the same data as src.
Definition: buffer.c:233
len
int len
Definition: vorbis_enc_data.h:426
filtergraphs
FilterGraph ** filtergraphs
Definition: ffmpeg.c:129
int_cb
const AVIOInterruptCB int_cb
Definition: ffmpeg.c:346
AVCodecContext::height
int height
Definition: avcodec.h:621
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:658
av_buffersink_get_h
int av_buffersink_get_h(const AVFilterContext *ctx)
AVFilter
Filter definition.
Definition: avfilter.h:166
ifp_from_ifilter
static InputFilterPriv * ifp_from_ifilter(InputFilter *ifilter)
Definition: ffmpeg_filter.c:132
AV_BUFFERSINK_FLAG_NO_REQUEST
#define AV_BUFFERSINK_FLAG_NO_REQUEST
Tell av_buffersink_get_buffer_ref() not to request a frame from its input.
Definition: buffersink.h:95
ret
ret
Definition: filter_design.txt:187
AVStream
Stream structure.
Definition: avformat.h:841
AV_LOG_FATAL
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:174
reap_filters
int reap_filters(FilterGraph *fg, int flush)
Get and encode new output from specified filtergraph, without causing activity.
Definition: ffmpeg_filter.c:1838
pixfmt.h
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:71
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
InputFilter::name
uint8_t * name
Definition: ffmpeg.h:288
av_opt_eval_int
int av_opt_eval_int(void *obj, const AVOption *o, const char *val, int *int_out)
av_channel_layout_check
int av_channel_layout_check(const AVChannelLayout *channel_layout)
Check whether a channel layout is valid, i.e.
Definition: channel_layout.c:912
InputFile::streams
InputStream ** streams
Definition: ffmpeg.h:419
insert_filter
static int insert_filter(AVFilterContext **last_filter, int *pad_idx, const char *filter_name, const char *args)
Definition: ffmpeg_filter.c:1065
InputStream::reinit_filters
int reinit_filters
Definition: ffmpeg.h:375
AVFilterParams::opts
AVDictionary * opts
Options to be apllied to the filter.
Definition: avfilter.h:1195
av_stream_get_side_data
uint8_t * av_stream_get_side_data(const AVStream *st, enum AVPacketSideDataType type, size_t *size)
Get side information from stream.
Definition: avformat.c:143
choose_channel_layouts
static void choose_channel_layouts(OutputFilterPriv *ofp, AVBPrint *bprint)
Definition: ffmpeg_filter.c:334
InputFile::eagain
int eagain
Definition: ffmpeg.h:405
av_bprintf
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:99
av_fifo_alloc2
AVFifo * av_fifo_alloc2(size_t nb_elems, size_t elem_size, unsigned int flags)
Allocate and initialize an AVFifo with a given element size.
Definition: fifo.c:47
AVOption::type
enum AVOptionType type
Definition: opt.h:265
avfilter_pad_get_type
enum AVMediaType avfilter_pad_get_type(const AVFilterPad *pads, int pad_idx)
Get the type of an AVFilterPad.
Definition: avfilter.c:937
configure_input_video_filter
static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter, AVFilterInOut *in)
Definition: ffmpeg_filter.c:1318
av_get_media_type_string
const char * av_get_media_type_string(enum AVMediaType media_type)
Return a string describing the media_type enum, NULL if media_type is unknown.
Definition: utils.c:28
AVCodecContext
main external API structure.
Definition: avcodec.h:441
AVStream::index
int index
stream index in AVFormatContext
Definition: avformat.h:847
AV_BUFFERSRC_FLAG_PUSH
@ AV_BUFFERSRC_FLAG_PUSH
Immediately push the frame to the output.
Definition: buffersrc.h:46
DEF_CHOOSE_FORMAT
#define DEF_CHOOSE_FORMAT(name, type, var, supported_list, none, printf_format, get_name)
Definition: ffmpeg_filter.c:305
channel_layout.h
AVBufferSrcParameters
This structure contains the parameters describing the frames that will be passed to this filter.
Definition: buffersrc.h:73
av_buffersink_get_sample_rate
int av_buffersink_get_sample_rate(const AVFilterContext *ctx)
AVBufferSrcParameters::format
int format
video: the pixel format, value corresponds to enum AVPixelFormat audio: the sample format,...
Definition: buffersrc.h:78
describe_filter_link
static char * describe_filter_link(FilterGraph *fg, AVFilterInOut *inout, int in)
Definition: ffmpeg_filter.c:536
outputs
static const AVFilterPad outputs[]
Definition: af_afwtdn.c:1291
AVRational::den
int den
Denominator.
Definition: rational.h:60
AVFilterChain
A filterchain is a list of filter specifications.
Definition: avfilter.h:1210
InputFilterPriv::frame_queue
AVFifo * frame_queue
Definition: ffmpeg_filter.c:102
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
avfilter.h
InputFilterPriv::type_src
enum AVMediaType type_src
Definition: ffmpeg_filter.c:87
av_bprint_clear
void av_bprint_clear(AVBPrint *buf)
Reset the string to "" but keep internal allocated data.
Definition: bprint.c:232
av_channel_layout_uninit
void av_channel_layout_uninit(AVChannelLayout *channel_layout)
Free any allocated data in the channel layout and reset the channel count to 0.
Definition: channel_layout.c:636
FilterGraphPriv::is_meta
int is_meta
Definition: ffmpeg_filter.c:50
AVFILTER_FLAG_METADATA_ONLY
#define AVFILTER_FLAG_METADATA_ONLY
The filter is a "metadata" filter - it does not modify the frame data in any way.
Definition: avfilter.h:133
avio_read
int avio_read(AVIOContext *s, unsigned char *buf, int size)
Read size bytes from AVIOContext into buf.
Definition: aviobuf.c:659
OutputFilterPriv::sample_rates
const int * sample_rates
Definition: ffmpeg_filter.c:155
AVERROR_FILTER_NOT_FOUND
#define AVERROR_FILTER_NOT_FOUND
Filter not found.
Definition: error.h:60
sub2video_copy_rect
static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h, AVSubtitleRect *r)
Definition: ffmpeg_filter.c:188
AVFilterGraphSegment::nb_chains
size_t nb_chains
Definition: avfilter.h:1236
AVFilterContext
An instance of a filter.
Definition: avfilter.h:397
FilterGraph::class
const AVClass * class
Definition: ffmpeg.h:307
av_channel_layout_copy
int av_channel_layout_copy(AVChannelLayout *dst, const AVChannelLayout *src)
Make a copy of a channel layout.
Definition: channel_layout.c:643
OutputFilter
Definition: ffmpeg.h:291
OutputFilterPriv::got_frame
int got_frame
Definition: ffmpeg_filter.c:158
cleanup_filtergraph
static void cleanup_filtergraph(FilterGraph *fg)
Definition: ffmpeg_filter.c:1512
InputFilterPriv::ifilter
InputFilter ifilter
Definition: ffmpeg_filter.c:70
AVIO_FLAG_READ
#define AVIO_FLAG_READ
read-only
Definition: avio.h:636
AVCodecContext::codec_type
enum AVMediaType codec_type
Definition: avcodec.h:449
av_strdup
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:270
desc
const char * desc
Definition: libsvtav1.c:83
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
init_simple_filtergraph
int init_simple_filtergraph(InputStream *ist, OutputStream *ost, char *graph_desc)
Definition: ffmpeg_filter.c:897
av_buffersrc_get_nb_failed_requests
unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src)
Get the number of failed requests.
Definition: buffersrc.c:313
flush
void(* flush)(AVBSFContext *ctx)
Definition: dts2pts_bsf.c:367
start_at_zero
int start_at_zero
Definition: ffmpeg_opt.c:79
AVBufferRef
A reference to a data buffer.
Definition: buffer.h:82
auto_conversion_filters
int auto_conversion_filters
Definition: ffmpeg_opt.c:90
InputStream::sub2video::h
int h
Definition: ffmpeg.h:359
fg_output_step
static int fg_output_step(OutputFilterPriv *ofp, int flush)
Definition: ffmpeg_filter.c:1768
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:246
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
OutputStream::autoscale
int autoscale
Definition: ffmpeg.h:552
InputStream::index
int index
Definition: ffmpeg.h:324
av_free
#define av_free(p)
Definition: tableprint_vlc.h:33
AVDictionaryEntry
Definition: dict.h:89
AV_ROUND_PASS_MINMAX
@ AV_ROUND_PASS_MINMAX
Flag telling rescaling functions to pass INT64_MIN/MAX through unchanged, avoiding special cases for ...
Definition: mathematics.h:159
fg_class
static const AVClass fg_class
Definition: ffmpeg_filter.c:800
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
av_dict_get_string
int av_dict_get_string(const AVDictionary *m, char **buffer, const char key_val_sep, const char pairs_sep)
Get dictionary entries as a string.
Definition: dict.c:252
file_read
char * file_read(const char *filename)
Definition: ffmpeg_opt.c:750
InputFilterPriv::format
int format
Definition: ffmpeg_filter.c:92
InputFilterPriv::end_pts
int64_t end_pts
marks if sub2video_update should force an initialization
Definition: ffmpeg_filter.c:125
nb_filtergraphs
int nb_filtergraphs
Definition: ffmpeg.c:130
d
d
Definition: ffmpeg_filter.c:331
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:621
int32_t
int32_t
Definition: audioconvert.c:56
sub2video_update
static void sub2video_update(InputFilterPriv *ifp, int64_t heartbeat_pts, const AVSubtitle *sub)
Definition: ffmpeg_filter.c:234
imgutils.h
timestamp.h
OutputStream
Definition: mux.c:53
OutputStream::st
AVStream * st
Definition: mux.c:54
OutputFilterPriv::format
int format
Definition: ffmpeg_filter.c:143
avfilter_graph_send_command
int avfilter_graph_send_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, char *res, int res_len, int flags)
Send a command to one or more filter instances.
Definition: avfiltergraph.c:1187
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
av_fifo_freep2
void av_fifo_freep2(AVFifo **f)
Free an AVFifo and reset pointer to NULL.
Definition: fifo.c:286
configure_output_video_filter
static int configure_output_video_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
Definition: ffmpeg_filter.c:1087
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
av_ts2str
#define av_ts2str(ts)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:54
h
h
Definition: vp9dsp_template.c:2038
av_bprint_chars
void av_bprint_chars(AVBPrint *buf, char c, unsigned n)
Append char c n times to a print buffer.
Definition: bprint.c:145
hw_device_for_filter
AVBufferRef * hw_device_for_filter(void)
Get a hardware device to be used with this filtergraph.
Definition: ffmpeg_hw.c:342
AVDictionaryEntry::value
char * value
Definition: dict.h:91
AVFilterGraph::nb_filters
unsigned nb_filters
Definition: avfilter.h:867
avstring.h
OutputFile::recording_time
int64_t recording_time
desired length of the resulting file in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:628
OutputStream::inputs_done
int inputs_done
Definition: ffmpeg.h:586
AVFilterInOut
A linked-list of the inputs/outputs of the filter chain.
Definition: avfilter.h:1024
OutputFilterPriv::height
int height
Definition: ffmpeg_filter.c:144
ifilter_send_frame
int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame, int keep_reference)
Definition: ffmpeg_filter.c:1965
snprintf
#define snprintf
Definition: snprintf.h:34
enc_open
int enc_open(OutputStream *ost, AVFrame *frame)
Definition: ffmpeg_enc.c:285
fg_create
int fg_create(FilterGraph **pfg, char *graph_desc)
Create a new filtergraph in the global filtergraph list.
Definition: ffmpeg_filter.c:807
buffersrc.h
enc_frame
int enc_frame(OutputStream *ost, AVFrame *frame)
Definition: ffmpeg_enc.c:1184
ist_find_unused
InputStream * ist_find_unused(enum AVMediaType type)
Find an unused input stream of given type.
Definition: ffmpeg_demux.c:135
sub2video_prepare
static void sub2video_prepare(InputFilterPriv *ifp)
Definition: ffmpeg_filter.c:1307
AVCodecContext::sample_aspect_ratio
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel.
Definition: avcodec.h:822
av_rescale_q_rnd
int64_t av_rescale_q_rnd(int64_t a, AVRational bq, AVRational cq, enum AVRounding rnd)
Rescale a 64-bit integer by 2 rational numbers with specified rounding.
Definition: mathematics.c:134
av_dict_iterate
const AVDictionaryEntry * av_dict_iterate(const AVDictionary *m, const AVDictionaryEntry *prev)
Iterate over a dictionary.
Definition: dict.c:44
AVSubtitle::start_display_time
uint32_t start_display_time
Definition: avcodec.h:2269
configure_output_audio_filter
static int configure_output_audio_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
Definition: ffmpeg_filter.c:1166
AV_FIFO_FLAG_AUTO_GROW
#define AV_FIFO_FLAG_AUTO_GROW
Automatically resize the FIFO on writes, so that the data fits.
Definition: fifo.h:67
InputFilterPriv::height
int height
Definition: ffmpeg_filter.c:94
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2884
filter_complex_nbthreads
int filter_complex_nbthreads
Definition: ffmpeg_opt.c:88
OutputFile
Definition: ffmpeg.h:615
InputStream::autorotate
int autorotate
Definition: ffmpeg.h:354