FFmpeg
ffmpeg_filter.c
1 /*
2  * ffmpeg filter configuration
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <stdint.h>
22 
23 #include "ffmpeg.h"
24 
25 #include "libavfilter/avfilter.h"
26 #include "libavfilter/buffersink.h"
27 #include "libavfilter/buffersrc.h"
28 
30 
31 #include "libavutil/avassert.h"
32 #include "libavutil/avstring.h"
33 #include "libavutil/bprint.h"
34 #include "libavutil/channel_layout.h"
35 #include "libavutil/display.h"
36 #include "libavutil/opt.h"
37 #include "libavutil/pixdesc.h"
38 #include "libavutil/pixfmt.h"
39 #include "libavutil/imgutils.h"
40 #include "libavutil/samplefmt.h"
41 
42 static const enum AVPixelFormat *get_compliance_unofficial_pix_fmts(enum AVCodecID codec_id, const enum AVPixelFormat default_formats[])
43 {
44  static const enum AVPixelFormat mjpeg_formats[] =
45  { AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P,
46  AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P,
47  AV_PIX_FMT_NONE };
48  static const enum AVPixelFormat ljpeg_formats[] =
49  { AV_PIX_FMT_BGR24, AV_PIX_FMT_BGRA, AV_PIX_FMT_BGR0,
50  AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P,
51  AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P,
52  AV_PIX_FMT_NONE};
53 
54  if (codec_id == AV_CODEC_ID_MJPEG) {
55  return mjpeg_formats;
56  } else if (codec_id == AV_CODEC_ID_LJPEG) {
57  return ljpeg_formats;
58  } else {
59  return default_formats;
60  }
61 }
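/* The lists above are only used when the encoder runs with -strict unofficial
 * (or lower): the FF_COMPLIANCE_UNOFFICIAL checks in choose_pixel_fmt() and
 * choose_pix_fmts() below swap them in for the MJPEG/LJPEG encoders' default
 * pix_fmt lists. */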
62 
63 enum AVPixelFormat choose_pixel_fmt(AVStream *st, AVCodecContext *enc_ctx,
64  const AVCodec *codec, enum AVPixelFormat target)
65 {
66  if (codec && codec->pix_fmts) {
67  const enum AVPixelFormat *p = codec->pix_fmts;
68  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(target);
69  //FIXME: This should check for AV_PIX_FMT_FLAG_ALPHA after PAL8 pixel format without alpha is implemented
70  int has_alpha = desc ? desc->nb_components % 2 == 0 : 0;
71  enum AVPixelFormat best= AV_PIX_FMT_NONE;
72 
73  if (enc_ctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL) {
74  p = get_compliance_unofficial_pix_fmts(enc_ctx->codec_id, p);
75  }
76  for (; *p != AV_PIX_FMT_NONE; p++) {
77  best= avcodec_find_best_pix_fmt_of_2(best, *p, target, has_alpha, NULL);
78  if (*p == target)
79  break;
80  }
81  if (*p == AV_PIX_FMT_NONE) {
82  if (target != AV_PIX_FMT_NONE)
84  "Incompatible pixel format '%s' for codec '%s', auto-selecting format '%s'\n",
85  av_get_pix_fmt_name(target),
86  codec->name,
87  av_get_pix_fmt_name(best));
88  return best;
89  }
90  }
91  return target;
92 }
93 
94 void choose_sample_fmt(AVStream *st, const AVCodec *codec)
95 {
96  if (codec && codec->sample_fmts) {
97  const enum AVSampleFormat *p = codec->sample_fmts;
98  for (; *p != -1; p++) {
99  if (*p == st->codecpar->format)
100  break;
101  }
102  if (*p == -1) {
103  const AVCodecDescriptor *desc = avcodec_descriptor_get(codec->id);
104  if (desc && (desc->props & AV_CODEC_PROP_LOSSLESS))
105  av_log(NULL, AV_LOG_ERROR, "Conversion will not be lossless.\n");
106  if (av_get_sample_fmt_name(st->codecpar->format))
107  av_log(NULL, AV_LOG_WARNING,
108  "Incompatible sample format '%s' for codec '%s', auto-selecting format '%s'\n",
109  av_get_sample_fmt_name(st->codecpar->format),
110  codec->name,
111  av_get_sample_fmt_name(codec->sample_fmts[0]));
112  st->codecpar->format = codec->sample_fmts[0];
113  }
114  }
115 }
116 
117 static char *choose_pix_fmts(OutputFilter *ofilter)
118 {
119  OutputStream *ost = ofilter->ost;
120  AVDictionaryEntry *strict_dict = av_dict_get(ost->encoder_opts, "strict", NULL, 0);
121  if (strict_dict)
122  // used by choose_pixel_fmt() and below
123  av_opt_set(ost->enc_ctx, "strict", strict_dict->value, 0);
124 
125  if (ost->keep_pix_fmt) {
126  avfilter_graph_set_auto_convert(ofilter->graph->graph,
127  AVFILTER_AUTO_CONVERT_NONE);
128  if (ost->enc_ctx->pix_fmt == AV_PIX_FMT_NONE)
129  return NULL;
130  return av_strdup(av_get_pix_fmt_name(ost->enc_ctx->pix_fmt));
131  }
132  if (ost->enc_ctx->pix_fmt != AV_PIX_FMT_NONE) {
133  return av_strdup(av_get_pix_fmt_name(choose_pixel_fmt(ost->st, ost->enc_ctx, ost->enc, ost->enc_ctx->pix_fmt)));
134  } else if (ost->enc && ost->enc->pix_fmts) {
135  const enum AVPixelFormat *p;
136  AVIOContext *s = NULL;
137  uint8_t *ret;
138  int len;
139 
140  if (avio_open_dyn_buf(&s) < 0)
141  exit_program(1);
142 
143  p = ost->enc->pix_fmts;
144  if (ost->enc_ctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL) {
145  p = get_compliance_unofficial_pix_fmts(ost->enc_ctx->codec_id, p);
146  }
147 
148  for (; *p != AV_PIX_FMT_NONE; p++) {
149  const char *name = av_get_pix_fmt_name(*p);
150  avio_printf(s, "%s|", name);
151  }
152  len = avio_close_dyn_buf(s, &ret);
153  ret[len - 1] = 0;
154  return ret;
155  } else
156  return NULL;
157 }
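/* The string built here ("fmt1|fmt2|...") is used verbatim as the argument of the
 * auto-inserted "format" filter in configure_output_video_filter(). */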
158 
159 /* Define a function for building a string containing a list of
160  * allowed formats. */
161 #define DEF_CHOOSE_FORMAT(suffix, type, var, supported_list, none, get_name) \
162 static char *choose_ ## suffix (OutputFilter *ofilter) \
163 { \
164  if (ofilter->var != none) { \
165  get_name(ofilter->var); \
166  return av_strdup(name); \
167  } else if (ofilter->supported_list) { \
168  const type *p; \
169  AVIOContext *s = NULL; \
170  uint8_t *ret; \
171  int len; \
172  \
173  if (avio_open_dyn_buf(&s) < 0) \
174  exit_program(1); \
175  \
176  for (p = ofilter->supported_list; *p != none; p++) { \
177  get_name(*p); \
178  avio_printf(s, "%s|", name); \
179  } \
180  len = avio_close_dyn_buf(s, &ret); \
181  ret[len - 1] = 0; \
182  return ret; \
183  } else \
184  return NULL; \
185 }
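/* For illustration, the sample_rates instantiation below expands to roughly:
 *
 *   static char *choose_sample_rates(OutputFilter *ofilter)
 *   {
 *       if (ofilter->sample_rate != 0) {
 *           GET_SAMPLE_RATE_NAME(ofilter->sample_rate);  // declares a local 'name'
 *           return av_strdup(name);
 *       } else if (ofilter->sample_rates) {
 *           // print each supported rate into a dynamic buffer as "r1|r2|...|",
 *           // overwrite the trailing '|' with a NUL and return the buffer
 *       } else
 *           return NULL;
 *   }
 */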
186 
187 //DEF_CHOOSE_FORMAT(pix_fmts, enum AVPixelFormat, format, formats, AV_PIX_FMT_NONE,
188 // GET_PIX_FMT_NAME)
189 
190 DEF_CHOOSE_FORMAT(sample_fmts, enum AVSampleFormat, format, formats,
191  AV_SAMPLE_FMT_NONE, GET_SAMPLE_FMT_NAME)
192 
193 DEF_CHOOSE_FORMAT(sample_rates, int, sample_rate, sample_rates, 0,
194  GET_SAMPLE_RATE_NAME)
195 
196 DEF_CHOOSE_FORMAT(channel_layouts, uint64_t, channel_layout, channel_layouts, 0,
197  GET_CH_LAYOUT_NAME)
198 
199 int init_simple_filtergraph(InputStream *ist, OutputStream *ost)
200 {
201  FilterGraph *fg = av_mallocz(sizeof(*fg));
202 
203  if (!fg)
204  exit_program(1);
205  fg->index = nb_filtergraphs;
206 
207  GROW_ARRAY(fg->outputs, fg->nb_outputs);
208  if (!(fg->outputs[0] = av_mallocz(sizeof(*fg->outputs[0]))))
209  exit_program(1);
210  fg->outputs[0]->ost = ost;
211  fg->outputs[0]->graph = fg;
212  fg->outputs[0]->format = -1;
213 
214  ost->filter = fg->outputs[0];
215 
216  GROW_ARRAY(fg->inputs, fg->nb_inputs);
217  if (!(fg->inputs[0] = av_mallocz(sizeof(*fg->inputs[0]))))
218  exit_program(1);
219  fg->inputs[0]->ist = ist;
220  fg->inputs[0]->graph = fg;
221  fg->inputs[0]->format = -1;
222 
223  fg->inputs[0]->frame_queue = av_fifo_alloc(8 * sizeof(AVFrame*));
224  if (!fg->inputs[0]->frame_queue)
225  exit_program(1);
226 
227  GROW_ARRAY(ist->filters, ist->nb_filters);
228  ist->filters[ist->nb_filters - 1] = fg->inputs[0];
229 
230  GROW_ARRAY(filtergraphs, nb_filtergraphs);
231  filtergraphs[nb_filtergraphs - 1] = fg;
232 
233  return 0;
234 }
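/* Note: init_simple_filtergraph() only allocates the FilterGraph and binds its
 * single input to ist and its single output to ost; the actual -vf/-af string is
 * parsed later, in configure_filtergraph(). */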
235 
236 static char *describe_filter_link(FilterGraph *fg, AVFilterInOut *inout, int in)
237 {
238  AVFilterContext *ctx = inout->filter_ctx;
239  AVFilterPad *pads = in ? ctx->input_pads : ctx->output_pads;
240  int nb_pads = in ? ctx->nb_inputs : ctx->nb_outputs;
241  AVIOContext *pb;
242  uint8_t *res = NULL;
243 
244  if (avio_open_dyn_buf(&pb) < 0)
245  exit_program(1);
246 
247  avio_printf(pb, "%s", ctx->filter->name);
248  if (nb_pads > 1)
249  avio_printf(pb, ":%s", avfilter_pad_get_name(pads, inout->pad_idx));
250  avio_w8(pb, 0);
251  avio_close_dyn_buf(pb, &res);
252  return res;
253 }
254 
255 static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
256 {
257  InputStream *ist = NULL;
258  enum AVMediaType type = avfilter_pad_get_type(in->filter_ctx->input_pads, in->pad_idx);
259  int i;
260 
261  // TODO: support other filter types
262  if (type != AVMEDIA_TYPE_VIDEO && type != AVMEDIA_TYPE_AUDIO) {
263  av_log(NULL, AV_LOG_FATAL, "Only video and audio filters supported "
264  "currently.\n");
265  exit_program(1);
266  }
267 
268  if (in->name) {
269  AVFormatContext *s;
270  AVStream *st = NULL;
271  char *p;
272  int file_idx = strtol(in->name, &p, 0);
273 
274  if (file_idx < 0 || file_idx >= nb_input_files) {
275  av_log(NULL, AV_LOG_FATAL, "Invalid file index %d in filtergraph description %s.\n",
276  file_idx, fg->graph_desc);
277  exit_program(1);
278  }
279  s = input_files[file_idx]->ctx;
280 
281  for (i = 0; i < s->nb_streams; i++) {
282  enum AVMediaType stream_type = s->streams[i]->codecpar->codec_type;
283  if (stream_type != type &&
284  !(stream_type == AVMEDIA_TYPE_SUBTITLE &&
285  type == AVMEDIA_TYPE_VIDEO /* sub2video hack */))
286  continue;
287  if (check_stream_specifier(s, s->streams[i], *p == ':' ? p + 1 : p) == 1) {
288  st = s->streams[i];
289  break;
290  }
291  }
292  if (!st) {
293  av_log(NULL, AV_LOG_FATAL, "Stream specifier '%s' in filtergraph description %s "
294  "matches no streams.\n", p, fg->graph_desc);
295  exit_program(1);
296  }
297  ist = input_streams[input_files[file_idx]->ist_index + st->index];
298  if (ist->user_set_discard == AVDISCARD_ALL) {
299  av_log(NULL, AV_LOG_FATAL, "Stream specifier '%s' in filtergraph description %s "
300  "matches a disabled input stream.\n", p, fg->graph_desc);
301  exit_program(1);
302  }
303  } else {
304  /* find the first unused stream of corresponding type */
305  for (i = 0; i < nb_input_streams; i++) {
306  ist = input_streams[i];
307  if (ist->user_set_discard == AVDISCARD_ALL)
308  continue;
309  if (ist->dec_ctx->codec_type == type && ist->discard)
310  break;
311  }
312  if (i == nb_input_streams) {
313  av_log(NULL, AV_LOG_FATAL, "Cannot find a matching stream for "
314  "unlabeled input pad %d on filter %s\n", in->pad_idx,
315  in->filter_ctx->name);
316  exit_program(1);
317  }
318  }
319  av_assert0(ist);
320 
321  ist->discard = 0;
322  ist->decoding_needed |= DECODING_FOR_FILTER;
323  ist->st->discard = AVDISCARD_NONE;
324 
325  GROW_ARRAY(fg->inputs, fg->nb_inputs);
326  if (!(fg->inputs[fg->nb_inputs - 1] = av_mallocz(sizeof(*fg->inputs[0]))))
327  exit_program(1);
328  fg->inputs[fg->nb_inputs - 1]->ist = ist;
329  fg->inputs[fg->nb_inputs - 1]->graph = fg;
330  fg->inputs[fg->nb_inputs - 1]->format = -1;
331  fg->inputs[fg->nb_inputs - 1]->type = ist->st->codecpar->codec_type;
332  fg->inputs[fg->nb_inputs - 1]->name = describe_filter_link(fg, in, 1);
333 
334  fg->inputs[fg->nb_inputs - 1]->frame_queue = av_fifo_alloc(8 * sizeof(AVFrame*));
335  if (!fg->inputs[fg->nb_inputs - 1]->frame_queue)
336  exit_program(1);
337 
338  GROW_ARRAY(ist->filters, ist->nb_filters);
339  ist->filters[ist->nb_filters - 1] = fg->inputs[fg->nb_inputs - 1];
340 }
341 
342 int init_complex_filtergraph(FilterGraph *fg)
343 {
344  AVFilterInOut *inputs, *outputs, *cur;
345  AVFilterGraph *graph;
346  int ret = 0;
347 
348  /* this graph is only used for determining the kinds of inputs
349  * and outputs we have, and is discarded on exit from this function */
350  graph = avfilter_graph_alloc();
351  if (!graph)
352  return AVERROR(ENOMEM);
353  graph->nb_threads = 1;
354 
355  ret = avfilter_graph_parse2(graph, fg->graph_desc, &inputs, &outputs);
356  if (ret < 0)
357  goto fail;
358 
359  for (cur = inputs; cur; cur = cur->next)
360  init_input_filter(fg, cur);
361 
362  for (cur = outputs; cur;) {
363  GROW_ARRAY(fg->outputs, fg->nb_outputs);
364  fg->outputs[fg->nb_outputs - 1] = av_mallocz(sizeof(*fg->outputs[0]));
365  if (!fg->outputs[fg->nb_outputs - 1])
366  exit_program(1);
367 
368  fg->outputs[fg->nb_outputs - 1]->graph = fg;
369  fg->outputs[fg->nb_outputs - 1]->out_tmp = cur;
370  fg->outputs[fg->nb_outputs - 1]->type = avfilter_pad_get_type(cur->filter_ctx->output_pads,
371  cur->pad_idx);
372  fg->outputs[fg->nb_outputs - 1]->name = describe_filter_link(fg, cur, 0);
373  cur = cur->next;
374  fg->outputs[fg->nb_outputs - 1]->out_tmp->next = NULL;
375  }
376 
377 fail:
378  avfilter_inout_free(&inputs);
379  avfilter_graph_free(&graph);
380  return ret;
381 }
382 
383 static int insert_trim(int64_t start_time, int64_t duration,
384  AVFilterContext **last_filter, int *pad_idx,
385  const char *filter_name)
386 {
387  AVFilterGraph *graph = (*last_filter)->graph;
388  AVFilterContext *ctx;
389  const AVFilter *trim;
390  enum AVMediaType type = avfilter_pad_get_type((*last_filter)->output_pads, *pad_idx);
391  const char *name = (type == AVMEDIA_TYPE_VIDEO) ? "trim" : "atrim";
392  int ret = 0;
393 
394  if (duration == INT64_MAX && start_time == AV_NOPTS_VALUE)
395  return 0;
396 
397  trim = avfilter_get_by_name(name);
398  if (!trim) {
399  av_log(NULL, AV_LOG_ERROR, "%s filter not present, cannot limit "
400  "recording time.\n", name);
401  return AVERROR_FILTER_NOT_FOUND;
402  }
403 
404  ctx = avfilter_graph_alloc_filter(graph, trim, filter_name);
405  if (!ctx)
406  return AVERROR(ENOMEM);
407 
408  if (duration != INT64_MAX) {
409  ret = av_opt_set_int(ctx, "durationi", duration,
410  AV_OPT_SEARCH_CHILDREN);
411  }
412  if (ret >= 0 && start_time != AV_NOPTS_VALUE) {
413  ret = av_opt_set_int(ctx, "starti", start_time,
414  AV_OPT_SEARCH_CHILDREN);
415  }
416  if (ret < 0) {
417  av_log(ctx, AV_LOG_ERROR, "Error configuring the %s filter", name);
418  return ret;
419  }
420 
421  ret = avfilter_init_str(ctx, NULL);
422  if (ret < 0)
423  return ret;
424 
425  ret = avfilter_link(*last_filter, *pad_idx, ctx, 0);
426  if (ret < 0)
427  return ret;
428 
429  *last_filter = ctx;
430  *pad_idx = 0;
431  return 0;
432 }
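/* insert_trim() is used for both inputs and outputs below: when -ss/-t apply to a
 * file, a trim (video) or atrim (audio) instance is appended after the current
 * last filter so only frames inside the requested window pass through. "starti"
 * and "durationi" are the integer variants of the trim options and take values in
 * AV_TIME_BASE units, which is why av_opt_set_int() is used here. */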
433 
434 static int insert_filter(AVFilterContext **last_filter, int *pad_idx,
435  const char *filter_name, const char *args)
436 {
437  AVFilterGraph *graph = (*last_filter)->graph;
438  AVFilterContext *ctx;
439  int ret;
440 
441  ret = avfilter_graph_create_filter(&ctx,
442  avfilter_get_by_name(filter_name),
443  filter_name, args, NULL, graph);
444  if (ret < 0)
445  return ret;
446 
447  ret = avfilter_link(*last_filter, *pad_idx, ctx, 0);
448  if (ret < 0)
449  return ret;
450 
451  *last_filter = ctx;
452  *pad_idx = 0;
453  return 0;
454 }
455 
456 static int configure_output_video_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
457 {
458  char *pix_fmts;
459  OutputStream *ost = ofilter->ost;
460  OutputFile *of = output_files[ost->file_index];
461  AVFilterContext *last_filter = out->filter_ctx;
462  int pad_idx = out->pad_idx;
463  int ret;
464  char name[255];
465 
466  snprintf(name, sizeof(name), "out_%d_%d", ost->file_index, ost->index);
467  ret = avfilter_graph_create_filter(&ofilter->filter,
468  avfilter_get_by_name("buffersink"),
469  name, NULL, NULL, fg->graph);
470 
471  if (ret < 0)
472  return ret;
473 
474  if ((ofilter->width || ofilter->height) && ofilter->ost->autoscale) {
475  char args[255];
476  AVFilterContext *filter;
477  AVDictionaryEntry *e = NULL;
478 
479  snprintf(args, sizeof(args), "%d:%d",
480  ofilter->width, ofilter->height);
481 
482  while ((e = av_dict_get(ost->sws_dict, "", e,
483  AV_DICT_IGNORE_SUFFIX))) {
484  av_strlcatf(args, sizeof(args), ":%s=%s", e->key, e->value);
485  }
486 
487  snprintf(name, sizeof(name), "scaler_out_%d_%d",
488  ost->file_index, ost->index);
489  if ((ret = avfilter_graph_create_filter(&filter, avfilter_get_by_name("scale"),
490  name, args, NULL, fg->graph)) < 0)
491  return ret;
492  if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
493  return ret;
494 
495  last_filter = filter;
496  pad_idx = 0;
497  }
498 
499  if ((pix_fmts = choose_pix_fmts(ofilter))) {
500  AVFilterContext *filter;
501  snprintf(name, sizeof(name), "format_out_%d_%d",
502  ost->file_index, ost->index);
503  ret = avfilter_graph_create_filter(&filter,
504  avfilter_get_by_name("format"),
505  "format", pix_fmts, NULL, fg->graph);
506  av_freep(&pix_fmts);
507  if (ret < 0)
508  return ret;
509  if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
510  return ret;
511 
512  last_filter = filter;
513  pad_idx = 0;
514  }
515 
516  if (ost->frame_rate.num && 0) {
517  AVFilterContext *fps;
518  char args[255];
519 
520  snprintf(args, sizeof(args), "fps=%d/%d", ost->frame_rate.num,
521  ost->frame_rate.den);
522  snprintf(name, sizeof(name), "fps_out_%d_%d",
523  ost->file_index, ost->index);
524  ret = avfilter_graph_create_filter(&fps, avfilter_get_by_name("fps"),
525  name, args, NULL, fg->graph);
526  if (ret < 0)
527  return ret;
528 
529  ret = avfilter_link(last_filter, pad_idx, fps, 0);
530  if (ret < 0)
531  return ret;
532  last_filter = fps;
533  pad_idx = 0;
534  }
535 
536  snprintf(name, sizeof(name), "trim_out_%d_%d",
537  ost->file_index, ost->index);
538  ret = insert_trim(of->start_time, of->recording_time,
539  &last_filter, &pad_idx, name);
540  if (ret < 0)
541  return ret;
542 
543 
544  if ((ret = avfilter_link(last_filter, pad_idx, ofilter->filter, 0)) < 0)
545  return ret;
546 
547  return 0;
548 }
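/* Chain built above for a video output, in order:
 *   user graph -> [scale (forced size + autoscale)] -> [format (choose_pix_fmts)]
 *   -> [fps (currently disabled by the "&& 0")] -> [trim (-ss/-t)] -> buffersink */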
549 
550 static int configure_output_audio_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
551 {
552  OutputStream *ost = ofilter->ost;
553  OutputFile *of = output_files[ost->file_index];
554  AVCodecContext *codec = ost->enc_ctx;
555  AVFilterContext *last_filter = out->filter_ctx;
556  int pad_idx = out->pad_idx;
557  char *sample_fmts, *sample_rates, *channel_layouts;
558  char name[255];
559  int ret;
560 
561  snprintf(name, sizeof(name), "out_%d_%d", ost->file_index, ost->index);
562  ret = avfilter_graph_create_filter(&ofilter->filter,
563  avfilter_get_by_name("abuffersink"),
564  name, NULL, NULL, fg->graph);
565  if (ret < 0)
566  return ret;
567  if ((ret = av_opt_set_int(ofilter->filter, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
568  return ret;
569 
570 #define AUTO_INSERT_FILTER(opt_name, filter_name, arg) do { \
571  AVFilterContext *filt_ctx; \
572  \
573  av_log(NULL, AV_LOG_INFO, opt_name " is forwarded to lavfi " \
574  "similarly to -af " filter_name "=%s.\n", arg); \
575  \
576  ret = avfilter_graph_create_filter(&filt_ctx, \
577  avfilter_get_by_name(filter_name), \
578  filter_name, arg, NULL, fg->graph); \
579  if (ret < 0) \
580  return ret; \
581  \
582  ret = avfilter_link(last_filter, pad_idx, filt_ctx, 0); \
583  if (ret < 0) \
584  return ret; \
585  \
586  last_filter = filt_ctx; \
587  pad_idx = 0; \
588 } while (0)
589  if (ost->audio_channels_mapped) {
590  int i;
591  AVBPrint pan_buf;
592  av_bprint_init(&pan_buf, 256, 8192);
593  av_bprintf(&pan_buf, "0x%"PRIx64,
594  av_get_default_channel_layout(ost->audio_channels_mapped));
595  for (i = 0; i < ost->audio_channels_mapped; i++)
596  if (ost->audio_channels_map[i] != -1)
597  av_bprintf(&pan_buf, "|c%d=c%d", i, ost->audio_channels_map[i]);
598 
599  AUTO_INSERT_FILTER("-map_channel", "pan", pan_buf.str);
600  av_bprint_finalize(&pan_buf, NULL);
601  }
602 
603  if (codec->channels && !codec->channel_layout)
604  codec->channel_layout = av_get_default_channel_layout(codec->channels);
605 
606  sample_fmts = choose_sample_fmts(ofilter);
607  sample_rates = choose_sample_rates(ofilter);
608  channel_layouts = choose_channel_layouts(ofilter);
609  if (sample_fmts || sample_rates || channel_layouts) {
610  AVFilterContext *format;
611  char args[256];
612  args[0] = 0;
613 
614  if (sample_fmts)
615  av_strlcatf(args, sizeof(args), "sample_fmts=%s:",
616  sample_fmts);
617  if (sample_rates)
618  av_strlcatf(args, sizeof(args), "sample_rates=%s:",
619  sample_rates);
620  if (channel_layouts)
621  av_strlcatf(args, sizeof(args), "channel_layouts=%s:",
622  channel_layouts);
623 
624  av_freep(&sample_fmts);
625  av_freep(&sample_rates);
626  av_freep(&channel_layouts);
627 
628  snprintf(name, sizeof(name), "format_out_%d_%d",
629  ost->file_index, ost->index);
630  ret = avfilter_graph_create_filter(&format,
631  avfilter_get_by_name("aformat"),
632  name, args, NULL, fg->graph);
633  if (ret < 0)
634  return ret;
635 
636  ret = avfilter_link(last_filter, pad_idx, format, 0);
637  if (ret < 0)
638  return ret;
639 
640  last_filter = format;
641  pad_idx = 0;
642  }
643 
644  if (audio_volume != 256 && 0) {
645  char args[256];
646 
647  snprintf(args, sizeof(args), "%f", audio_volume / 256.);
648  AUTO_INSERT_FILTER("-vol", "volume", args);
649  }
650 
651  if (ost->apad && of->shortest) {
652  char args[256];
653  int i;
654 
655  for (i=0; i<of->ctx->nb_streams; i++)
656  if (of->ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
657  break;
658 
659  if (i<of->ctx->nb_streams) {
660  snprintf(args, sizeof(args), "%s", ost->apad);
661  AUTO_INSERT_FILTER("-apad", "apad", args);
662  }
663  }
664 
665  snprintf(name, sizeof(name), "trim for output stream %d:%d",
666  ost->file_index, ost->index);
667  ret = insert_trim(of->start_time, of->recording_time,
668  &last_filter, &pad_idx, name);
669  if (ret < 0)
670  return ret;
671 
672  if ((ret = avfilter_link(last_filter, pad_idx, ofilter->filter, 0)) < 0)
673  return ret;
674 
675  return 0;
676 }
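/* Chain built above for an audio output, in order:
 *   user graph -> [pan (-map_channel)] -> [aformat] -> [volume (disabled by "&& 0")]
 *   -> [apad (-apad with -shortest)] -> [atrim (-ss/-t)] -> abuffersink */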
677 
678 int configure_output_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
679 {
680  if (!ofilter->ost) {
681  av_log(NULL, AV_LOG_FATAL, "Filter %s has an unconnected output\n", ofilter->name);
682  exit_program(1);
683  }
684 
685  switch (avfilter_pad_get_type(out->filter_ctx->output_pads, out->pad_idx)) {
686  case AVMEDIA_TYPE_VIDEO: return configure_output_video_filter(fg, ofilter, out);
687  case AVMEDIA_TYPE_AUDIO: return configure_output_audio_filter(fg, ofilter, out);
688  default: av_assert0(0);
689  }
690 }
691 
692 void check_filter_outputs(void)
693 {
694  int i;
695  for (i = 0; i < nb_filtergraphs; i++) {
696  int n;
697  for (n = 0; n < filtergraphs[i]->nb_outputs; n++) {
698  OutputFilter *output = filtergraphs[i]->outputs[n];
699  if (!output->ost) {
700  av_log(NULL, AV_LOG_FATAL, "Filter %s has an unconnected output\n", output->name);
701  exit_program(1);
702  }
703  }
704  }
705 }
706 
707 static int sub2video_prepare(InputStream *ist, InputFilter *ifilter)
708 {
709  AVFormatContext *avf = input_files[ist->file_index]->ctx;
710  int i, w, h;
711 
712  /* Compute the size of the canvas for the subtitles stream.
713  If the subtitles codecpar has set a size, use it. Otherwise use the
714  maximum dimensions of the video streams in the same file. */
715  w = ifilter->width;
716  h = ifilter->height;
717  if (!(w && h)) {
718  for (i = 0; i < avf->nb_streams; i++) {
719  if (avf->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
720  w = FFMAX(w, avf->streams[i]->codecpar->width);
721  h = FFMAX(h, avf->streams[i]->codecpar->height);
722  }
723  }
724  if (!(w && h)) {
725  w = FFMAX(w, 720);
726  h = FFMAX(h, 576);
727  }
728  av_log(avf, AV_LOG_INFO, "sub2video: using %dx%d canvas\n", w, h);
729  }
730  ist->sub2video.w = ifilter->width = w;
731  ist->sub2video.h = ifilter->height = h;
732 
733  ifilter->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
734  ifilter->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
735 
736  /* rectangles are AV_PIX_FMT_PAL8, but we have no guarantee that the
737  palettes for all rectangles are identical or compatible */
738  ifilter->format = AV_PIX_FMT_RGB32;
739 
740  ist->sub2video.frame = av_frame_alloc();
741  if (!ist->sub2video.frame)
742  return AVERROR(ENOMEM);
743  ist->sub2video.last_pts = INT64_MIN;
744  ist->sub2video.end_pts = INT64_MIN;
745 
746  /* sub2video structure has been (re-)initialized.
747  Mark it as such so that the system will be
748  initialized with the first received heartbeat. */
749  ist->sub2video.initialize = 1;
750 
751  return 0;
752 }
753 
754 static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
755  AVFilterInOut *in)
756 {
757  AVFilterContext *last_filter;
758  const AVFilter *buffer_filt = avfilter_get_by_name("buffer");
759  InputStream *ist = ifilter->ist;
760  InputFile *f = input_files[ist->file_index];
761  AVRational tb = ist->framerate.num ? av_inv_q(ist->framerate) :
762  ist->st->time_base;
763  AVRational fr = ist->framerate;
764  AVRational sar;
765  AVBPrint args;
766  char name[255];
767  int ret, pad_idx = 0;
768  int64_t tsoffset = 0;
769  AVBufferSrcParameters *par = av_buffersrc_parameters_alloc();
770 
771  if (!par)
772  return AVERROR(ENOMEM);
773  memset(par, 0, sizeof(*par));
774  par->format = AV_PIX_FMT_NONE;
775 
776  if (ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
777  av_log(NULL, AV_LOG_ERROR, "Cannot connect video filter to audio input\n");
778  ret = AVERROR(EINVAL);
779  goto fail;
780  }
781 
782  if (!fr.num)
783  fr = av_guess_frame_rate(input_files[ist->file_index]->ctx, ist->st, NULL);
784 
785  if (ist->dec_ctx->codec_type == AVMEDIA_TYPE_SUBTITLE) {
786  ret = sub2video_prepare(ist, ifilter);
787  if (ret < 0)
788  goto fail;
789  }
790 
791  sar = ifilter->sample_aspect_ratio;
792  if(!sar.den)
793  sar = (AVRational){0,1};
794  av_bprint_init(&args, 0, AV_BPRINT_SIZE_AUTOMATIC);
795  av_bprintf(&args,
796  "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:"
797  "pixel_aspect=%d/%d",
798  ifilter->width, ifilter->height, ifilter->format,
799  tb.num, tb.den, sar.num, sar.den);
800  if (fr.num && fr.den)
801  av_bprintf(&args, ":frame_rate=%d/%d", fr.num, fr.den);
802  snprintf(name, sizeof(name), "graph %d input from stream %d:%d", fg->index,
803  ist->file_index, ist->st->index);
804 
805 
806  if ((ret = avfilter_graph_create_filter(&ifilter->filter, buffer_filt, name,
807  args.str, NULL, fg->graph)) < 0)
808  goto fail;
809  par->hw_frames_ctx = ifilter->hw_frames_ctx;
810  ret = av_buffersrc_parameters_set(ifilter->filter, par);
811  if (ret < 0)
812  goto fail;
813  av_freep(&par);
814  last_filter = ifilter->filter;
815 
816  if (ist->autorotate) {
817  double theta = get_rotation(ist->st);
818 
819  if (fabs(theta - 90) < 1.0) {
820  ret = insert_filter(&last_filter, &pad_idx, "transpose", "clock");
821  } else if (fabs(theta - 180) < 1.0) {
822  ret = insert_filter(&last_filter, &pad_idx, "hflip", NULL);
823  if (ret < 0)
824  return ret;
825  ret = insert_filter(&last_filter, &pad_idx, "vflip", NULL);
826  } else if (fabs(theta - 270) < 1.0) {
827  ret = insert_filter(&last_filter, &pad_idx, "transpose", "cclock");
828  } else if (fabs(theta) > 1.0) {
829  char rotate_buf[64];
830  snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
831  ret = insert_filter(&last_filter, &pad_idx, "rotate", rotate_buf);
832  }
833  if (ret < 0)
834  return ret;
835  }
836 
837  if (do_deinterlace) {
838  AVFilterContext *yadif;
839 
840  snprintf(name, sizeof(name), "deinterlace_in_%d_%d",
841  ist->file_index, ist->st->index);
842  if ((ret = avfilter_graph_create_filter(&yadif,
843  avfilter_get_by_name("yadif"),
844  name, "", NULL,
845  fg->graph)) < 0)
846  return ret;
847 
848  if ((ret = avfilter_link(last_filter, 0, yadif, 0)) < 0)
849  return ret;
850 
851  last_filter = yadif;
852  }
853 
854  snprintf(name, sizeof(name), "trim_in_%d_%d",
855  ist->file_index, ist->st->index);
856  if (copy_ts) {
857  tsoffset = f->start_time == AV_NOPTS_VALUE ? 0 : f->start_time;
858  if (!start_at_zero && f->ctx->start_time != AV_NOPTS_VALUE)
859  tsoffset += f->ctx->start_time;
860  }
861  ret = insert_trim(((f->start_time == AV_NOPTS_VALUE) || !f->accurate_seek) ?
862  AV_NOPTS_VALUE : tsoffset, f->recording_time,
863  &last_filter, &pad_idx, name);
864  if (ret < 0)
865  return ret;
866 
867  if ((ret = avfilter_link(last_filter, 0, in->filter_ctx, in->pad_idx)) < 0)
868  return ret;
869  return 0;
870 fail:
871  av_freep(&par);
872 
873  return ret;
874 }
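/* Chain built above for a video input, in order:
 *   buffer (parameters taken from the stream/decoder) -> [transpose / hflip+vflip /
 *   rotate (-autorotate)] -> [yadif (-deinterlace)] -> [trim] -> first filter of
 *   the user graph. */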
875 
876 static int configure_input_audio_filter(FilterGraph *fg, InputFilter *ifilter,
877  AVFilterInOut *in)
878 {
879  AVFilterContext *last_filter;
880  const AVFilter *abuffer_filt = avfilter_get_by_name("abuffer");
881  InputStream *ist = ifilter->ist;
882  InputFile *f = input_files[ist->file_index];
883  AVBPrint args;
884  char name[255];
885  int ret, pad_idx = 0;
886  int64_t tsoffset = 0;
887 
888  if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_AUDIO) {
889  av_log(NULL, AV_LOG_ERROR, "Cannot connect audio filter to non audio input\n");
890  return AVERROR(EINVAL);
891  }
892 
894  av_bprintf(&args, "time_base=%d/%d:sample_rate=%d:sample_fmt=%s",
895  1, ifilter->sample_rate,
896  ifilter->sample_rate,
897  av_get_sample_fmt_name(ifilter->format));
898  if (ifilter->channel_layout)
899  av_bprintf(&args, ":channel_layout=0x%"PRIx64,
900  ifilter->channel_layout);
901  else
902  av_bprintf(&args, ":channels=%d", ifilter->channels);
903  snprintf(name, sizeof(name), "graph_%d_in_%d_%d", fg->index,
904  ist->file_index, ist->st->index);
905 
906  if ((ret = avfilter_graph_create_filter(&ifilter->filter, abuffer_filt,
907  name, args.str, NULL,
908  fg->graph)) < 0)
909  return ret;
910  last_filter = ifilter->filter;
911 
912 #define AUTO_INSERT_FILTER_INPUT(opt_name, filter_name, arg) do { \
913  AVFilterContext *filt_ctx; \
914  \
915  av_log(NULL, AV_LOG_INFO, opt_name " is forwarded to lavfi " \
916  "similarly to -af " filter_name "=%s.\n", arg); \
917  \
918  snprintf(name, sizeof(name), "graph_%d_%s_in_%d_%d", \
919  fg->index, filter_name, ist->file_index, ist->st->index); \
920  ret = avfilter_graph_create_filter(&filt_ctx, \
921  avfilter_get_by_name(filter_name), \
922  name, arg, NULL, fg->graph); \
923  if (ret < 0) \
924  return ret; \
925  \
926  ret = avfilter_link(last_filter, 0, filt_ctx, 0); \
927  if (ret < 0) \
928  return ret; \
929  \
930  last_filter = filt_ctx; \
931 } while (0)
932 
933  if (audio_sync_method > 0) {
934  char args[256] = {0};
935 
936  av_strlcatf(args, sizeof(args), "async=%d", audio_sync_method);
937  if (audio_drift_threshold != 0.1)
938  av_strlcatf(args, sizeof(args), ":min_hard_comp=%f", audio_drift_threshold);
939  if (!fg->reconfiguration)
940  av_strlcatf(args, sizeof(args), ":first_pts=0");
941  AUTO_INSERT_FILTER_INPUT("-async", "aresample", args);
942  }
943 
944 // if (ost->audio_channels_mapped) {
945 // int i;
946 // AVBPrint pan_buf;
947 // av_bprint_init(&pan_buf, 256, 8192);
948 // av_bprintf(&pan_buf, "0x%"PRIx64,
949 // av_get_default_channel_layout(ost->audio_channels_mapped));
950 // for (i = 0; i < ost->audio_channels_mapped; i++)
951 // if (ost->audio_channels_map[i] != -1)
952 // av_bprintf(&pan_buf, ":c%d=c%d", i, ost->audio_channels_map[i]);
953 // AUTO_INSERT_FILTER_INPUT("-map_channel", "pan", pan_buf.str);
954 // av_bprint_finalize(&pan_buf, NULL);
955 // }
956 
957  if (audio_volume != 256) {
958  char args[256];
959 
960  av_log(NULL, AV_LOG_WARNING, "-vol has been deprecated. Use the volume "
961  "audio filter instead.\n");
962 
963  snprintf(args, sizeof(args), "%f", audio_volume / 256.);
964  AUTO_INSERT_FILTER_INPUT("-vol", "volume", args);
965  }
966 
967  snprintf(name, sizeof(name), "trim for input stream %d:%d",
968  ist->file_index, ist->st->index);
969  if (copy_ts) {
970  tsoffset = f->start_time == AV_NOPTS_VALUE ? 0 : f->start_time;
971  if (!start_at_zero && f->ctx->start_time != AV_NOPTS_VALUE)
972  tsoffset += f->ctx->start_time;
973  }
974  ret = insert_trim(((f->start_time == AV_NOPTS_VALUE) || !f->accurate_seek) ?
975  AV_NOPTS_VALUE : tsoffset, f->recording_time,
976  &last_filter, &pad_idx, name);
977  if (ret < 0)
978  return ret;
979 
980  if ((ret = avfilter_link(last_filter, 0, in->filter_ctx, in->pad_idx)) < 0)
981  return ret;
982 
983  return 0;
984 }
985 
986 static int configure_input_filter(FilterGraph *fg, InputFilter *ifilter,
987  AVFilterInOut *in)
988 {
989  if (!ifilter->ist->dec) {
990  av_log(NULL, AV_LOG_ERROR,
991  "No decoder for stream #%d:%d, filtering impossible\n",
992  ifilter->ist->file_index, ifilter->ist->st->index);
993  return AVERROR_DECODER_NOT_FOUND;
994  }
995  switch (avfilter_pad_get_type(in->filter_ctx->input_pads, in->pad_idx)) {
996  case AVMEDIA_TYPE_VIDEO: return configure_input_video_filter(fg, ifilter, in);
997  case AVMEDIA_TYPE_AUDIO: return configure_input_audio_filter(fg, ifilter, in);
998  default: av_assert0(0);
999  }
1000 }
1001 
1002 static void cleanup_filtergraph(FilterGraph *fg)
1003 {
1004  int i;
1005  for (i = 0; i < fg->nb_outputs; i++)
1006  fg->outputs[i]->filter = (AVFilterContext *)NULL;
1007  for (i = 0; i < fg->nb_inputs; i++)
1008  fg->inputs[i]->filter = (AVFilterContext *)NULL;
1009  avfilter_graph_free(&fg->graph);
1010 }
1011 
1013 {
1014  AVFilterInOut *inputs, *outputs, *cur;
1015  int ret, i, simple = filtergraph_is_simple(fg);
1016  const char *graph_desc = simple ? fg->outputs[0]->ost->avfilter :
1017  fg->graph_desc;
1018 
1019  cleanup_filtergraph(fg);
1020  if (!(fg->graph = avfilter_graph_alloc()))
1021  return AVERROR(ENOMEM);
1022 
1023  if (simple) {
1024  OutputStream *ost = fg->outputs[0]->ost;
1025  char args[512];
1026  AVDictionaryEntry *e = NULL;
1027 
1028  fg->graph->nb_threads = filter_nbthreads;
1029 
1030  args[0] = 0;
1031  while ((e = av_dict_get(ost->sws_dict, "", e,
1032  AV_DICT_IGNORE_SUFFIX))) {
1033  av_strlcatf(args, sizeof(args), "%s=%s:", e->key, e->value);
1034  }
1035  if (strlen(args))
1036  args[strlen(args)-1] = 0;
1037  fg->graph->scale_sws_opts = av_strdup(args);
1038 
1039  args[0] = 0;
1040  while ((e = av_dict_get(ost->swr_opts, "", e,
1041  AV_DICT_IGNORE_SUFFIX))) {
1042  av_strlcatf(args, sizeof(args), "%s=%s:", e->key, e->value);
1043  }
1044  if (strlen(args))
1045  args[strlen(args)-1] = 0;
1046  av_opt_set(fg->graph, "aresample_swr_opts", args, 0);
1047 
1048  args[0] = '\0';
1049  while ((e = av_dict_get(fg->outputs[0]->ost->resample_opts, "", e,
1050  AV_DICT_IGNORE_SUFFIX))) {
1051  av_strlcatf(args, sizeof(args), "%s=%s:", e->key, e->value);
1052  }
1053  if (strlen(args))
1054  args[strlen(args) - 1] = '\0';
1055 
1056  e = av_dict_get(ost->encoder_opts, "threads", NULL, 0);
1057  if (e)
1058  av_opt_set(fg->graph, "threads", e->value, 0);
1059  } else {
1060  fg->graph->nb_threads = filter_complex_nbthreads;
1061  }
1062 
1063  if ((ret = avfilter_graph_parse2(fg->graph, graph_desc, &inputs, &outputs)) < 0)
1064  goto fail;
1065 
1066  ret = hw_device_setup_for_filter(fg);
1067  if (ret < 0)
1068  goto fail;
1069 
1070  if (simple && (!inputs || inputs->next || !outputs || outputs->next)) {
1071  const char *num_inputs;
1072  const char *num_outputs;
1073  if (!outputs) {
1074  num_outputs = "0";
1075  } else if (outputs->next) {
1076  num_outputs = ">1";
1077  } else {
1078  num_outputs = "1";
1079  }
1080  if (!inputs) {
1081  num_inputs = "0";
1082  } else if (inputs->next) {
1083  num_inputs = ">1";
1084  } else {
1085  num_inputs = "1";
1086  }
1087  av_log(NULL, AV_LOG_ERROR, "Simple filtergraph '%s' was expected "
1088  "to have exactly 1 input and 1 output."
1089  " However, it had %s input(s) and %s output(s)."
1090  " Please adjust, or use a complex filtergraph (-filter_complex) instead.\n",
1091  graph_desc, num_inputs, num_outputs);
1092  ret = AVERROR(EINVAL);
1093  goto fail;
1094  }
1095 
1096  for (cur = inputs, i = 0; cur; cur = cur->next, i++)
1097  if ((ret = configure_input_filter(fg, fg->inputs[i], cur)) < 0) {
1098  avfilter_inout_free(&inputs);
1099  avfilter_inout_free(&outputs);
1100  goto fail;
1101  }
1102  avfilter_inout_free(&inputs);
1103 
1104  for (cur = outputs, i = 0; cur; cur = cur->next, i++)
1105  configure_output_filter(fg, fg->outputs[i], cur);
1106  avfilter_inout_free(&outputs);
1107 
1108  if (!auto_conversion_filters)
1109  avfilter_graph_set_auto_convert(fg->graph, AVFILTER_AUTO_CONVERT_NONE);
1110  if ((ret = avfilter_graph_config(fg->graph, NULL)) < 0)
1111  goto fail;
1112 
1113  /* limit the lists of allowed formats to the ones selected, to
1114  * make sure they stay the same if the filtergraph is reconfigured later */
1115  for (i = 0; i < fg->nb_outputs; i++) {
1116  OutputFilter *ofilter = fg->outputs[i];
1117  AVFilterContext *sink = ofilter->filter;
1118 
1119  ofilter->format = av_buffersink_get_format(sink);
1120 
1121  ofilter->width = av_buffersink_get_w(sink);
1122  ofilter->height = av_buffersink_get_h(sink);
1123 
1124  ofilter->sample_rate = av_buffersink_get_sample_rate(sink);
1125  ofilter->channel_layout = av_buffersink_get_channel_layout(sink);
1126  }
1127 
1128  fg->reconfiguration = 1;
1129 
1130  for (i = 0; i < fg->nb_outputs; i++) {
1131  OutputStream *ost = fg->outputs[i]->ost;
1132  if (!ost->enc) {
1133  /* identical to the same check in ffmpeg.c, needed because
1134  complex filter graphs are initialized earlier */
1135  av_log(NULL, AV_LOG_ERROR, "Encoder (codec %s) not found for output stream #%d:%d\n",
1136  avcodec_get_name(ost->st->codecpar->codec_id), ost->file_index, ost->index);
1137  ret = AVERROR(EINVAL);
1138  goto fail;
1139  }
1140  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
1141  !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
1142  av_buffersink_set_frame_size(ost->filter->filter,
1143  ost->enc_ctx->frame_size);
1144  }
1145 
1146  for (i = 0; i < fg->nb_inputs; i++) {
1147  while (av_fifo_size(fg->inputs[i]->frame_queue)) {
1148  AVFrame *tmp;
1149  av_fifo_generic_read(fg->inputs[i]->frame_queue, &tmp, sizeof(tmp), NULL);
1150  ret = av_buffersrc_add_frame(fg->inputs[i]->filter, tmp);
1151  av_frame_free(&tmp);
1152  if (ret < 0)
1153  goto fail;
1154  }
1155  }
1156 
1157  /* send the EOFs for the finished inputs */
1158  for (i = 0; i < fg->nb_inputs; i++) {
1159  if (fg->inputs[i]->eof) {
1160  ret = av_buffersrc_add_frame(fg->inputs[i]->filter, NULL);
1161  if (ret < 0)
1162  goto fail;
1163  }
1164  }
1165 
1166  /* process queued up subtitle packets */
1167  for (i = 0; i < fg->nb_inputs; i++) {
1168  InputStream *ist = fg->inputs[i]->ist;
1169  if (ist->sub2video.sub_queue && ist->sub2video.frame) {
1170  while (av_fifo_size(ist->sub2video.sub_queue)) {
1171  AVSubtitle tmp;
1172  av_fifo_generic_read(ist->sub2video.sub_queue, &tmp, sizeof(tmp), NULL);
1173  sub2video_update(ist, INT64_MIN, &tmp);
1174  avsubtitle_free(&tmp);
1175  }
1176  }
1177  }
1178 
1179  return 0;
1180 
1181 fail:
1182  cleanup_filtergraph(fg);
1183  return ret;
1184 }
1185 
1186 int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame)
1187 {
1188  av_buffer_unref(&ifilter->hw_frames_ctx);
1189 
1190  ifilter->format = frame->format;
1191 
1192  ifilter->width = frame->width;
1193  ifilter->height = frame->height;
1194  ifilter->sample_aspect_ratio = frame->sample_aspect_ratio;
1195 
1196  ifilter->sample_rate = frame->sample_rate;
1197  ifilter->channels = frame->channels;
1198  ifilter->channel_layout = frame->channel_layout;
1199 
1200  if (frame->hw_frames_ctx) {
1201  ifilter->hw_frames_ctx = av_buffer_ref(frame->hw_frames_ctx);
1202  if (!ifilter->hw_frames_ctx)
1203  return AVERROR(ENOMEM);
1204  }
1205 
1206  return 0;
1207 }
1208 
1209 int ist_in_filtergraph(FilterGraph *fg, InputStream *ist)
1210 {
1211  int i;
1212  for (i = 0; i < fg->nb_inputs; i++)
1213  if (fg->inputs[i]->ist == ist)
1214  return 1;
1215  return 0;
1216 }
1217 
1218 int filtergraph_is_simple(FilterGraph *fg)
1219 {
1220  return !fg->graph_desc;
1221 }
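/* A FilterGraph without a graph_desc is "simple": it was set up by
 * init_simple_filtergraph() for one input stream and one output stream, and its
 * description is taken from the output stream's -vf/-af option when
 * configure_filtergraph() runs. Complex graphs (-filter_complex / -lavfi) carry
 * their description in graph_desc and are parsed by init_complex_filtergraph(). */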
void avfilter_graph_set_auto_convert(AVFilterGraph *graph, unsigned flags)
Enable or disable automatic format conversion inside the graph.
#define NULL
Definition: coverity.c:32
int width
Definition: ffmpeg.h:273
int keep_pix_fmt
Definition: ffmpeg.h:533
Bytestream IO Context.
Definition: avio.h:161
int64_t recording_time
desired length of the resulting file in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:572
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:94
uint8_t * name
Definition: ffmpeg.h:266
int nb_outputs
Definition: ffmpeg.h:295
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it...
Definition: buffer.c:125
AVDictionary * swr_opts
Definition: ffmpeg.h:514
#define DECODING_FOR_FILTER
Definition: ffmpeg.h:305
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2573
This structure describes decoded (raw) audio or video data.
Definition: frame.h:314
static int configure_input_filter(FilterGraph *fg, InputFilter *ifilter, AVFilterInOut *in)
int avio_close_dyn_buf(AVIOContext *s, uint8_t **pbuffer)
Return the written size and a pointer to the buffer.
Definition: aviobuf.c:1428
AVRational frame_rate
Definition: ffmpeg.h:481
double get_rotation(AVStream *st)
Definition: cmdutils.c:2201
int accurate_seek
Definition: ffmpeg.h:417
const char * desc
Definition: libsvtav1.c:79
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
misc image utilities
AVFilterGraph * avfilter_graph_alloc(void)
Allocate a filter graph.
Definition: avfiltergraph.c:83
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:200
Main libavfilter public API header.
Memory buffer source API.
AVRational framerate
Definition: ffmpeg.h:336
int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx)
Check validity and configure all the links and formats in the graph.
void avfilter_inout_free(AVFilterInOut **inout)
Free the supplied list of AVFilterInOut and set *inout to NULL.
Definition: graphparser.c:212
struct AVFilterInOut * next
next input/input in the list, NULL if this is the last
Definition: avfilter.h:1029
int height
Definition: ffmpeg.h:250
int check_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec)
Check if the given stream matches a stream specifier.
Definition: cmdutils.c:2097
AVFilterInOut * out_tmp
Definition: ffmpeg.h:269
enum AVMediaType avfilter_pad_get_type(const AVFilterPad *pads, int pad_idx)
Get the type of an AVFilterPad.
Definition: avfilter.c:1050
int decoding_needed
Definition: ffmpeg.h:303
enum AVCodecID codec_id
Specific type of the encoded data (the codec used).
Definition: codec_par.h:60
static int insert_trim(int64_t start_time, int64_t duration, AVFilterContext **last_filter, int *pad_idx, const char *filter_name)
int num
Numerator.
Definition: rational.h:59
static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
int index
stream index in AVFormatContext
Definition: avformat.h:881
int init_complex_filtergraph(FilterGraph *fg)
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:741
GLint GLenum type
Definition: opengl_enc.c:104
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:237
discard all
Definition: avcodec.h:236
int nb_input_streams
Definition: ffmpeg.c:148
#define DEF_CHOOSE_FORMAT(suffix, type, var, supported_list, none, get_name)
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample format(the sample packing is implied by the sample format) and sample rate.The lists are not just lists
static enum AVPixelFormat * get_compliance_unofficial_pix_fmts(enum AVCodecID codec_id, const enum AVPixelFormat default_formats[])
Definition: ffmpeg_filter.c:42
AVCodec.
Definition: codec.h:190
int avio_open_dyn_buf(AVIOContext **s)
Open a write only memory stream.
Definition: aviobuf.c:1383
int64_t start_time
start time in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:573
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:235
int index
Definition: ffmpeg.h:286
char * scale_sws_opts
sws options to use for the auto-inserted scale filters
Definition: avfilter.h:860
void choose_sample_fmt(AVStream *st, const AVCodec *codec)
Definition: ffmpeg_filter.c:94
struct FilterGraph * graph
Definition: ffmpeg.h:241
AVBufferRef * hw_frames_ctx
For hwaccel-format frames, this should be a reference to the AVHWFramesContext describing the frame...
Definition: frame.h:653
Format I/O context.
Definition: avformat.h:1239
enum AVPixelFormat choose_pixel_fmt(AVStream *st, AVCodecContext *enc_ctx, const AVCodec *codec, enum AVPixelFormat target)
Definition: ffmpeg_filter.c:63
AVRational av_guess_frame_rate(AVFormatContext *ctx, AVStream *stream, AVFrame *frame)
Guess the frame rate, based on both the container and codec information.
Definition: utils.c:5068
int configure_filtergraph(FilterGraph *fg)
memory buffer sink API for audio and video
struct InputStream * ist
Definition: ffmpeg.h:240
char * name
name of this filter instance
Definition: avfilter.h:346
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
AVFilterGraph * graph
Definition: ffmpeg.h:289
int av_buffersink_get_sample_rate(const AVFilterContext *ctx)
int avfilter_link(AVFilterContext *src, unsigned srcpad, AVFilterContext *dst, unsigned dstpad)
Link two filters together.
Definition: avfilter.c:135
AVFilterPad * output_pads
array of output pads
Definition: avfilter.h:352
int user_set_discard
Definition: ffmpeg.h:302
static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter, AVFilterInOut *in)
static int64_t start_time
Definition: ffplay.c:332
all automatic conversions disabled
Definition: avfilter.h:991
uint8_t
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:190
AVDictionary * sws_dict
Definition: ffmpeg.h:513
int auto_conversion_filters
Definition: ffmpeg_opt.c:176
int width
Video only.
Definition: codec_par.h:126
void check_filter_outputs(void)
AVOptions.
#define f(width, name)
Definition: cbs_vp9.c:255
static int configure_output_video_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
#define AV_CODEC_PROP_LOSSLESS
Codec supports lossless compression.
Definition: codec_desc.h:82
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce output
int configure_output_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
int audio_sync_method
Definition: ffmpeg_opt.c:154
int shortest
Definition: ffmpeg.h:576
AVStream ** streams
A list of all streams in the file.
Definition: avformat.h:1307
int64_t duration
Definition: movenc.c:63
int avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt, const char *name, const char *args, void *opaque, AVFilterGraph *graph_ctx)
Create and add a filter instance into an existing graph.
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:40
enum AVMediaType type
Definition: ffmpeg.h:243
int nb_threads
Maximum number of threads used by filters in this graph.
Definition: avfilter.h:884
AVDictionary * resample_opts
Definition: ffmpeg.h:515
AVFilterContext * filter
Definition: ffmpeg.h:263
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:79
int nb_input_files
Definition: ffmpeg.c:150
AVCodec * dec
Definition: ffmpeg.h:308
int avfilter_graph_parse2(AVFilterGraph *graph, const char *filters, AVFilterInOut **inputs, AVFilterInOut **outputs)
Add a graph described by a string to a graph.
Definition: graphparser.c:418
int ist_in_filtergraph(FilterGraph *fg, InputStream *ist)
int file_index
Definition: ffmpeg.h:299
int av_buffersink_get_h(const AVFilterContext *ctx)
struct InputStream::sub2video sub2video
int av_buffersink_get_format(const AVFilterContext *ctx)
#define av_log(a,...)
int filter_complex_nbthreads
Definition: ffmpeg_opt.c:174
void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub)
Definition: ffmpeg.c:240
#define FF_COMPLIANCE_UNOFFICIAL
Allow unofficial extensions.
Definition: avcodec.h:1598
A filter pad used for either input or output.
Definition: internal.h:54
unsigned int initialize
marks if sub2video_update should force an initialization
Definition: ffmpeg.h:355
uint64_t channel_layout
Definition: ffmpeg.h:277
AVFifoBuffer * sub_queue
queue of AVSubtitle* before filter init
Definition: ffmpeg.h:352
static int insert_filter(AVFilterContext **last_filter, int *pad_idx, const char *filter_name, const char *args)
enum AVCodecID id
Definition: codec.h:204
AVFilterPad * input_pads
array of input pads
Definition: avfilter.h:348
int hw_device_setup_for_filter(FilterGraph *fg)
Definition: ffmpeg_hw.c:525
AVRational sample_aspect_ratio
Definition: ffmpeg.h:251
AVCodecID
Identify the syntax and semantics of the bitstream.
Definition: codec_id.h:46
int width
Definition: frame.h:372
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:194
#define GET_CH_LAYOUT_NAME(ch_layout)
Definition: cmdutils.h:638
int sample_rate
Definition: ffmpeg.h:253
FilterGraph ** filtergraphs
Definition: ffmpeg.c:157
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce then the filter should push the output frames on the output link immediately As an exception to the previous rule if the input frame is enough to produce several output frames then the filter needs output only at least one per link The additional frames can be left buffered in the filter
AVFilterContext * filter
Definition: ffmpeg.h:239
static __device__ float fabs(float a)
Definition: cuda_runtime.h:182
int format
Definition: ffmpeg.h:248
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:203
unsigned nb_outputs
number of output pads
Definition: avfilter.h:354
Display matrix.
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:69
packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
Definition: pixfmt.h:95
int av_fifo_generic_read(AVFifoBuffer *f, void *dest, int buf_size, void(*func)(void *, void *, int))
Feed data from an AVFifoBuffer to a user-supplied callback.
Definition: fifo.c:213
enum AVMediaType codec_type
General type of the encoded data.
Definition: codec_par.h:56
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
Definition: opt.c:586
simple assert() macros that are a bit more flexible than ISO C assert().
const char * name
Name of the codec implementation.
Definition: codec.h:197
const char * av_get_sample_fmt_name(enum AVSampleFormat sample_fmt)
Return the name of sample_fmt, or NULL if sample_fmt is not recognized.
Definition: samplefmt.c:49
int eof
Definition: ffmpeg.h:259
int format
video: the pixel format, value corresponds to enum AVPixelFormat audio: the sample format...
Definition: buffersrc.h:78
#define FFMAX(a, b)
Definition: common.h:94
#define fail()
Definition: checkasm.h:123
#define AV_CODEC_CAP_VARIABLE_FRAME_SIZE
Audio encoder supports receiving a different number of samples in each call.
Definition: codec.h:122
static int configure_output_audio_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
uint64_t channel_layout
Audio channel layout.
Definition: avcodec.h:1242
int filter_nbthreads
Definition: ffmpeg_opt.c:173
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
OutputFilter * filter
Definition: ffmpeg.h:507
uint64_t channel_layout
Channel layout of the audio data.
Definition: frame.h:491
int props
Codec properties, a combination of AV_CODEC_PROP_* flags.
Definition: codec_desc.h:54
const AVFilter * avfilter_get_by_name(const char *name)
Get a filter definition matching the given name.
Definition: allfilters.c:526
unsigned int nb_streams
Number of elements in AVFormatContext.streams.
Definition: avformat.h:1295
uint8_t nb_components
The number of components each pixel has, (1-4)
Definition: pixdesc.h:83
int channels
number of audio channels, only used for audio.
Definition: frame.h:620
audio channel layout utility functions
enum AVPixelFormat * pix_fmts
array of supported pixel formats, or NULL if unknown, array is terminated by -1
Definition: codec.h:211
This structure contains the parameters describing the frames that will be passed to this filter...
Definition: buffersrc.h:73
unsigned nb_inputs
number of input pads
Definition: avfilter.h:350
external API header
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:78
#define AV_OPT_SEARCH_CHILDREN
Search in possible children of the given object first.
Definition: opt.h:560
struct OutputStream * ost
Definition: ffmpeg.h:264
int width
picture width / height.
Definition: avcodec.h:704
uint8_t w
Definition: llviddspenc.c:38
char * apad
Definition: ffmpeg.h:516
int width
Definition: ffmpeg.h:250
AVFormatContext * ctx
Definition: movenc.c:48
int nb_filtergraphs
Definition: ffmpeg.c:158
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
#define s(width, name)
Definition: cbs_vp9.c:257
int audio_channels_mapped
Definition: ffmpeg.h:502
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:69
enum AVCodecID codec_id
Definition: vaapi_decode.c:369
int height
Definition: ffmpeg.h:273
int sample_rate
Definition: ffmpeg.h:276
#define GET_SAMPLE_FMT_NAME(sample_fmt)
Definition: cmdutils.h:631
AVFilterContext * filter_ctx
filter context associated to this input/output
Definition: avfilter.h:1023
static const AVFilterPad outputs[]
Definition: af_acontrast.c:203
void exit_program(int ret)
Wraps exit with a program-specific cleanup routine.
Definition: cmdutils.c:133
AVCodecContext * enc
Definition: muxing.c:55
int start_at_zero
Definition: ffmpeg_opt.c:163
int avfilter_init_str(AVFilterContext *filter, const char *args)
Initialize a filter with the supplied parameters.
Definition: avfilter.c:935
int audio_volume
Definition: ffmpeg_opt.c:153
Stream structure.
Definition: avformat.h:880
static char * describe_filter_link(FilterGraph *fg, AVFilterInOut *inout, int in)
A linked-list of the inputs/outputs of the filter chain.
Definition: avfilter.h:1018
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:387
InputFilter ** filters
Definition: ffmpeg.h:362
enum AVPixelFormat avcodec_find_best_pix_fmt_of_2(enum AVPixelFormat dst_pix_fmt1, enum AVPixelFormat dst_pix_fmt2, enum AVPixelFormat src_pix_fmt, int has_alpha, int *loss_ptr)
Definition: imgconvert.c:54
#define GET_SAMPLE_RATE_NAME(rate)
Definition: cmdutils.h:634
int64_t recording_time
Definition: ffmpeg.h:412
int frame_size
Number of samples per channel in an audio frame.
Definition: avcodec.h:1211
AVBufferRef * hw_frames_ctx
Video with a hwaccel pixel format only.
Definition: buffersrc.h:106
AVStream * st
Definition: ffmpeg.h:300
sample_rate
#define AV_LOG_INFO
Standard information.
Definition: log.h:205
#define AV_BPRINT_SIZE_AUTOMATIC
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:253
static int configure_input_audio_filter(FilterGraph *fg, InputFilter *ifilter, AVFilterInOut *in)
enum AVMediaType codec_type
Definition: avcodec.h:539
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:58
enum AVCodecID codec_id
Definition: avcodec.h:541
int av_fifo_size(const AVFifoBuffer *f)
Return the amount of data in bytes in the AVFifoBuffer, that is the amount of data you can read from ...
Definition: fifo.c:77
int ist_index
Definition: ffmpeg.h:401
const char * graph_desc
Definition: ffmpeg.h:287
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
uint64_t av_buffersink_get_channel_layout(const AVFilterContext *ctx)
int64_t start_time
Definition: ffmpeg.h:410
void avio_w8(AVIOContext *s, int b)
Definition: aviobuf.c:203
main external API structure.
Definition: avcodec.h:531
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:372
void avsubtitle_free(AVSubtitle *sub)
Free all allocated data in the given subtitle struct.
Definition: utils.c:1118
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:402
int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame)
AVCodecContext * enc_ctx
Definition: ffmpeg.h:469
#define AUTO_INSERT_FILTER(opt_name, filter_name, arg)
AVBufferRef * hw_frames_ctx
Definition: ffmpeg.h:257
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(const int16_t *) pi >> 8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(const int32_t *) pi >> 24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31))))#define SET_CONV_FUNC_GROUP(ofmt, ifmt) static void set_generic_function(AudioConvert *ac){}void ff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, int sample_rate, int apply_map){AudioConvert *ac;int in_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) return NULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt) > 2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);return NULL;}return ac;}in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}else if(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;else ac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);return ac;}int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){int use_generic=1;int len=in->nb_samples;int p;if(ac->dc){av_log(ac->avr, AV_LOG_TRACE,"%d samples - audio_convert: %s to %s (dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));return ff_convert_dither(ac-> in
int channels
Definition: ffmpeg.h:254
int * audio_channels_map
Definition: ffmpeg.h:501
static const uint16_t channel_layouts[7]
Definition: dca_lbr.c:113
int sample_rate
Sample rate of the audio data.
Definition: frame.h:486
Filter definition.
Definition: avfilter.h:145
int pad_idx
index of the filt_ctx pad to use for linking
Definition: avfilter.h:1026
Rational number (pair of numerator and denominator).
Definition: rational.h:58
int file_index
Definition: ffmpeg.h:448
AVCodecContext * dec_ctx
Definition: ffmpeg.h:307
AVMediaType
Definition: avutil.h:199
size_t av_strlcatf(char *dst, size_t size, const char *fmt,...)
Definition: avstring.c:101
packed BGR 8:8:8, 32bpp, BGRXBGRX... X=unused/undefined
Definition: pixfmt.h:240
const char * name
Filter name.
Definition: avfilter.h:149
int av_buffersink_get_w(const AVFilterContext *ctx)
int autorotate
Definition: ffmpeg.h:340
#define snprintf
Definition: snprintf.h:34
const char * avfilter_pad_get_name(const AVFilterPad *pads, int pad_idx)
Get the name of an AVFilterPad.
Definition: avfilter.c:1045
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:300
This struct describes the properties of a single codec described by an AVCodecID. ...
Definition: codec_desc.h:38
float audio_drift_threshold
Definition: ffmpeg_opt.c:149
char * name
unique name for this input/output in the list
Definition: avfilter.h:1020
int nb_filters
Definition: ffmpeg.h:363
int64_t start_time
Position of the first frame of the component, in AV_TIME_BASE fractional seconds. ...
Definition: avformat.h:1344
#define AVERROR_FILTER_NOT_FOUND
Filter not found.
Definition: error.h:58
int autoscale
Definition: ffmpeg.h:486
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
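For example (a trivial sketch), inverting a time base gives the corresponding rate:

AVRational time_base = { 1, 48000 };
AVRational rate      = av_inv_q(time_base);   /* { 48000, 1 } */
double     hz        = av_q2d(rate);          /* 48000.0 */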
static int sub2video_prepare(InputStream *ist, InputFilter *ifilter)
static AVStream * ost
int reconfiguration
Definition: ffmpeg.h:290
struct FilterGraph * graph
Definition: ffmpeg.h:265
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
sample_rates
void av_buffersink_set_frame_size(AVFilterContext *ctx, unsigned frame_size)
Set the frame size for an audio buffer sink.
Definition: buffersink.c:198
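In practice this is called once the graph is configured, so that an audio encoder receives frames of exactly its required size; the sketch below assumes buffersink_ctx is an abuffersink instance and enc_ctx the audio encoder's AVCodecContext.

if (!(enc_ctx->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
    av_buffersink_set_frame_size(buffersink_ctx, enc_ctx->frame_size);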
int av_buffersrc_parameters_set(AVFilterContext *ctx, AVBufferSrcParameters *param)
Initialize the buffersrc or abuffersrc filter with the provided parameters.
Definition: buffersrc.c:98
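A minimal sketch of the usual pairing with av_buffersrc_parameters_alloc(): fill in the stream properties and apply them to an already-allocated buffer source (buffersrc_ctx here is assumed to be a "buffer" filter instance; the values are examples only).

AVBufferSrcParameters *par = av_buffersrc_parameters_alloc();
int ret;

if (!par)
    return AVERROR(ENOMEM);
par->format    = AV_PIX_FMT_YUV420P;        /* example stream properties */
par->width     = 1280;
par->height    = 720;
par->time_base = (AVRational){ 1, 25 };
ret = av_buffersrc_parameters_set(buffersrc_ctx, par);
av_freep(&par);
if (ret < 0)
    return ret;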
AVStream * st
Definition: muxing.c:54
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:93
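Sketch of the reference-counting pattern: take an extra reference before storing the buffer in a second place, and drop it with av_buffer_unref() when done (hw_frames_ctx stands for any existing AVBufferRef).

AVBufferRef *extra = av_buffer_ref(hw_frames_ctx);
if (!extra)
    return AVERROR(ENOMEM);
/* ... keep "extra" independently of the original reference ... */
av_buffer_unref(&extra);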
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:80
const char * avcodec_get_name(enum AVCodecID id)
Get the name of a codec.
Definition: utils.c:1204
char * key
Definition: dict.h:86
int den
Denominator.
Definition: rational.h:60
uint64_t channel_layout
Definition: ffmpeg.h:255
int copy_ts
Definition: ffmpeg_opt.c:162
AVFormatContext * ctx
Definition: ffmpeg.h:398
#define AVERROR_DECODER_NOT_FOUND
Decoder not found.
Definition: error.h:52
int do_deinterlace
Definition: ffmpeg_opt.c:157
#define GROW_ARRAY(array, nb_elems)
Definition: cmdutils.h:622
pixel format definitions
char * avfilter
Definition: ffmpeg.h:508
uint8_t * name
Definition: ffmpeg.h:242
char * value
Definition: dict.h:87
AVFifoBuffer * av_fifo_alloc(unsigned int size)
Initialize an AVFifoBuffer.
Definition: fifo.c:43
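A small sketch of the FIFO pattern used for queuing AVFrame pointers: allocate the buffer, write and read pointer-sized records, and free it afterwards.

AVFifoBuffer *fifo = av_fifo_alloc(8 * sizeof(AVFrame *));
AVFrame *frame     = av_frame_alloc();
AVFrame *dequeued  = NULL;

if (!fifo || !frame)
    return AVERROR(ENOMEM);
av_fifo_generic_write(fifo, &frame, sizeof(frame), NULL);          /* queue the pointer */
if (av_fifo_size(fifo) >= sizeof(dequeued))
    av_fifo_generic_read(fifo, &dequeued, sizeof(dequeued), NULL); /* dequeue it again */
av_frame_free(&dequeued);
av_fifo_freep(&fifo);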
int len
AVFilterContext * avfilter_graph_alloc_filter(AVFilterGraph *graph, const AVFilter *filter, const char *name)
Create a new filter instance in a filter graph.
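Typical usage, sketched: look the filter up by name, allocate an instance inside the graph, then initialize it; error codes follow the AVERROR conventions referenced above.

AVFilterGraph   *graph = avfilter_graph_alloc();
const AVFilter  *f     = avfilter_get_by_name("buffersink");
AVFilterContext *sink;
int ret;

if (!graph)
    return AVERROR(ENOMEM);
if (!f)
    return AVERROR_FILTER_NOT_FOUND;
sink = avfilter_graph_alloc_filter(graph, f, "out");
if (!sink)
    return AVERROR(ENOMEM);
ret = avfilter_init_str(sink, NULL);    /* a plain buffersink needs no options */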
int channels
number of audio channels
Definition: avcodec.h:1192
OutputFilter ** outputs
Definition: ffmpeg.h:294
InputFile ** input_files
Definition: ffmpeg.c:149
AVBufferSrcParameters * av_buffersrc_parameters_alloc(void)
Allocate a new AVBufferSrcParameters instance.
Definition: buffersrc.c:87
AVFormatContext * ctx
Definition: ffmpeg.h:569
const AVCodecDescriptor * avcodec_descriptor_get(enum AVCodecID id)
Definition: codec_desc.c:3457
int filtergraph_is_simple(FilterGraph *fg)
An instance of a filter.
Definition: avfilter.h:341
static char * choose_pix_fmts(OutputFilter *ofilter)
static void cleanup_filtergraph(FilterGraph *fg)
AVDictionary * encoder_opts
Definition: ffmpeg.h:512
int64_t av_get_default_channel_layout(int nb_channels)
Return default channel layout for a given number of channels.
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:940
int height
Definition: frame.h:372
FILE * out
Definition: movenc.c:54
InputFilter ** inputs
Definition: ffmpeg.h:292
#define av_freep(p)
#define AV_DICT_IGNORE_SUFFIX
Return first entry in a dictionary whose first part corresponds to the search key, ignoring the suffix of the found key string.
Definition: dict.h:70
OutputFile ** output_files
Definition: ffmpeg.c:154
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:188
AVCodecParameters * codecpar
Codec parameters associated with this stream.
Definition: avformat.h:1045
enum AVSampleFormat * sample_fmts
array of supported sample formats, or NULL if unknown, array is terminated by -1
Definition: codec.h:213
int format
Definition: ffmpeg.h:275
formats
Definition: signature.h:48
int init_simple_filtergraph(InputStream *ist, OutputStream *ost)
int discard
Definition: ffmpeg.h:301
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2489
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avformat.h:909
Filter: the word “frame” indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format: for each input and each output, the list of supported formats. For video that means pixel format; for audio that means channel layout and sample format. These lists are references to shared objects: when the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection, and when a single format is eventually chosen for a link amongst the remaining ones, all references to the list are updated. That means that if a filter requires that its input and output have the same format amongst a supported list, all it has to do is use a reference to the same list of formats. query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
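Inside libavfilter this typically looks like the following sketch, which builds one format list and attaches it to every input and output of the filter, forcing them to agree; ff_make_format_list() and ff_set_common_formats() are internal helpers from libavfilter/formats.h, so this only illustrates the pattern.

static int query_formats(AVFilterContext *ctx)
{
    /* one shared list for all links: input and output formats must match */
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_NONE
    };
    AVFilterFormats *fmts = ff_make_format_list(pix_fmts);
    if (!fmts)
        return AVERROR(ENOMEM);
    return ff_set_common_formats(ctx, fmts);
}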
int nb_inputs
Definition: ffmpeg.h:293
enum AVDiscard discard
Selects which packets can be discarded at will and do not need to be demuxed.
Definition: avformat.h:935
int index
Definition: ffmpeg.h:449
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
enum AVMediaType type
Definition: ffmpeg.h:270
AVFifoBuffer * frame_queue
Definition: ffmpeg.h:245
#define AUTO_INSERT_FILTER_INPUT(opt_name, filter_name, arg)
int av_opt_set(void *obj, const char *name, const char *val, int search_flags)
Definition: opt.c:465
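Sketch of the call made for the "strict" option above: set a named option from its string value on any AVOptions-enabled object (enc_ctx stands for an AVCodecContext; 0 searches only the object's own options).

int ret = av_opt_set(enc_ctx, "strict", "unofficial", 0);
if (ret < 0)
    av_log(NULL, AV_LOG_ERROR, "Could not set option 'strict'\n");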
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
Definition: avcodec.h:1594
const AVFilter * filter
the AVFilter of which this is an instance
Definition: avfilter.h:344
int i
Definition: input.c:407
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
#define tb
Definition: regdef.h:68
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
Definition: buffersrc.c:147
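The usual push/pull loop around a configured graph, sketched with assumed buffersrc_ctx and buffersink_ctx filter contexts and preallocated frames: push a decoded frame into the source, then drain the sink until it needs more input.

ret = av_buffersrc_add_frame(buffersrc_ctx, decoded);      /* NULL here would signal EOF */
if (ret < 0)
    return ret;
while ((ret = av_buffersink_get_frame(buffersink_ctx, filtered)) >= 0) {
    /* ... consume "filtered" ... */
    av_frame_unref(filtered);
}
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
    ret = 0;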
InputStream ** input_streams
Definition: ffmpeg.c:147
int avio_printf(AVIOContext *s, const char *fmt,...) av_printf_format(2
Writes a formatted string to the context.
discard nothing
Definition: avcodec.h:230
const char * name
Definition: opengl_enc.c:102
static uint8_t tmp[11]
Definition: aes_ctr.c:26