FFmpeg
ffmpeg_filter.c
1 /*
2  * ffmpeg filter configuration
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <stdint.h>
22 
23 #include "ffmpeg.h"
24 
25 #include "libavfilter/avfilter.h"
26 #include "libavfilter/buffersink.h"
27 #include "libavfilter/buffersrc.h"
28 
29 #include "libavresample/avresample.h"
30 
31 #include "libavutil/avassert.h"
32 #include "libavutil/avstring.h"
33 #include "libavutil/bprint.h"
34 #include "libavutil/channel_layout.h"
35 #include "libavutil/display.h"
36 #include "libavutil/opt.h"
37 #include "libavutil/pixdesc.h"
38 #include "libavutil/pixfmt.h"
39 #include "libavutil/imgutils.h"
40 #include "libavutil/samplefmt.h"
41 
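/* MJPEG/LJPEG special case: when strict_std_compliance is relaxed to
 * "unofficial" or lower, these encoders are offered an extended pixel format
 * list instead of their default one; every other codec keeps its defaults. */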
42 static const enum AVPixelFormat *get_compliance_unofficial_pix_fmts(enum AVCodecID codec_id, const enum AVPixelFormat default_formats[])
43 {
44  static const enum AVPixelFormat mjpeg_formats[] =
45  { AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P,
46  AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P,
47  AV_PIX_FMT_NONE };
48  static const enum AVPixelFormat ljpeg_formats[] =
49  { AV_PIX_FMT_BGR24, AV_PIX_FMT_BGRA, AV_PIX_FMT_BGR0,
50  AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P,
51  AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P,
52  AV_PIX_FMT_NONE};
53 
54  if (codec_id == AV_CODEC_ID_MJPEG) {
55  return mjpeg_formats;
56  } else if (codec_id == AV_CODEC_ID_LJPEG) {
57  return ljpeg_formats;
58  } else {
59  return default_formats;
60  }
61 }
62 
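/* Select the pixel format actually given to the encoder: if the requested
 * format is not in the codec's pix_fmts list, fall back to the closest
 * supported format via avcodec_find_best_pix_fmt_of_2() and log a warning. */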
63 enum AVPixelFormat choose_pixel_fmt(AVStream *st, AVCodecContext *enc_ctx, AVCodec *codec, enum AVPixelFormat target)
64 {
65  if (codec && codec->pix_fmts) {
66  const enum AVPixelFormat *p = codec->pix_fmts;
67  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(target);
68  //FIXME: This should check for AV_PIX_FMT_FLAG_ALPHA after PAL8 pixel format without alpha is implemented
69  int has_alpha = desc ? desc->nb_components % 2 == 0 : 0;
70  enum AVPixelFormat best= AV_PIX_FMT_NONE;
71 
72  if (enc_ctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL) {
73  p = get_compliance_unofficial_pix_fmts(enc_ctx->codec_id, p);
74  }
75  for (; *p != AV_PIX_FMT_NONE; p++) {
76  best= avcodec_find_best_pix_fmt_of_2(best, *p, target, has_alpha, NULL);
77  if (*p == target)
78  break;
79  }
80  if (*p == AV_PIX_FMT_NONE) {
81  if (target != AV_PIX_FMT_NONE)
82  av_log(NULL, AV_LOG_WARNING,
83  "Incompatible pixel format '%s' for codec '%s', auto-selecting format '%s'\n",
84  av_get_pix_fmt_name(target),
85  codec->name,
86  av_get_pix_fmt_name(best));
87  return best;
88  }
89  }
90  return target;
91 }
92 
93 static void choose_sample_fmt(AVStream *st, AVCodec *codec)
94 {
95  if (codec && codec->sample_fmts) {
96  const enum AVSampleFormat *p = codec->sample_fmts;
97  for (; *p != -1; p++) {
98  if (*p == st->codecpar->format)
99  break;
100  }
101  if (*p == -1) {
102  if((codec->capabilities & AV_CODEC_CAP_LOSSLESS) && av_get_sample_fmt_name(st->codecpar->format) > av_get_sample_fmt_name(codec->sample_fmts[0]))
103  av_log(NULL, AV_LOG_ERROR, "Conversion will not be lossless.\n");
104  if(av_get_sample_fmt_name(st->codecpar->format))
105  av_log(NULL, AV_LOG_WARNING,
106  "Incompatible sample format '%s' for codec '%s', auto-selecting format '%s'\n",
107  av_get_sample_fmt_name(st->codecpar->format),
108  codec->name,
109  av_get_sample_fmt_name(codec->sample_fmts[0]));
110  st->codecpar->format = codec->sample_fmts[0];
111  }
112  }
113 }
114 
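/* Build the '|'-separated pixel format list that is passed to the output
 * "format" filter; NULL means any pixel format is acceptable. */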
115 static char *choose_pix_fmts(OutputFilter *ofilter)
116 {
117  OutputStream *ost = ofilter->ost;
118  AVDictionaryEntry *strict_dict = av_dict_get(ost->encoder_opts, "strict", NULL, 0);
119  if (strict_dict)
120  // used by choose_pixel_fmt() and below
121  av_opt_set(ost->enc_ctx, "strict", strict_dict->value, 0);
122 
123  if (ost->keep_pix_fmt) {
124  avfilter_graph_set_auto_convert(ofilter->graph->graph,
125  AVFILTER_AUTO_CONVERT_NONE);
126  if (ost->enc_ctx->pix_fmt == AV_PIX_FMT_NONE)
127  return NULL;
128  return av_strdup(av_get_pix_fmt_name(ost->enc_ctx->pix_fmt));
129  }
130  if (ost->enc_ctx->pix_fmt != AV_PIX_FMT_NONE) {
131  return av_strdup(av_get_pix_fmt_name(choose_pixel_fmt(ost->st, ost->enc_ctx, ost->enc, ost->enc_ctx->pix_fmt)));
132  } else if (ost->enc && ost->enc->pix_fmts) {
133  const enum AVPixelFormat *p;
134  AVIOContext *s = NULL;
135  uint8_t *ret;
136  int len;
137 
138  if (avio_open_dyn_buf(&s) < 0)
139  exit_program(1);
140 
141  p = ost->enc->pix_fmts;
142  if (ost->enc_ctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL) {
143  p = get_compliance_unofficial_pix_fmts(ost->enc_ctx->codec_id, p);
144  }
145 
146  for (; *p != AV_PIX_FMT_NONE; p++) {
147  const char *name = av_get_pix_fmt_name(*p);
148  avio_printf(s, "%s|", name);
149  }
150  len = avio_close_dyn_buf(s, &ret);
151  ret[len - 1] = 0;
152  return ret;
153  } else
154  return NULL;
155 }
156 
157 /* Define a function for building a string containing a list of
158  * allowed formats. */
159 #define DEF_CHOOSE_FORMAT(suffix, type, var, supported_list, none, get_name) \
160 static char *choose_ ## suffix (OutputFilter *ofilter) \
161 { \
162  if (ofilter->var != none) { \
163  get_name(ofilter->var); \
164  return av_strdup(name); \
165  } else if (ofilter->supported_list) { \
166  const type *p; \
167  AVIOContext *s = NULL; \
168  uint8_t *ret; \
169  int len; \
170  \
171  if (avio_open_dyn_buf(&s) < 0) \
172  exit_program(1); \
173  \
174  for (p = ofilter->supported_list; *p != none; p++) { \
175  get_name(*p); \
176  avio_printf(s, "%s|", name); \
177  } \
178  len = avio_close_dyn_buf(s, &ret); \
179  ret[len - 1] = 0; \
180  return ret; \
181  } else \
182  return NULL; \
183 }
184 
185 //DEF_CHOOSE_FORMAT(pix_fmts, enum AVPixelFormat, format, formats, AV_PIX_FMT_NONE,
186 // GET_PIX_FMT_NAME)
187 
188 DEF_CHOOSE_FORMAT(sample_fmts, enum AVSampleFormat, format, formats,
189  AV_SAMPLE_FMT_NONE, GET_SAMPLE_FMT_NAME)
190 
191 DEF_CHOOSE_FORMAT(sample_rates, int, sample_rate, sample_rates, 0,
192  GET_SAMPLE_RATE_NAME)
193 
194 DEF_CHOOSE_FORMAT(channel_layouts, uint64_t, channel_layout, channel_layouts, 0,
195  GET_CH_LAYOUT_NAME)
196 
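/* Create the trivial filtergraph used when no explicit filter description is
 * given: exactly one input (ist) wired to exactly one output (ost). */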
197 int init_simple_filtergraph(InputStream *ist, OutputStream *ost)
198 {
199  FilterGraph *fg = av_mallocz(sizeof(*fg));
200 
201  if (!fg)
202  exit_program(1);
203  fg->index = nb_filtergraphs;
204 
205  GROW_ARRAY(fg->outputs, fg->nb_outputs);
206  if (!(fg->outputs[0] = av_mallocz(sizeof(*fg->outputs[0]))))
207  exit_program(1);
208  fg->outputs[0]->ost = ost;
209  fg->outputs[0]->graph = fg;
210  fg->outputs[0]->format = -1;
211 
212  ost->filter = fg->outputs[0];
213 
214  GROW_ARRAY(fg->inputs, fg->nb_inputs);
215  if (!(fg->inputs[0] = av_mallocz(sizeof(*fg->inputs[0]))))
216  exit_program(1);
217  fg->inputs[0]->ist = ist;
218  fg->inputs[0]->graph = fg;
219  fg->inputs[0]->format = -1;
220 
221  fg->inputs[0]->frame_queue = av_fifo_alloc(8 * sizeof(AVFrame*));
222  if (!fg->inputs[0]->frame_queue)
223  exit_program(1);
224 
225  GROW_ARRAY(ist->filters, ist->nb_filters);
226  ist->filters[ist->nb_filters - 1] = fg->inputs[0];
227 
228  GROW_ARRAY(filtergraphs, nb_filtergraphs);
229  filtergraphs[nb_filtergraphs - 1] = fg;
230 
231  return 0;
232 }
233 
234 static char *describe_filter_link(FilterGraph *fg, AVFilterInOut *inout, int in)
235 {
236  AVFilterContext *ctx = inout->filter_ctx;
237  AVFilterPad *pads = in ? ctx->input_pads : ctx->output_pads;
238  int nb_pads = in ? ctx->nb_inputs : ctx->nb_outputs;
239  AVIOContext *pb;
240  uint8_t *res = NULL;
241 
242  if (avio_open_dyn_buf(&pb) < 0)
243  exit_program(1);
244 
245  avio_printf(pb, "%s", ctx->filter->name);
246  if (nb_pads > 1)
247  avio_printf(pb, ":%s", avfilter_pad_get_name(pads, inout->pad_idx));
248  avio_w8(pb, 0);
249  avio_close_dyn_buf(pb, &res);
250  return res;
251 }
252 
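/* Bind one filtergraph input pad to an input stream: either parse the
 * "file_index:stream_specifier" label, or fall back to the first unused
 * stream of the matching media type. */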
253 static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
254 {
255  InputStream *ist = NULL;
256  enum AVMediaType type = avfilter_pad_get_type(in->filter_ctx->input_pads, in->pad_idx);
257  int i;
258 
259  // TODO: support other filter types
260  if (type != AVMEDIA_TYPE_VIDEO && type != AVMEDIA_TYPE_AUDIO) {
261  av_log(NULL, AV_LOG_FATAL, "Only video and audio filters supported "
262  "currently.\n");
263  exit_program(1);
264  }
265 
266  if (in->name) {
267  AVFormatContext *s;
268  AVStream *st = NULL;
269  char *p;
270  int file_idx = strtol(in->name, &p, 0);
271 
272  if (file_idx < 0 || file_idx >= nb_input_files) {
273  av_log(NULL, AV_LOG_FATAL, "Invalid file index %d in filtergraph description %s.\n",
274  file_idx, fg->graph_desc);
275  exit_program(1);
276  }
277  s = input_files[file_idx]->ctx;
278 
279  for (i = 0; i < s->nb_streams; i++) {
280  enum AVMediaType stream_type = s->streams[i]->codecpar->codec_type;
281  if (stream_type != type &&
282  !(stream_type == AVMEDIA_TYPE_SUBTITLE &&
283  type == AVMEDIA_TYPE_VIDEO /* sub2video hack */))
284  continue;
285  if (check_stream_specifier(s, s->streams[i], *p == ':' ? p + 1 : p) == 1) {
286  st = s->streams[i];
287  break;
288  }
289  }
290  if (!st) {
291  av_log(NULL, AV_LOG_FATAL, "Stream specifier '%s' in filtergraph description %s "
292  "matches no streams.\n", p, fg->graph_desc);
293  exit_program(1);
294  }
295  ist = input_streams[input_files[file_idx]->ist_index + st->index];
296  if (ist->user_set_discard == AVDISCARD_ALL) {
297  av_log(NULL, AV_LOG_FATAL, "Stream specifier '%s' in filtergraph description %s "
298  "matches a disabled input stream.\n", p, fg->graph_desc);
299  exit_program(1);
300  }
301  } else {
302  /* find the first unused stream of corresponding type */
303  for (i = 0; i < nb_input_streams; i++) {
304  ist = input_streams[i];
305  if (ist->user_set_discard == AVDISCARD_ALL)
306  continue;
307  if (ist->dec_ctx->codec_type == type && ist->discard)
308  break;
309  }
310  if (i == nb_input_streams) {
311  av_log(NULL, AV_LOG_FATAL, "Cannot find a matching stream for "
312  "unlabeled input pad %d on filter %s\n", in->pad_idx,
313  in->filter_ctx->name);
314  exit_program(1);
315  }
316  }
317  av_assert0(ist);
318 
319  ist->discard = 0;
320  ist->decoding_needed |= DECODING_FOR_FILTER;
321  ist->st->discard = AVDISCARD_NONE;
322 
323  GROW_ARRAY(fg->inputs, fg->nb_inputs);
324  if (!(fg->inputs[fg->nb_inputs - 1] = av_mallocz(sizeof(*fg->inputs[0]))))
325  exit_program(1);
326  fg->inputs[fg->nb_inputs - 1]->ist = ist;
327  fg->inputs[fg->nb_inputs - 1]->graph = fg;
328  fg->inputs[fg->nb_inputs - 1]->format = -1;
329  fg->inputs[fg->nb_inputs - 1]->type = ist->st->codecpar->codec_type;
330  fg->inputs[fg->nb_inputs - 1]->name = describe_filter_link(fg, in, 1);
331 
332  fg->inputs[fg->nb_inputs - 1]->frame_queue = av_fifo_alloc(8 * sizeof(AVFrame*));
333  if (!fg->inputs[fg->nb_inputs - 1]->frame_queue)
334  exit_program(1);
335 
336  GROW_ARRAY(ist->filters, ist->nb_filters);
337  ist->filters[ist->nb_filters - 1] = fg->inputs[fg->nb_inputs - 1];
338 }
339 
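/* For complex filtergraphs (-filter_complex): parse the description into a
 * temporary graph only to enumerate its unconnected inputs and outputs,
 * creating the corresponding InputFilter/OutputFilter entries. */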
340 int init_complex_filtergraph(FilterGraph *fg)
341 {
342  AVFilterInOut *inputs, *outputs, *cur;
343  AVFilterGraph *graph;
344  int ret = 0;
345 
346  /* this graph is only used for determining the kinds of inputs
347  * and outputs we have, and is discarded on exit from this function */
348  graph = avfilter_graph_alloc();
349  if (!graph)
350  return AVERROR(ENOMEM);
351  graph->nb_threads = 1;
352 
353  ret = avfilter_graph_parse2(graph, fg->graph_desc, &inputs, &outputs);
354  if (ret < 0)
355  goto fail;
356 
357  for (cur = inputs; cur; cur = cur->next)
358  init_input_filter(fg, cur);
359 
360  for (cur = outputs; cur;) {
361  GROW_ARRAY(fg->outputs, fg->nb_outputs);
362  fg->outputs[fg->nb_outputs - 1] = av_mallocz(sizeof(*fg->outputs[0]));
363  if (!fg->outputs[fg->nb_outputs - 1])
364  exit_program(1);
365 
366  fg->outputs[fg->nb_outputs - 1]->graph = fg;
367  fg->outputs[fg->nb_outputs - 1]->out_tmp = cur;
368  fg->outputs[fg->nb_outputs - 1]->type = avfilter_pad_get_type(cur->filter_ctx->output_pads,
369  cur->pad_idx);
370  fg->outputs[fg->nb_outputs - 1]->name = describe_filter_link(fg, cur, 0);
371  cur = cur->next;
372  fg->outputs[fg->nb_outputs - 1]->out_tmp->next = NULL;
373  }
374 
375 fail:
376  avfilter_inout_free(&inputs);
377  avfilter_graph_free(&graph);
378  return ret;
379 }
380 
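/* Append a trim (video) or atrim (audio) filter after *last_filter so that
 * start_time/recording_time limits are enforced inside the filtergraph;
 * a no-op when neither limit is set. */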
381 static int insert_trim(int64_t start_time, int64_t duration,
382  AVFilterContext **last_filter, int *pad_idx,
383  const char *filter_name)
384 {
385  AVFilterGraph *graph = (*last_filter)->graph;
386  AVFilterContext *ctx;
387  const AVFilter *trim;
388  enum AVMediaType type = avfilter_pad_get_type((*last_filter)->output_pads, *pad_idx);
389  const char *name = (type == AVMEDIA_TYPE_VIDEO) ? "trim" : "atrim";
390  int ret = 0;
391 
392  if (duration == INT64_MAX && start_time == AV_NOPTS_VALUE)
393  return 0;
394 
395  trim = avfilter_get_by_name(name);
396  if (!trim) {
397  av_log(NULL, AV_LOG_ERROR, "%s filter not present, cannot limit "
398  "recording time.\n", name);
399  return AVERROR_FILTER_NOT_FOUND;
400  }
401 
402  ctx = avfilter_graph_alloc_filter(graph, trim, filter_name);
403  if (!ctx)
404  return AVERROR(ENOMEM);
405 
406  if (duration != INT64_MAX) {
407  ret = av_opt_set_int(ctx, "durationi", duration,
408  AV_OPT_SEARCH_CHILDREN);
409  }
410  if (ret >= 0 && start_time != AV_NOPTS_VALUE) {
411  ret = av_opt_set_int(ctx, "starti", start_time,
412  AV_OPT_SEARCH_CHILDREN);
413  }
414  if (ret < 0) {
415  av_log(ctx, AV_LOG_ERROR, "Error configuring the %s filter", name);
416  return ret;
417  }
418 
419  ret = avfilter_init_str(ctx, NULL);
420  if (ret < 0)
421  return ret;
422 
423  ret = avfilter_link(*last_filter, *pad_idx, ctx, 0);
424  if (ret < 0)
425  return ret;
426 
427  *last_filter = ctx;
428  *pad_idx = 0;
429  return 0;
430 }
431 
432 static int insert_filter(AVFilterContext **last_filter, int *pad_idx,
433  const char *filter_name, const char *args)
434 {
435  AVFilterGraph *graph = (*last_filter)->graph;
436  AVFilterContext *ctx;
437  int ret;
438 
439  ret = avfilter_graph_create_filter(&ctx,
440  avfilter_get_by_name(filter_name),
441  filter_name, args, NULL, graph);
442  if (ret < 0)
443  return ret;
444 
445  ret = avfilter_link(*last_filter, *pad_idx, ctx, 0);
446  if (ret < 0)
447  return ret;
448 
449  *last_filter = ctx;
450  *pad_idx = 0;
451  return 0;
452 }
453 
454 static int configure_output_video_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
455 {
456  char *pix_fmts;
457  OutputStream *ost = ofilter->ost;
458  OutputFile *of = output_files[ost->file_index];
459  AVFilterContext *last_filter = out->filter_ctx;
460  int pad_idx = out->pad_idx;
461  int ret;
462  char name[255];
463 
464  snprintf(name, sizeof(name), "out_%d_%d", ost->file_index, ost->index);
465  ret = avfilter_graph_create_filter(&ofilter->filter,
466  avfilter_get_by_name("buffersink"),
467  name, NULL, NULL, fg->graph);
468 
469  if (ret < 0)
470  return ret;
471 
472  if (ofilter->width || ofilter->height) {
473  char args[255];
474  AVFilterContext *filter;
475  AVDictionaryEntry *e = NULL;
476 
477  snprintf(args, sizeof(args), "%d:%d",
478  ofilter->width, ofilter->height);
479 
480  while ((e = av_dict_get(ost->sws_dict, "", e,
481  AV_DICT_IGNORE_SUFFIX))) {
482  av_strlcatf(args, sizeof(args), ":%s=%s", e->key, e->value);
483  }
484 
485  snprintf(name, sizeof(name), "scaler_out_%d_%d",
486  ost->file_index, ost->index);
487  if ((ret = avfilter_graph_create_filter(&filter, avfilter_get_by_name("scale"),
488  name, args, NULL, fg->graph)) < 0)
489  return ret;
490  if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
491  return ret;
492 
493  last_filter = filter;
494  pad_idx = 0;
495  }
496 
497  if ((pix_fmts = choose_pix_fmts(ofilter))) {
498  AVFilterContext *filter;
499  snprintf(name, sizeof(name), "format_out_%d_%d",
500  ost->file_index, ost->index);
501  ret = avfilter_graph_create_filter(&filter,
502  avfilter_get_by_name("format"),
503  "format", pix_fmts, NULL, fg->graph);
504  av_freep(&pix_fmts);
505  if (ret < 0)
506  return ret;
507  if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
508  return ret;
509 
510  last_filter = filter;
511  pad_idx = 0;
512  }
513 
514  if (ost->frame_rate.num && 0) {
515  AVFilterContext *fps;
516  char args[255];
517 
518  snprintf(args, sizeof(args), "fps=%d/%d", ost->frame_rate.num,
519  ost->frame_rate.den);
520  snprintf(name, sizeof(name), "fps_out_%d_%d",
521  ost->file_index, ost->index);
522  ret = avfilter_graph_create_filter(&fps, avfilter_get_by_name("fps"),
523  name, args, NULL, fg->graph);
524  if (ret < 0)
525  return ret;
526 
527  ret = avfilter_link(last_filter, pad_idx, fps, 0);
528  if (ret < 0)
529  return ret;
530  last_filter = fps;
531  pad_idx = 0;
532  }
533 
534  snprintf(name, sizeof(name), "trim_out_%d_%d",
535  ost->file_index, ost->index);
536  ret = insert_trim(of->start_time, of->recording_time,
537  &last_filter, &pad_idx, name);
538  if (ret < 0)
539  return ret;
540 
541 
542  if ((ret = avfilter_link(last_filter, pad_idx, ofilter->filter, 0)) < 0)
543  return ret;
544 
545  return 0;
546 }
547 
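/* Audio counterpart of the video output path above: create an abuffersink,
 * then optionally insert pan (-map_channel), aformat, volume (-vol), apad
 * and atrim before linking the sink. */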
548 static int configure_output_audio_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
549 {
550  OutputStream *ost = ofilter->ost;
551  OutputFile *of = output_files[ost->file_index];
552  AVCodecContext *codec = ost->enc_ctx;
553  AVFilterContext *last_filter = out->filter_ctx;
554  int pad_idx = out->pad_idx;
555  char *sample_fmts, *sample_rates, *channel_layouts;
556  char name[255];
557  int ret;
558 
559  snprintf(name, sizeof(name), "out_%d_%d", ost->file_index, ost->index);
560  ret = avfilter_graph_create_filter(&ofilter->filter,
561  avfilter_get_by_name("abuffersink"),
562  name, NULL, NULL, fg->graph);
563  if (ret < 0)
564  return ret;
565  if ((ret = av_opt_set_int(ofilter->filter, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
566  return ret;
567 
568 #define AUTO_INSERT_FILTER(opt_name, filter_name, arg) do { \
569  AVFilterContext *filt_ctx; \
570  \
571  av_log(NULL, AV_LOG_INFO, opt_name " is forwarded to lavfi " \
572  "similarly to -af " filter_name "=%s.\n", arg); \
573  \
574  ret = avfilter_graph_create_filter(&filt_ctx, \
575  avfilter_get_by_name(filter_name), \
576  filter_name, arg, NULL, fg->graph); \
577  if (ret < 0) \
578  return ret; \
579  \
580  ret = avfilter_link(last_filter, pad_idx, filt_ctx, 0); \
581  if (ret < 0) \
582  return ret; \
583  \
584  last_filter = filt_ctx; \
585  pad_idx = 0; \
586 } while (0)
587  if (ost->audio_channels_mapped) {
588  int i;
589  AVBPrint pan_buf;
590  av_bprint_init(&pan_buf, 256, 8192);
591  av_bprintf(&pan_buf, "0x%"PRIx64,
592  av_get_default_channel_layout(ost->audio_channels_mapped));
593  for (i = 0; i < ost->audio_channels_mapped; i++)
594  if (ost->audio_channels_map[i] != -1)
595  av_bprintf(&pan_buf, "|c%d=c%d", i, ost->audio_channels_map[i]);
596 
597  AUTO_INSERT_FILTER("-map_channel", "pan", pan_buf.str);
598  av_bprint_finalize(&pan_buf, NULL);
599  }
600 
601  if (codec->channels && !codec->channel_layout)
602  codec->channel_layout = av_get_default_channel_layout(codec->channels);
603 
604  sample_fmts = choose_sample_fmts(ofilter);
605  sample_rates = choose_sample_rates(ofilter);
606  channel_layouts = choose_channel_layouts(ofilter);
607  if (sample_fmts || sample_rates || channel_layouts) {
608  AVFilterContext *format;
609  char args[256];
610  args[0] = 0;
611 
612  if (sample_fmts)
613  av_strlcatf(args, sizeof(args), "sample_fmts=%s:",
614  sample_fmts);
615  if (sample_rates)
616  av_strlcatf(args, sizeof(args), "sample_rates=%s:",
617  sample_rates);
618  if (channel_layouts)
619  av_strlcatf(args, sizeof(args), "channel_layouts=%s:",
620  channel_layouts);
621 
622  av_freep(&sample_fmts);
623  av_freep(&sample_rates);
624  av_freep(&channel_layouts);
625 
626  snprintf(name, sizeof(name), "format_out_%d_%d",
627  ost->file_index, ost->index);
628  ret = avfilter_graph_create_filter(&format,
629  avfilter_get_by_name("aformat"),
630  name, args, NULL, fg->graph);
631  if (ret < 0)
632  return ret;
633 
634  ret = avfilter_link(last_filter, pad_idx, format, 0);
635  if (ret < 0)
636  return ret;
637 
638  last_filter = format;
639  pad_idx = 0;
640  }
641 
642  if (audio_volume != 256 && 0) {
643  char args[256];
644 
645  snprintf(args, sizeof(args), "%f", audio_volume / 256.);
646  AUTO_INSERT_FILTER("-vol", "volume", args);
647  }
648 
649  if (ost->apad && of->shortest) {
650  char args[256];
651  int i;
652 
653  for (i=0; i<of->ctx->nb_streams; i++)
654  if (of->ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
655  break;
656 
657  if (i<of->ctx->nb_streams) {
658  snprintf(args, sizeof(args), "%s", ost->apad);
659  AUTO_INSERT_FILTER("-apad", "apad", args);
660  }
661  }
662 
663  snprintf(name, sizeof(name), "trim for output stream %d:%d",
664  ost->file_index, ost->index);
665  ret = insert_trim(of->start_time, of->recording_time,
666  &last_filter, &pad_idx, name);
667  if (ret < 0)
668  return ret;
669 
670  if ((ret = avfilter_link(last_filter, pad_idx, ofilter->filter, 0)) < 0)
671  return ret;
672 
673  return 0;
674 }
675 
676 int configure_output_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
677 {
678  if (!ofilter->ost) {
679  av_log(NULL, AV_LOG_FATAL, "Filter %s has an unconnected output\n", ofilter->name);
680  exit_program(1);
681  }
682 
683  switch (avfilter_pad_get_type(out->filter_ctx->output_pads, out->pad_idx)) {
684  case AVMEDIA_TYPE_VIDEO: return configure_output_video_filter(fg, ofilter, out);
685  case AVMEDIA_TYPE_AUDIO: return configure_output_audio_filter(fg, ofilter, out);
686  default: av_assert0(0);
687  }
688 }
689 
690 void check_filter_outputs(void)
691 {
692  int i;
693  for (i = 0; i < nb_filtergraphs; i++) {
694  int n;
695  for (n = 0; n < filtergraphs[i]->nb_outputs; n++) {
696  OutputFilter *output = filtergraphs[i]->outputs[n];
697  if (!output->ost) {
698  av_log(NULL, AV_LOG_FATAL, "Filter %s has an unconnected output\n", output->name);
699  exit_program(1);
700  }
701  }
702  }
703 }
704 
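/* sub2video: subtitle streams fed into video filtergraphs are rendered onto
 * an RGB32 canvas; pick the canvas size from the subtitle stream or, failing
 * that, from the largest video stream in the same file (default 720x576). */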
705 static int sub2video_prepare(InputStream *ist, InputFilter *ifilter)
706 {
707  AVFormatContext *avf = input_files[ist->file_index]->ctx;
708  int i, w, h;
709 
710  /* Compute the size of the canvas for the subtitles stream.
711  If the subtitles codecpar has set a size, use it. Otherwise use the
712  maximum dimensions of the video streams in the same file. */
713  w = ifilter->width;
714  h = ifilter->height;
715  if (!(w && h)) {
716  for (i = 0; i < avf->nb_streams; i++) {
717  if (avf->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
718  w = FFMAX(w, avf->streams[i]->codecpar->width);
719  h = FFMAX(h, avf->streams[i]->codecpar->height);
720  }
721  }
722  if (!(w && h)) {
723  w = FFMAX(w, 720);
724  h = FFMAX(h, 576);
725  }
726  av_log(avf, AV_LOG_INFO, "sub2video: using %dx%d canvas\n", w, h);
727  }
728  ist->sub2video.w = ifilter->width = w;
729  ist->sub2video.h = ifilter->height = h;
730 
731  ifilter->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
732  ifilter->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
733 
734  /* rectangles are AV_PIX_FMT_PAL8, but we have no guarantee that the
735  palettes for all rectangles are identical or compatible */
736  ifilter->format = AV_PIX_FMT_RGB32;
737 
738  ist->sub2video.frame = av_frame_alloc();
739  if (!ist->sub2video.frame)
740  return AVERROR(ENOMEM);
741  ist->sub2video.last_pts = INT64_MIN;
742  ist->sub2video.end_pts = INT64_MIN;
743 
744  /* sub2video structure has been (re-)initialized.
745  Mark it as such so that the system will be
746  initialized with the first received heartbeat. */
747  ist->sub2video.initialize = 1;
748 
749  return 0;
750 }
751 
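/* Video input: create the buffersrc with the stream parameters, then
 * auto-insert transpose/hflip+vflip/rotate (-autorotate), yadif
 * (-deinterlace) and trim filters as needed. */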
752 static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
753  AVFilterInOut *in)
754 {
755  AVFilterContext *last_filter;
756  const AVFilter *buffer_filt = avfilter_get_by_name("buffer");
757  InputStream *ist = ifilter->ist;
758  InputFile *f = input_files[ist->file_index];
759  AVRational tb = ist->framerate.num ? av_inv_q(ist->framerate) :
760  ist->st->time_base;
761  AVRational fr = ist->framerate;
762  AVRational sar;
763  AVBPrint args;
764  char name[255];
765  int ret, pad_idx = 0;
766  int64_t tsoffset = 0;
767  AVBufferSrcParameters *par = av_buffersrc_parameters_alloc();
768 
769  if (!par)
770  return AVERROR(ENOMEM);
771  memset(par, 0, sizeof(*par));
772  par->format = AV_PIX_FMT_NONE;
773 
774  if (ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
775  av_log(NULL, AV_LOG_ERROR, "Cannot connect video filter to audio input\n");
776  ret = AVERROR(EINVAL);
777  goto fail;
778  }
779 
780  if (!fr.num)
781  fr = av_guess_frame_rate(input_files[ist->file_index]->ctx, ist->st, NULL);
782 
783  if (ist->dec_ctx->codec_type == AVMEDIA_TYPE_SUBTITLE) {
784  ret = sub2video_prepare(ist, ifilter);
785  if (ret < 0)
786  goto fail;
787  }
788 
789  sar = ifilter->sample_aspect_ratio;
790  if(!sar.den)
791  sar = (AVRational){0,1};
792  av_bprint_init(&args, 0, AV_BPRINT_SIZE_AUTOMATIC);
793  av_bprintf(&args,
794  "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:"
795  "pixel_aspect=%d/%d",
796  ifilter->width, ifilter->height, ifilter->format,
797  tb.num, tb.den, sar.num, sar.den);
798  if (fr.num && fr.den)
799  av_bprintf(&args, ":frame_rate=%d/%d", fr.num, fr.den);
800  snprintf(name, sizeof(name), "graph %d input from stream %d:%d", fg->index,
801  ist->file_index, ist->st->index);
802 
803 
804  if ((ret = avfilter_graph_create_filter(&ifilter->filter, buffer_filt, name,
805  args.str, NULL, fg->graph)) < 0)
806  goto fail;
807  par->hw_frames_ctx = ifilter->hw_frames_ctx;
808  ret = av_buffersrc_parameters_set(ifilter->filter, par);
809  if (ret < 0)
810  goto fail;
811  av_freep(&par);
812  last_filter = ifilter->filter;
813 
814  if (ist->autorotate) {
815  double theta = get_rotation(ist->st);
816 
817  if (fabs(theta - 90) < 1.0) {
818  ret = insert_filter(&last_filter, &pad_idx, "transpose", "clock");
819  } else if (fabs(theta - 180) < 1.0) {
820  ret = insert_filter(&last_filter, &pad_idx, "hflip", NULL);
821  if (ret < 0)
822  return ret;
823  ret = insert_filter(&last_filter, &pad_idx, "vflip", NULL);
824  } else if (fabs(theta - 270) < 1.0) {
825  ret = insert_filter(&last_filter, &pad_idx, "transpose", "cclock");
826  } else if (fabs(theta) > 1.0) {
827  char rotate_buf[64];
828  snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
829  ret = insert_filter(&last_filter, &pad_idx, "rotate", rotate_buf);
830  }
831  if (ret < 0)
832  return ret;
833  }
834 
835  if (do_deinterlace) {
836  AVFilterContext *yadif;
837 
838  snprintf(name, sizeof(name), "deinterlace_in_%d_%d",
839  ist->file_index, ist->st->index);
840  if ((ret = avfilter_graph_create_filter(&yadif,
841  avfilter_get_by_name("yadif"),
842  name, "", NULL,
843  fg->graph)) < 0)
844  return ret;
845 
846  if ((ret = avfilter_link(last_filter, 0, yadif, 0)) < 0)
847  return ret;
848 
849  last_filter = yadif;
850  }
851 
852  snprintf(name, sizeof(name), "trim_in_%d_%d",
853  ist->file_index, ist->st->index);
854  if (copy_ts) {
855  tsoffset = f->start_time == AV_NOPTS_VALUE ? 0 : f->start_time;
856  if (!start_at_zero && f->ctx->start_time != AV_NOPTS_VALUE)
857  tsoffset += f->ctx->start_time;
858  }
859  ret = insert_trim(((f->start_time == AV_NOPTS_VALUE) || !f->accurate_seek) ?
860  AV_NOPTS_VALUE : tsoffset, f->recording_time,
861  &last_filter, &pad_idx, name);
862  if (ret < 0)
863  return ret;
864 
865  if ((ret = avfilter_link(last_filter, 0, in->filter_ctx, in->pad_idx)) < 0)
866  return ret;
867  return 0;
868 fail:
869  av_freep(&par);
870 
871  return ret;
872 }
873 
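/* Audio input: create the abuffersrc, then auto-insert aresample (-async),
 * volume (-vol) and atrim filters as needed. */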
874 static int configure_input_audio_filter(FilterGraph *fg, InputFilter *ifilter,
875  AVFilterInOut *in)
876 {
877  AVFilterContext *last_filter;
878  const AVFilter *abuffer_filt = avfilter_get_by_name("abuffer");
879  InputStream *ist = ifilter->ist;
880  InputFile *f = input_files[ist->file_index];
881  AVBPrint args;
882  char name[255];
883  int ret, pad_idx = 0;
884  int64_t tsoffset = 0;
885 
886  if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_AUDIO) {
887  av_log(NULL, AV_LOG_ERROR, "Cannot connect audio filter to non audio input\n");
888  return AVERROR(EINVAL);
889  }
890 
891  av_bprint_init(&args, 0, AV_BPRINT_SIZE_AUTOMATIC);
892  av_bprintf(&args, "time_base=%d/%d:sample_rate=%d:sample_fmt=%s",
893  1, ifilter->sample_rate,
894  ifilter->sample_rate,
895  av_get_sample_fmt_name(ifilter->format));
896  if (ifilter->channel_layout)
897  av_bprintf(&args, ":channel_layout=0x%"PRIx64,
898  ifilter->channel_layout);
899  else
900  av_bprintf(&args, ":channels=%d", ifilter->channels);
901  snprintf(name, sizeof(name), "graph_%d_in_%d_%d", fg->index,
902  ist->file_index, ist->st->index);
903 
904  if ((ret = avfilter_graph_create_filter(&ifilter->filter, abuffer_filt,
905  name, args.str, NULL,
906  fg->graph)) < 0)
907  return ret;
908  last_filter = ifilter->filter;
909 
910 #define AUTO_INSERT_FILTER_INPUT(opt_name, filter_name, arg) do { \
911  AVFilterContext *filt_ctx; \
912  \
913  av_log(NULL, AV_LOG_INFO, opt_name " is forwarded to lavfi " \
914  "similarly to -af " filter_name "=%s.\n", arg); \
915  \
916  snprintf(name, sizeof(name), "graph_%d_%s_in_%d_%d", \
917  fg->index, filter_name, ist->file_index, ist->st->index); \
918  ret = avfilter_graph_create_filter(&filt_ctx, \
919  avfilter_get_by_name(filter_name), \
920  name, arg, NULL, fg->graph); \
921  if (ret < 0) \
922  return ret; \
923  \
924  ret = avfilter_link(last_filter, 0, filt_ctx, 0); \
925  if (ret < 0) \
926  return ret; \
927  \
928  last_filter = filt_ctx; \
929 } while (0)
930 
931  if (audio_sync_method > 0) {
932  char args[256] = {0};
933 
934  av_strlcatf(args, sizeof(args), "async=%d", audio_sync_method);
935  if (audio_drift_threshold != 0.1)
936  av_strlcatf(args, sizeof(args), ":min_hard_comp=%f", audio_drift_threshold);
937  if (!fg->reconfiguration)
938  av_strlcatf(args, sizeof(args), ":first_pts=0");
939  AUTO_INSERT_FILTER_INPUT("-async", "aresample", args);
940  }
941 
942 // if (ost->audio_channels_mapped) {
943 // int i;
944 // AVBPrint pan_buf;
945 // av_bprint_init(&pan_buf, 256, 8192);
946 // av_bprintf(&pan_buf, "0x%"PRIx64,
947 // av_get_default_channel_layout(ost->audio_channels_mapped));
948 // for (i = 0; i < ost->audio_channels_mapped; i++)
949 // if (ost->audio_channels_map[i] != -1)
950 // av_bprintf(&pan_buf, ":c%d=c%d", i, ost->audio_channels_map[i]);
951 // AUTO_INSERT_FILTER_INPUT("-map_channel", "pan", pan_buf.str);
952 // av_bprint_finalize(&pan_buf, NULL);
953 // }
954 
955  if (audio_volume != 256) {
956  char args[256];
957 
958  av_log(NULL, AV_LOG_WARNING, "-vol has been deprecated. Use the volume "
959  "audio filter instead.\n");
960 
961  snprintf(args, sizeof(args), "%f", audio_volume / 256.);
962  AUTO_INSERT_FILTER_INPUT("-vol", "volume", args);
963  }
964 
965  snprintf(name, sizeof(name), "trim for input stream %d:%d",
966  ist->file_index, ist->st->index);
967  if (copy_ts) {
968  tsoffset = f->start_time == AV_NOPTS_VALUE ? 0 : f->start_time;
969  if (!start_at_zero && f->ctx->start_time != AV_NOPTS_VALUE)
970  tsoffset += f->ctx->start_time;
971  }
972  ret = insert_trim(((f->start_time == AV_NOPTS_VALUE) || !f->accurate_seek) ?
973  AV_NOPTS_VALUE : tsoffset, f->recording_time,
974  &last_filter, &pad_idx, name);
975  if (ret < 0)
976  return ret;
977 
978  if ((ret = avfilter_link(last_filter, 0, in->filter_ctx, in->pad_idx)) < 0)
979  return ret;
980 
981  return 0;
982 }
983 
984 static int configure_input_filter(FilterGraph *fg, InputFilter *ifilter,
985  AVFilterInOut *in)
986 {
987  if (!ifilter->ist->dec) {
988  av_log(NULL, AV_LOG_ERROR,
989  "No decoder for stream #%d:%d, filtering impossible\n",
990  ifilter->ist->file_index, ifilter->ist->st->index);
991  return AVERROR_DECODER_NOT_FOUND;
992  }
993  switch (avfilter_pad_get_type(in->filter_ctx->input_pads, in->pad_idx)) {
994  case AVMEDIA_TYPE_VIDEO: return configure_input_video_filter(fg, ifilter, in);
995  case AVMEDIA_TYPE_AUDIO: return configure_input_audio_filter(fg, ifilter, in);
996  default: av_assert0(0);
997  }
998 }
999 
1000 static void cleanup_filtergraph(FilterGraph *fg)
1001 {
1002  int i;
1003  for (i = 0; i < fg->nb_outputs; i++)
1004  fg->outputs[i]->filter = (AVFilterContext *)NULL;
1005  for (i = 0; i < fg->nb_inputs; i++)
1006  fg->inputs[i]->filter = (AVFilterContext *)NULL;
1007  avfilter_graph_free(&fg->graph);
1008 }
1009 
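/* (Re)create fg->graph from its textual description, configure every input
 * and output filter, then flush any queued frames, EOFs and sub2video
 * subtitles that arrived before the graph was ready. */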
1010 int configure_filtergraph(FilterGraph *fg)
1011 {
1012  AVFilterInOut *inputs, *outputs, *cur;
1013  int ret, i, simple = filtergraph_is_simple(fg);
1014  const char *graph_desc = simple ? fg->outputs[0]->ost->avfilter :
1015  fg->graph_desc;
1016 
1017  cleanup_filtergraph(fg);
1018  if (!(fg->graph = avfilter_graph_alloc()))
1019  return AVERROR(ENOMEM);
1020 
1021  if (simple) {
1022  OutputStream *ost = fg->outputs[0]->ost;
1023  char args[512];
1024  AVDictionaryEntry *e = NULL;
1025 
1026  fg->graph->nb_threads = filter_nbthreads;
1027 
1028  args[0] = 0;
1029  while ((e = av_dict_get(ost->sws_dict, "", e,
1030  AV_DICT_IGNORE_SUFFIX))) {
1031  av_strlcatf(args, sizeof(args), "%s=%s:", e->key, e->value);
1032  }
1033  if (strlen(args))
1034  args[strlen(args)-1] = 0;
1035  fg->graph->scale_sws_opts = av_strdup(args);
1036 
1037  args[0] = 0;
1038  while ((e = av_dict_get(ost->swr_opts, "", e,
1039  AV_DICT_IGNORE_SUFFIX))) {
1040  av_strlcatf(args, sizeof(args), "%s=%s:", e->key, e->value);
1041  }
1042  if (strlen(args))
1043  args[strlen(args)-1] = 0;
1044  av_opt_set(fg->graph, "aresample_swr_opts", args, 0);
1045 
1046  args[0] = '\0';
1047  while ((e = av_dict_get(fg->outputs[0]->ost->resample_opts, "", e,
1048  AV_DICT_IGNORE_SUFFIX))) {
1049  av_strlcatf(args, sizeof(args), "%s=%s:", e->key, e->value);
1050  }
1051  if (strlen(args))
1052  args[strlen(args) - 1] = '\0';
1053 
1054  e = av_dict_get(ost->encoder_opts, "threads", NULL, 0);
1055  if (e)
1056  av_opt_set(fg->graph, "threads", e->value, 0);
1057  } else {
1058  fg->graph->nb_threads = filter_complex_nbthreads;
1059  }
1060 
1061  if ((ret = avfilter_graph_parse2(fg->graph, graph_desc, &inputs, &outputs)) < 0)
1062  goto fail;
1063 
1064  if (filter_hw_device || hw_device_ctx) {
1065  AVBufferRef *device = filter_hw_device ? filter_hw_device->device_ref
1066  : hw_device_ctx;
1067  for (i = 0; i < fg->graph->nb_filters; i++) {
1068  fg->graph->filters[i]->hw_device_ctx = av_buffer_ref(device);
1069  if (!fg->graph->filters[i]->hw_device_ctx) {
1070  ret = AVERROR(ENOMEM);
1071  goto fail;
1072  }
1073  }
1074  }
1075 
1076  if (simple && (!inputs || inputs->next || !outputs || outputs->next)) {
1077  const char *num_inputs;
1078  const char *num_outputs;
1079  if (!outputs) {
1080  num_outputs = "0";
1081  } else if (outputs->next) {
1082  num_outputs = ">1";
1083  } else {
1084  num_outputs = "1";
1085  }
1086  if (!inputs) {
1087  num_inputs = "0";
1088  } else if (inputs->next) {
1089  num_inputs = ">1";
1090  } else {
1091  num_inputs = "1";
1092  }
1093  av_log(NULL, AV_LOG_ERROR, "Simple filtergraph '%s' was expected "
1094  "to have exactly 1 input and 1 output."
1095  " However, it had %s input(s) and %s output(s)."
1096  " Please adjust, or use a complex filtergraph (-filter_complex) instead.\n",
1097  graph_desc, num_inputs, num_outputs);
1098  ret = AVERROR(EINVAL);
1099  goto fail;
1100  }
1101 
1102  for (cur = inputs, i = 0; cur; cur = cur->next, i++)
1103  if ((ret = configure_input_filter(fg, fg->inputs[i], cur)) < 0) {
1104  avfilter_inout_free(&inputs);
1105  avfilter_inout_free(&outputs);
1106  goto fail;
1107  }
1108  avfilter_inout_free(&inputs);
1109 
1110  for (cur = outputs, i = 0; cur; cur = cur->next, i++)
1111  configure_output_filter(fg, fg->outputs[i], cur);
1112  avfilter_inout_free(&outputs);
1113 
1114  if ((ret = avfilter_graph_config(fg->graph, NULL)) < 0)
1115  goto fail;
1116 
1117  /* limit the lists of allowed formats to the ones selected, to
1118  * make sure they stay the same if the filtergraph is reconfigured later */
1119  for (i = 0; i < fg->nb_outputs; i++) {
1120  OutputFilter *ofilter = fg->outputs[i];
1121  AVFilterContext *sink = ofilter->filter;
1122 
1123  ofilter->format = av_buffersink_get_format(sink);
1124 
1125  ofilter->width = av_buffersink_get_w(sink);
1126  ofilter->height = av_buffersink_get_h(sink);
1127 
1128  ofilter->sample_rate = av_buffersink_get_sample_rate(sink);
1129  ofilter->channel_layout = av_buffersink_get_channel_layout(sink);
1130  }
1131 
1132  fg->reconfiguration = 1;
1133 
1134  for (i = 0; i < fg->nb_outputs; i++) {
1135  OutputStream *ost = fg->outputs[i]->ost;
1136  if (!ost->enc) {
1137  /* identical to the same check in ffmpeg.c, needed because
1138  complex filter graphs are initialized earlier */
1139  av_log(NULL, AV_LOG_ERROR, "Encoder (codec %s) not found for output stream #%d:%d\n",
1140  avcodec_get_name(ost->st->codecpar->codec_id), ost->file_index, ost->index);
1141  ret = AVERROR(EINVAL);
1142  goto fail;
1143  }
1144  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
1145  !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
1146  av_buffersink_set_frame_size(ost->filter->filter,
1147  ost->enc_ctx->frame_size);
1148  }
1149 
1150  for (i = 0; i < fg->nb_inputs; i++) {
1151  while (av_fifo_size(fg->inputs[i]->frame_queue)) {
1152  AVFrame *tmp;
1153  av_fifo_generic_read(fg->inputs[i]->frame_queue, &tmp, sizeof(tmp), NULL);
1154  ret = av_buffersrc_add_frame(fg->inputs[i]->filter, tmp);
1155  av_frame_free(&tmp);
1156  if (ret < 0)
1157  goto fail;
1158  }
1159  }
1160 
1161  /* send the EOFs for the finished inputs */
1162  for (i = 0; i < fg->nb_inputs; i++) {
1163  if (fg->inputs[i]->eof) {
1164  ret = av_buffersrc_add_frame(fg->inputs[i]->filter, NULL);
1165  if (ret < 0)
1166  goto fail;
1167  }
1168  }
1169 
1170  /* process queued up subtitle packets */
1171  for (i = 0; i < fg->nb_inputs; i++) {
1172  InputStream *ist = fg->inputs[i]->ist;
1173  if (ist->sub2video.sub_queue && ist->sub2video.frame) {
1174  while (av_fifo_size(ist->sub2video.sub_queue)) {
1175  AVSubtitle tmp;
1176  av_fifo_generic_read(ist->sub2video.sub_queue, &tmp, sizeof(tmp), NULL);
1177  sub2video_update(ist, INT64_MIN, &tmp);
1178  avsubtitle_free(&tmp);
1179  }
1180  }
1181  }
1182 
1183  return 0;
1184 
1185 fail:
1186  cleanup_filtergraph(fg);
1187  return ret;
1188 }
1189 
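/* Record the properties of an incoming frame so the buffer source can later
 * be (re)created with matching parameters. */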
1190 int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame)
1191 {
1192  av_buffer_unref(&ifilter->hw_frames_ctx);
1193 
1194  ifilter->format = frame->format;
1195 
1196  ifilter->width = frame->width;
1197  ifilter->height = frame->height;
1198  ifilter->sample_aspect_ratio = frame->sample_aspect_ratio;
1199 
1200  ifilter->sample_rate = frame->sample_rate;
1201  ifilter->channels = frame->channels;
1202  ifilter->channel_layout = frame->channel_layout;
1203 
1204  if (frame->hw_frames_ctx) {
1205  ifilter->hw_frames_ctx = av_buffer_ref(frame->hw_frames_ctx);
1206  if (!ifilter->hw_frames_ctx)
1207  return AVERROR(ENOMEM);
1208  }
1209 
1210  return 0;
1211 }
1212 
1213 int ist_in_filtergraph(FilterGraph *fg, InputStream *ist)
1214 {
1215  int i;
1216  for (i = 0; i < fg->nb_inputs; i++)
1217  if (fg->inputs[i]->ist == ist)
1218  return 1;
1219  return 0;
1220 }
1221 
1222 int filtergraph_is_simple(FilterGraph *fg)
1223 {
1224  return !fg->graph_desc;
1225 }
AVFilterContext ** filters
Definition: avfilter.h:842
void avfilter_graph_set_auto_convert(AVFilterGraph *graph, unsigned flags)
Enable or disable automatic format conversion inside the graph.
#define NULL
Definition: coverity.c:32
int width
Definition: ffmpeg.h:269
int keep_pix_fmt
Definition: ffmpeg.h:528
Bytestream IO Context.
Definition: avio.h:161
int64_t recording_time
desired length of the resulting file in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:558
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:94
uint8_t * name
Definition: ffmpeg.h:262
int nb_outputs
Definition: ffmpeg.h:291
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it...
Definition: buffer.c:126
AVDictionary * swr_opts
Definition: ffmpeg.h:509
#define DECODING_FOR_FILTER
Definition: ffmpeg.h:301
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2549
This structure describes decoded (raw) audio or video data.
Definition: frame.h:295
static int configure_input_filter(FilterGraph *fg, InputFilter *ifilter, AVFilterInOut *in)
int avio_close_dyn_buf(AVIOContext *s, uint8_t **pbuffer)
Return the written size and a pointer to the buffer.
Definition: aviobuf.c:1421
AVRational frame_rate
Definition: ffmpeg.h:477
HWDevice * filter_hw_device
Definition: ffmpeg_opt.c:143
double get_rotation(AVStream *st)
Definition: cmdutils.c:2202
int accurate_seek
Definition: ffmpeg.h:413
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
misc image utilities
AVFilterGraph * avfilter_graph_alloc(void)
Allocate a filter graph.
Definition: avfiltergraph.c:83
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
Main libavfilter public API header.
Memory buffer source API.
const char * desc
Definition: nvenc.c:68
AVRational framerate
Definition: ffmpeg.h:332
int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx)
Check validity and configure all the links and formats in the graph.
void avfilter_inout_free(AVFilterInOut **inout)
Free the supplied list of AVFilterInOut and set *inout to NULL.
Definition: graphparser.c:203
struct AVFilterInOut * next
next input/input in the list, NULL if this is the last
Definition: avfilter.h:1014
int height
Definition: ffmpeg.h:246
int check_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec)
Check if the given stream matches a stream specifier.
Definition: cmdutils.c:2098
AVFilterInOut * out_tmp
Definition: ffmpeg.h:265
enum AVMediaType avfilter_pad_get_type(const AVFilterPad *pads, int pad_idx)
Get the type of an AVFilterPad.
Definition: avfilter.c:1034
int decoding_needed
Definition: ffmpeg.h:299
enum AVCodecID codec_id
Specific type of the encoded data (the codec used).
Definition: avcodec.h:4036
static int insert_trim(int64_t start_time, int64_t duration, AVFilterContext **last_filter, int *pad_idx, const char *filter_name)
int num
Numerator.
Definition: rational.h:59
static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
int index
stream index in AVFormatContext
Definition: avformat.h:877
int init_complex_filtergraph(FilterGraph *fg)
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
AVBufferRef * hw_device_ctx
For filters which will create hardware frames, sets the device the filter should create them in...
Definition: avfilter.h:394
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1831
GLint GLenum type
Definition: opengl_enc.c:104
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:236
discard all
Definition: avcodec.h:829
int nb_input_streams
Definition: ffmpeg.c:148
#define DEF_CHOOSE_FORMAT(suffix, type, var, supported_list, none, get_name)
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample format(the sample packing is implied by the sample format) and sample rate.The lists are not just lists
static enum AVPixelFormat * get_compliance_unofficial_pix_fmts(enum AVCodecID codec_id, const enum AVPixelFormat default_formats[])
Definition: ffmpeg_filter.c:42
AVCodec.
Definition: avcodec.h:3555
int avio_open_dyn_buf(AVIOContext **s)
Open a write only memory stream.
Definition: aviobuf.c:1376
int64_t start_time
start time in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:559
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:235
int index
Definition: ffmpeg.h:282
char * scale_sws_opts
sws options to use for the auto-inserted scale filters
Definition: avfilter.h:845
struct FilterGraph * graph
Definition: ffmpeg.h:237
AVBufferRef * hw_frames_ctx
For hwaccel-format frames, this should be a reference to the AVHWFramesContext describing the frame...
Definition: frame.h:634
Format I/O context.
Definition: avformat.h:1353
AVRational av_guess_frame_rate(AVFormatContext *ctx, AVStream *stream, AVFrame *frame)
Guess the frame rate, based on both the container and codec information.
Definition: utils.c:5140
int configure_filtergraph(FilterGraph *fg)
memory buffer sink API for audio and video
enum AVPixelFormat choose_pixel_fmt(AVStream *st, AVCodecContext *enc_ctx, AVCodec *codec, enum AVPixelFormat target)
Definition: ffmpeg_filter.c:63
struct InputStream * ist
Definition: ffmpeg.h:236
char * name
name of this filter instance
Definition: avfilter.h:343
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
AVFilterGraph * graph
Definition: ffmpeg.h:285
int av_buffersink_get_sample_rate(const AVFilterContext *ctx)
int avfilter_link(AVFilterContext *src, unsigned srcpad, AVFilterContext *dst, unsigned dstpad)
Link two filters together.
Definition: avfilter.c:135
AVFilterPad * output_pads
array of output pads
Definition: avfilter.h:349
int user_set_discard
Definition: ffmpeg.h:298
static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter, AVFilterInOut *in)
static int64_t start_time
Definition: ffplay.c:332
uint8_t
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:190
AVDictionary * sws_dict
Definition: ffmpeg.h:508
int width
Video only.
Definition: avcodec.h:4102
void check_filter_outputs(void)
AVOptions.
#define f(width, name)
Definition: cbs_vp9.c:255
static int configure_output_video_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce output
int configure_output_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
int audio_sync_method
Definition: ffmpeg_opt.c:153
int shortest
Definition: ffmpeg.h:562
AVStream ** streams
A list of all streams in the file.
Definition: avformat.h:1421
int64_t duration
Definition: movenc.c:63
int avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt, const char *name, const char *args, void *opaque, AVFilterGraph *graph_ctx)
Create and add a filter instance into an existing graph.
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:40
enum AVMediaType type
Definition: ffmpeg.h:239
int nb_threads
Maximum number of threads used by filters in this graph.
Definition: avfilter.h:869
AVDictionary * resample_opts
Definition: ffmpeg.h:510
AVFilterContext * filter
Definition: ffmpeg.h:259
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:79
int nb_input_files
Definition: ffmpeg.c:150
AVCodec * dec
Definition: ffmpeg.h:304
int avfilter_graph_parse2(AVFilterGraph *graph, const char *filters, AVFilterInOut **inputs, AVFilterInOut **outputs)
Add a graph described by a string to a graph.
Definition: graphparser.c:407
int ist_in_filtergraph(FilterGraph *fg, InputStream *ist)
int file_index
Definition: ffmpeg.h:295
int av_buffersink_get_h(const AVFilterContext *ctx)
struct InputStream::sub2video sub2video
int av_buffersink_get_format(const AVFilterContext *ctx)
#define av_log(a,...)
int filter_complex_nbthreads
Definition: ffmpeg_opt.c:173
void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub)
Definition: ffmpeg.c:240
#define FF_COMPLIANCE_UNOFFICIAL
Allow unofficial extensions.
Definition: avcodec.h:2688
A filter pad used for either input or output.
Definition: internal.h:54
unsigned int initialize
marks if sub2video_update should force an initialization
Definition: ffmpeg.h:351
uint64_t channel_layout
Definition: ffmpeg.h:273
AVFifoBuffer * sub_queue
queue of AVSubtitle* before filter init
Definition: ffmpeg.h:348
static int insert_filter(AVFilterContext **last_filter, int *pad_idx, const char *filter_name, const char *args)
AVFilterPad * input_pads
array of input pads
Definition: avfilter.h:345
AVRational sample_aspect_ratio
Definition: ffmpeg.h:247
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:259
AVCodecID
Identify the syntax and semantics of the bitstream.
Definition: avcodec.h:217
int width
Definition: frame.h:353
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
#define GET_CH_LAYOUT_NAME(ch_layout)
Definition: cmdutils.h:638
int sample_rate
Definition: ffmpeg.h:249
static AVBufferRef * hw_device_ctx
Definition: hw_decode.c:45
FilterGraph ** filtergraphs
Definition: ffmpeg.c:157
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce then the filter should push the output frames on the output link immediately As an exception to the previous rule if the input frame is enough to produce several output frames then the filter needs output only at least one per link The additional frames can be left buffered in the filter
AVFilterContext * filter
Definition: ffmpeg.h:235
int format
Definition: ffmpeg.h:244
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:203
unsigned nb_outputs
number of output pads
Definition: avfilter.h:351
Display matrix.
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:69
packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
Definition: pixfmt.h:95
int capabilities
Codec capabilities.
Definition: avcodec.h:3574
int av_fifo_generic_read(AVFifoBuffer *f, void *dest, int buf_size, void(*func)(void *, void *, int))
Feed data from an AVFifoBuffer to a user-supplied callback.
Definition: fifo.c:213
enum AVMediaType codec_type
General type of the encoded data.
Definition: avcodec.h:4032
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
Definition: opt.c:583
simple assert() macros that are a bit more flexible than ISO C assert().
const char * name
Name of the codec implementation.
Definition: avcodec.h:3562
const char * av_get_sample_fmt_name(enum AVSampleFormat sample_fmt)
Return the name of sample_fmt, or NULL if sample_fmt is not recognized.
Definition: samplefmt.c:49
int eof
Definition: ffmpeg.h:255
int format
video: the pixel format, value corresponds to enum AVPixelFormat audio: the sample format...
Definition: buffersrc.h:78
#define FFMAX(a, b)
Definition: common.h:94
#define fail()
Definition: checkasm.h:122
#define AV_CODEC_CAP_VARIABLE_FRAME_SIZE
Audio encoder supports receiving a different number of samples in each call.
Definition: avcodec.h:1071
static int configure_output_audio_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
uint64_t channel_layout
Audio channel layout.
Definition: avcodec.h:2332
int filter_nbthreads
Definition: ffmpeg_opt.c:172
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
OutputFilter * filter
Definition: ffmpeg.h:502
uint64_t channel_layout
Channel layout of the audio data.
Definition: frame.h:472
const AVFilter * avfilter_get_by_name(const char *name)
Get a filter definition matching the given name.
Definition: allfilters.c:513
unsigned int nb_streams
Number of elements in AVFormatContext.streams.
Definition: avformat.h:1409
uint8_t nb_components
The number of components each pixel has, (1-4)
Definition: pixdesc.h:83
int channels
number of audio channels, only used for audio.
Definition: frame.h:601
audio channel layout utility functions
enum AVPixelFormat * pix_fmts
array of supported pixel formats, or NULL if unknown, array is terminated by -1
Definition: avcodec.h:3576
This structure contains the parameters describing the frames that will be passed to this filter...
Definition: buffersrc.h:73
unsigned nb_inputs
number of input pads
Definition: avfilter.h:347
external API header
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:78
#define AV_OPT_SEARCH_CHILDREN
Search in possible children of the given object first.
Definition: opt.h:557
struct OutputStream * ost
Definition: ffmpeg.h:260
int width
picture width / height.
Definition: avcodec.h:1794
uint8_t w
Definition: llviddspenc.c:38
char * apad
Definition: ffmpeg.h:511
int width
Definition: ffmpeg.h:246
AVFormatContext * ctx
Definition: movenc.c:48
int nb_filtergraphs
Definition: ffmpeg.c:158
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
#define s(width, name)
Definition: cbs_vp9.c:257
int audio_channels_mapped
Definition: ffmpeg.h:497
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:69
enum AVCodecID codec_id
Definition: vaapi_decode.c:369
int height
Definition: ffmpeg.h:269
int sample_rate
Definition: ffmpeg.h:272
#define GET_SAMPLE_FMT_NAME(sample_fmt)
Definition: cmdutils.h:631
AVFilterContext * filter_ctx
filter context associated to this input/output
Definition: avfilter.h:1008
static const AVFilterPad outputs[]
Definition: af_acontrast.c:203
const char * avcodec_get_name(enum AVCodecID id)
Get the name of a codec.
Definition: utils.c:1172
void exit_program(int ret)
Wraps exit with a program-specific cleanup routine.
Definition: cmdutils.c:136
AVCodecContext * enc
Definition: muxing.c:55
int start_at_zero
Definition: ffmpeg_opt.c:162
int avfilter_init_str(AVFilterContext *filter, const char *args)
Initialize a filter with the supplied parameters.
Definition: avfilter.c:919
int audio_volume
Definition: ffmpeg_opt.c:152
Stream structure.
Definition: avformat.h:876
static char * describe_filter_link(FilterGraph *fg, AVFilterInOut *inout, int in)
A linked-list of the inputs/outputs of the filter chain.
Definition: avfilter.h:1003
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:368
InputFilter ** filters
Definition: ffmpeg.h:358
enum AVPixelFormat avcodec_find_best_pix_fmt_of_2(enum AVPixelFormat dst_pix_fmt1, enum AVPixelFormat dst_pix_fmt2, enum AVPixelFormat src_pix_fmt, int has_alpha, int *loss_ptr)
Definition: imgconvert.c:54
#define GET_SAMPLE_RATE_NAME(rate)
Definition: cmdutils.h:634
int64_t recording_time
Definition: ffmpeg.h:408
int frame_size
Number of samples per channel in an audio frame.
Definition: avcodec.h:2301
AVBufferRef * hw_frames_ctx
Video with a hwaccel pixel format only.
Definition: buffersrc.h:106
AVStream * st
Definition: ffmpeg.h:296
sample_rate
#define AV_LOG_INFO
Standard information.
Definition: log.h:187
#define AV_BPRINT_SIZE_AUTOMATIC
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:251
static int configure_input_audio_filter(FilterGraph *fg, InputFilter *ifilter, AVFilterInOut *in)
enum AVMediaType codec_type
Definition: avcodec.h:1629
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:58
enum AVCodecID codec_id
Definition: avcodec.h:1631
int av_fifo_size(const AVFifoBuffer *f)
Return the amount of data in bytes in the AVFifoBuffer, that is the amount of data you can read from ...
Definition: fifo.c:77
int ist_index
Definition: ffmpeg.h:397
const char * graph_desc
Definition: ffmpeg.h:283
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
uint64_t av_buffersink_get_channel_layout(const AVFilterContext *ctx)
int64_t start_time
Definition: ffmpeg.h:406
void avio_w8(AVIOContext *s, int b)
Definition: aviobuf.c:191
main external API structure.
Definition: avcodec.h:1621
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:370
void avsubtitle_free(AVSubtitle *sub)
Free all allocated data in the given subtitle struct.
Definition: utils.c:1087
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:383
int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame)
AVCodecContext * enc_ctx
Definition: ffmpeg.h:465
#define AUTO_INSERT_FILTER(opt_name, filter_name, arg)
AVBufferRef * hw_frames_ctx
Definition: ffmpeg.h:253
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(const int16_t *) pi >> 8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(const int32_t *) pi >> 24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31))))#define SET_CONV_FUNC_GROUP(ofmt, ifmt) static void set_generic_function(AudioConvert *ac){}void ff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, int sample_rate, int apply_map){AudioConvert *ac;int in_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) return NULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt) > 2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);return NULL;}return ac;}in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}else if(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;else ac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);return ac;}int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){int use_generic=1;int len=in->nb_samples;int p;if(ac->dc){av_log(ac->avr, AV_LOG_TRACE,"%d samples - audio_convert: %s to %s (dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));return ff_convert_dither(ac-> in
int channels
Definition: ffmpeg.h:250
int * audio_channels_map
Definition: ffmpeg.h:496
static const uint16_t channel_layouts[7]
Definition: dca_lbr.c:113
int sample_rate
Sample rate of the audio data.
Definition: frame.h:467
Filter definition.
Definition: avfilter.h:144
int pad_idx
index of the filt_ctx pad to use for linking
Definition: avfilter.h:1011
Rational number (pair of numerator and denominator).
Definition: rational.h:58
int file_index
Definition: ffmpeg.h:443
AVCodecContext * dec_ctx
Definition: ffmpeg.h:303
AVMediaType
Definition: avutil.h:199
size_t av_strlcatf(char *dst, size_t size, const char *fmt,...)
Definition: avstring.c:101
packed BGR 8:8:8, 32bpp, BGRXBGRX... X=unused/undefined
Definition: pixfmt.h:240
const char * name
Filter name.
Definition: avfilter.h:148
int av_buffersink_get_w(const AVFilterContext *ctx)
unsigned nb_filters
Definition: avfilter.h:843
int autorotate
Definition: ffmpeg.h:336
#define snprintf
Definition: snprintf.h:34
const char * avfilter_pad_get_name(const AVFilterPad *pads, int pad_idx)
Get the name of an AVFilterPad.
Definition: avfilter.c:1029
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:275
float audio_drift_threshold
Definition: ffmpeg_opt.c:148
char * name
unique name for this input/output in the list
Definition: avfilter.h:1005
int nb_filters
Definition: ffmpeg.h:359
int64_t start_time
Position of the first frame of the component, in AV_TIME_BASE fractional seconds. ...
Definition: avformat.h:1458
#define AVERROR_FILTER_NOT_FOUND
Filter not found.
Definition: error.h:58
all automatic conversions disabled
Definition: avfilter.h:976
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
static int sub2video_prepare(InputStream *ist, InputFilter *ifilter)
A reference to a data buffer.
Definition: buffer.h:81
static AVStream * ost
int reconfiguration
Definition: ffmpeg.h:286
struct FilterGraph * graph
Definition: ffmpeg.h:261
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
sample_rates
void av_buffersink_set_frame_size(AVFilterContext *ctx, unsigned frame_size)
Set the frame size for an audio buffer sink.
Definition: buffersink.c:175
int av_buffersrc_parameters_set(AVFilterContext *ctx, AVBufferSrcParameters *param)
Initialize the buffersrc or abuffersrc filter with the provided parameters.
Definition: buffersrc.c:98
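A short usage sketch for av_buffersrc_parameters_alloc()/av_buffersrc_parameters_set(), assuming a buffersrc instance and a hardware frames context obtained elsewhere (both names are placeholders):

#include "libavfilter/buffersrc.h"
#include "libavutil/buffer.h"
#include "libavutil/error.h"
#include "libavutil/mem.h"

/* Sketch: pass a hardware frames context to an existing buffersrc filter.
 * buffersrc_ctx and hw_frames_ctx are assumed to come from the caller. */
static int set_buffersrc_hw_frames(AVFilterContext *buffersrc_ctx,
                                   AVBufferRef *hw_frames_ctx)
{
    AVBufferSrcParameters *par = av_buffersrc_parameters_alloc();
    int ret;

    if (!par)
        return AVERROR(ENOMEM);
    par->hw_frames_ctx = hw_frames_ctx;  /* buffersrc keeps its own reference */
    ret = av_buffersrc_parameters_set(buffersrc_ctx, par);
    av_freep(&par);                      /* only the parameters struct is freed */
    return ret;
}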
AVStream * st
Definition: muxing.c:54
AVBufferRef * device_ref
Definition: ffmpeg.h:76
#define AV_CODEC_CAP_LOSSLESS
Codec is lossless.
Definition: avcodec.h:1089
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:94
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:80
char * key
Definition: dict.h:86
int den
Denominator.
Definition: rational.h:60
uint64_t channel_layout
Definition: ffmpeg.h:251
int copy_ts
Definition: ffmpeg_opt.c:161
AVFormatContext * ctx
Definition: ffmpeg.h:394
#define AVERROR_DECODER_NOT_FOUND
Decoder not found.
Definition: error.h:52
int do_deinterlace
Definition: ffmpeg_opt.c:156
#define GROW_ARRAY(array, nb_elems)
Definition: cmdutils.h:622
pixel format definitions
char * avfilter
Definition: ffmpeg.h:503
uint8_t * name
Definition: ffmpeg.h:238
char * value
Definition: dict.h:87
AVFifoBuffer * av_fifo_alloc(unsigned int size)
Initialize an AVFifoBuffer.
Definition: fifo.c:43
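The AVFifoBuffer created by av_fifo_alloc() is what the frame_queue field listed further below stores AVFrame pointers in. A reduced sketch of the write side, assuming the FIFO was created with room for a few pointers (e.g. av_fifo_alloc(8 * sizeof(AVFrame *))):

#include "libavutil/error.h"
#include "libavutil/fifo.h"
#include "libavutil/frame.h"

/* Sketch: append a frame reference to a FIFO of AVFrame pointers, growing
 * the FIFO when it is full.  The reader side would use
 * av_fifo_generic_read(fifo, &frame, sizeof(frame), NULL). */
static int queue_frame(AVFifoBuffer *fifo, const AVFrame *frame)
{
    AVFrame *tmp = av_frame_clone(frame);  /* the queue owns its own reference */
    int ret;

    if (!tmp)
        return AVERROR(ENOMEM);

    if (!av_fifo_space(fifo)) {            /* full: double the buffer size */
        ret = av_fifo_realloc2(fifo, 2 * av_fifo_size(fifo));
        if (ret < 0) {
            av_frame_free(&tmp);
            return ret;
        }
    }
    av_fifo_generic_write(fifo, &tmp, sizeof(tmp), NULL);
    return 0;
}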
int len
AVFilterContext * avfilter_graph_alloc_filter(AVFilterGraph *graph, const AVFilter *filter, const char *name)
Create a new filter instance in a filter graph.
int channels
number of audio channels
Definition: avcodec.h:2282
OutputFilter ** outputs
Definition: ffmpeg.h:290
InputFile ** input_files
Definition: ffmpeg.c:149
AVBufferSrcParameters * av_buffersrc_parameters_alloc(void)
Allocate a new AVBufferSrcParameters instance.
Definition: buffersrc.c:87
AVFormatContext * ctx
Definition: ffmpeg.h:555
int filtergraph_is_simple(FilterGraph *fg)
An instance of a filter.
Definition: avfilter.h:338
static char * choose_pix_fmts(OutputFilter *ofilter)
static void cleanup_filtergraph(FilterGraph *fg)
AVDictionary * encoder_opts
Definition: ffmpeg.h:507
int64_t av_get_default_channel_layout(int nb_channels)
Return default channel layout for a given number of channels.
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:701
int height
Definition: frame.h:353
FILE * out
Definition: movenc.c:54
InputFilter ** inputs
Definition: ffmpeg.h:288
#define av_freep(p)
#define AV_DICT_IGNORE_SUFFIX
Return first entry in a dictionary whose first part corresponds to the search key, ignoring the suffix of the found key string.
Definition: dict.h:70
OutputFile ** output_files
Definition: ffmpeg.c:154
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:170
AVCodecParameters * codecpar
Codec parameters associated with this stream.
Definition: avformat.h:1023
enum AVSampleFormat * sample_fmts
array of supported sample formats, or NULL if unknown, array is terminated by -1
Definition: avcodec.h:3578
int format
Definition: ffmpeg.h:271
formats
Definition: signature.h:48
int init_simple_filtergraph(InputStream *ist, OutputStream *ost)
int discard
Definition: ffmpeg.h:297
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2465
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avformat.h:905
void choose_sample_fmt(AVStream *st, AVCodec *codec)
Definition: ffmpeg_filter.c:93
Filters work on frames: the word "frame" indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format negotiation: for each input and each output, a filter exposes the list of supported formats. For video that means pixel format; for audio that means channel layout, sample format and sample rate. The lists are not plain lists, they are references to shared objects: when the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection, and when a single format is eventually chosen for a link amongst the remaining ones, all references to the list are updated. That means that if a filter requires that its input and output have the same format amongst a supported list, all it has to do is use a reference to the same list of formats. query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later; that can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
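For the "same list of formats on input and output" case described above, the query_formats() callback of a hypothetical in-tree filter can be sketched as follows (ff_make_format_list() and ff_set_common_formats() are libavfilter-internal helpers, available only to filters built inside the tree):

#include "libavfilter/avfilter.h"
#include "libavfilter/formats.h"
#include "libavutil/error.h"
#include "libavutil/pixfmt.h"

/* Sketch: advertise one shared AVFilterFormats list on every input and
 * output, so that narrowing the list on one link narrows it everywhere and
 * input and output end up with the same format. */
static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_NONE
    };
    AVFilterFormats *fmts = ff_make_format_list(pix_fmts);

    if (!fmts)
        return AVERROR(ENOMEM);
    return ff_set_common_formats(ctx, fmts);
}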
int nb_inputs
Definition: ffmpeg.h:289
enum AVDiscard discard
Selects which packets can be discarded at will and do not need to be demuxed.
Definition: avformat.h:931
int index
Definition: ffmpeg.h:444
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
enum AVMediaType type
Definition: ffmpeg.h:266
AVFifoBuffer * frame_queue
Definition: ffmpeg.h:241
#define AUTO_INSERT_FILTER_INPUT(opt_name, filter_name, arg)
int av_opt_set(void *obj, const char *name, const char *val, int search_flags)
Definition: opt.c:462
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
Definition: avcodec.h:2684
const AVFilter * filter
the AVFilter of which this is an instance
Definition: avfilter.h:341
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
#define tb
Definition: regdef.h:68
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
Definition: buffersrc.c:147
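A reduced sketch of driving an already-configured graph with av_buffersrc_add_frame() and draining the corresponding buffersink (src_ctx, sink_ctx and filt_frame are placeholders for objects set up elsewhere):

#include "libavfilter/buffersink.h"
#include "libavfilter/buffersrc.h"
#include "libavutil/error.h"
#include "libavutil/frame.h"

/* Sketch: push one frame into the graph through its buffersrc (a NULL frame
 * signals EOF) and pull every frame the buffersink already has ready. */
static int push_and_drain(AVFilterContext *src_ctx, AVFilterContext *sink_ctx,
                          AVFrame *in, AVFrame *filt_frame)
{
    int ret = av_buffersrc_add_frame(src_ctx, in);
    if (ret < 0)
        return ret;

    while ((ret = av_buffersink_get_frame(sink_ctx, filt_frame)) >= 0) {
        /* ... consume filt_frame (encode, display, ...) ... */
        av_frame_unref(filt_frame);
    }
    /* AVERROR(EAGAIN) (needs more input) and AVERROR_EOF are not errors here. */
    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
        ret = 0;
    return ret;
}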
InputStream ** input_streams
Definition: ffmpeg.c:147
int avio_printf(AVIOContext *s, const char *fmt,...) av_printf_format(2, 3)
Writes a formatted string to the context.
discard nothing
Definition: avcodec.h:823
const char * name
Definition: opengl_enc.c:102
static uint8_t tmp[11]
Definition: aes_ctr.c:26