FFmpeg
ffmpeg_filter.c
1 /*
2  * ffmpeg filter configuration
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <stdint.h>
22 
23 #include "ffmpeg.h"
24 
25 #include "libavfilter/avfilter.h"
26 #include "libavfilter/buffersink.h"
27 #include "libavfilter/buffersrc.h"
28 
30 
31 #include "libavutil/avassert.h"
32 #include "libavutil/avstring.h"
33 #include "libavutil/bprint.h"
34 #include "libavutil/channel_layout.h"
35 #include "libavutil/display.h"
36 #include "libavutil/opt.h"
37 #include "libavutil/pixdesc.h"
38 #include "libavutil/pixfmt.h"
39 #include "libavutil/imgutils.h"
40 #include "libavutil/samplefmt.h"
41 
42 static const enum AVPixelFormat *get_compliance_unofficial_pix_fmts(enum AVCodecID codec_id, const enum AVPixelFormat default_formats[])
43 {
44     static const enum AVPixelFormat mjpeg_formats[] =
45         { AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P,
46           AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P,
47           AV_PIX_FMT_NONE };
48     static const enum AVPixelFormat ljpeg_formats[] =
49         { AV_PIX_FMT_BGR24, AV_PIX_FMT_BGRA, AV_PIX_FMT_BGR0,
50           AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P,
51           AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P,
52           AV_PIX_FMT_NONE};
53 
54  if (codec_id == AV_CODEC_ID_MJPEG) {
55  return mjpeg_formats;
56  } else if (codec_id == AV_CODEC_ID_LJPEG) {
57  return ljpeg_formats;
58  } else {
59  return default_formats;
60  }
61 }
62 
63 enum AVPixelFormat choose_pixel_fmt(AVStream *st, AVCodecContext *enc_ctx, AVCodec *codec, enum AVPixelFormat target)
64 {
65  if (codec && codec->pix_fmts) {
66  const enum AVPixelFormat *p = codec->pix_fmts;
67         const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(target);
68         //FIXME: This should check for AV_PIX_FMT_FLAG_ALPHA after PAL8 pixel format without alpha is implemented
69  int has_alpha = desc ? desc->nb_components % 2 == 0 : 0;
70         enum AVPixelFormat best= AV_PIX_FMT_NONE;
71 
72         if (enc_ctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL) {
73             p = get_compliance_unofficial_pix_fmts(enc_ctx->codec_id, p);
74         }
75  for (; *p != AV_PIX_FMT_NONE; p++) {
76  best= avcodec_find_best_pix_fmt_of_2(best, *p, target, has_alpha, NULL);
77  if (*p == target)
78  break;
79  }
80  if (*p == AV_PIX_FMT_NONE) {
81  if (target != AV_PIX_FMT_NONE)
82                 av_log(NULL, AV_LOG_WARNING,
83                        "Incompatible pixel format '%s' for codec '%s', auto-selecting format '%s'\n",
84  av_get_pix_fmt_name(target),
85  codec->name,
86  av_get_pix_fmt_name(best));
87  return best;
88  }
89  }
90  return target;
91 }
92 
93 void choose_sample_fmt(AVStream *st, AVCodec *codec)
94 {
95  if (codec && codec->sample_fmts) {
96  const enum AVSampleFormat *p = codec->sample_fmts;
97  for (; *p != -1; p++) {
98  if (*p == st->codecpar->format)
99  break;
100  }
101  if (*p == -1) {
102             if ((codec->capabilities & AV_CODEC_CAP_LOSSLESS) && av_get_sample_fmt_name(st->codecpar->format) > av_get_sample_fmt_name(codec->sample_fmts[0]))
103                 av_log(NULL, AV_LOG_ERROR, "Conversion will not be lossless.\n");
104             if (av_get_sample_fmt_name(st->codecpar->format))
105                 av_log(NULL, AV_LOG_WARNING,
106                        "Incompatible sample format '%s' for codec '%s', auto-selecting format '%s'\n",
107                        av_get_sample_fmt_name(st->codecpar->format),
108                        codec->name,
109                        av_get_sample_fmt_name(codec->sample_fmts[0]));
110             st->codecpar->format = codec->sample_fmts[0];
111  }
112  }
113 }
114 
115 static char *choose_pix_fmts(OutputFilter *ofilter)
116 {
117  OutputStream *ost = ofilter->ost;
118  AVDictionaryEntry *strict_dict = av_dict_get(ost->encoder_opts, "strict", NULL, 0);
119  if (strict_dict)
120  // used by choose_pixel_fmt() and below
121  av_opt_set(ost->enc_ctx, "strict", strict_dict->value, 0);
122 
123     if (ost->keep_pix_fmt) {
124         avfilter_graph_set_auto_convert(ofilter->graph->graph,
125                                         AVFILTER_AUTO_CONVERT_NONE);
126         if (ost->enc_ctx->pix_fmt == AV_PIX_FMT_NONE)
127             return NULL;
128         return av_strdup(av_get_pix_fmt_name(ost->enc_ctx->pix_fmt));
129     }
130  if (ost->enc_ctx->pix_fmt != AV_PIX_FMT_NONE) {
131  return av_strdup(av_get_pix_fmt_name(choose_pixel_fmt(ost->st, ost->enc_ctx, ost->enc, ost->enc_ctx->pix_fmt)));
132  } else if (ost->enc && ost->enc->pix_fmts) {
133  const enum AVPixelFormat *p;
134  AVIOContext *s = NULL;
135  uint8_t *ret;
136  int len;
137 
138  if (avio_open_dyn_buf(&s) < 0)
139  exit_program(1);
140 
141         p = ost->enc->pix_fmts;
142         if (ost->enc_ctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL) {
143             p = get_compliance_unofficial_pix_fmts(ost->enc_ctx->codec_id, p);
144         }
145 
146  for (; *p != AV_PIX_FMT_NONE; p++) {
147  const char *name = av_get_pix_fmt_name(*p);
148  avio_printf(s, "%s|", name);
149  }
150  len = avio_close_dyn_buf(s, &ret);
151  ret[len - 1] = 0;
152  return ret;
153  } else
154  return NULL;
155 }
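For illustration only (the encoder format list below is invented): the '|'-separated string built by choose_pix_fmts() is later passed as the "pix_fmts" argument of the auto-inserted "format" filter in configure_output_video_filter().

/* If ost->enc->pix_fmts were { AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P,
 * AV_PIX_FMT_YUV444P, AV_PIX_FMT_NONE } and no pixel format was forced,
 * choose_pix_fmts() would return the heap-allocated string
 *
 *     "yuv420p|yuv422p|yuv444p"
 *
 * (the loop prints "name|" for each format and the final '|' is then
 * overwritten with the NUL terminator). */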
156 
157 /* Define a function for building a string containing a list of
158  * allowed formats. */
159 #define DEF_CHOOSE_FORMAT(suffix, type, var, supported_list, none, get_name) \
160 static char *choose_ ## suffix (OutputFilter *ofilter) \
161 { \
162  if (ofilter->var != none) { \
163  get_name(ofilter->var); \
164  return av_strdup(name); \
165  } else if (ofilter->supported_list) { \
166  const type *p; \
167  AVIOContext *s = NULL; \
168  uint8_t *ret; \
169  int len; \
170  \
171  if (avio_open_dyn_buf(&s) < 0) \
172  exit_program(1); \
173  \
174  for (p = ofilter->supported_list; *p != none; p++) { \
175  get_name(*p); \
176  avio_printf(s, "%s|", name); \
177  } \
178  len = avio_close_dyn_buf(s, &ret); \
179  ret[len - 1] = 0; \
180  return ret; \
181  } else \
182  return NULL; \
183 }
184 
185 //DEF_CHOOSE_FORMAT(pix_fmts, enum AVPixelFormat, format, formats, AV_PIX_FMT_NONE,
186 //                  GET_PIX_FMT_NAME)
187 
188 DEF_CHOOSE_FORMAT(sample_fmts, enum AVSampleFormat, format, formats,
189                   AV_SAMPLE_FMT_NONE, GET_SAMPLE_FMT_NAME)
190 
191 DEF_CHOOSE_FORMAT(sample_rates, int, sample_rate, sample_rates, 0,
192                   GET_SAMPLE_RATE_NAME)
193 
194 DEF_CHOOSE_FORMAT(channel_layouts, uint64_t, channel_layout, channel_layouts, 0,
195                   GET_CH_LAYOUT_NAME)
196 
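As a reading aid (not part of the original file): the DEF_CHOOSE_FORMAT instantiations above expand mechanically. The sample_rates one yields roughly the function below; GET_SAMPLE_RATE_NAME (cmdutils.h) declares a local `name` holding the textual form of the value, and, like the macro, the expansion returns the dynamic-buffer pointer directly.

static char *choose_sample_rates(OutputFilter *ofilter)
{
    if (ofilter->sample_rate != 0) {
        GET_SAMPLE_RATE_NAME(ofilter->sample_rate);
        return av_strdup(name);
    } else if (ofilter->sample_rates) {
        const int *p;
        AVIOContext *s = NULL;
        uint8_t *ret;
        int len;

        if (avio_open_dyn_buf(&s) < 0)
            exit_program(1);

        for (p = ofilter->sample_rates; *p != 0; p++) {
            GET_SAMPLE_RATE_NAME(*p);
            avio_printf(s, "%s|", name);
        }
        len = avio_close_dyn_buf(s, &ret);
        ret[len - 1] = 0;
        return ret;
    } else
        return NULL;
}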
197 int init_simple_filtergraph(InputStream *ist, OutputStream *ost)
198 {
199  FilterGraph *fg = av_mallocz(sizeof(*fg));
200 
201  if (!fg)
202  exit_program(1);
203  fg->index = nb_filtergraphs;
204 
205  GROW_ARRAY(fg->outputs, fg->nb_outputs);
206  if (!(fg->outputs[0] = av_mallocz(sizeof(*fg->outputs[0]))))
207  exit_program(1);
208  fg->outputs[0]->ost = ost;
209  fg->outputs[0]->graph = fg;
210  fg->outputs[0]->format = -1;
211 
212  ost->filter = fg->outputs[0];
213 
214  GROW_ARRAY(fg->inputs, fg->nb_inputs);
215  if (!(fg->inputs[0] = av_mallocz(sizeof(*fg->inputs[0]))))
216  exit_program(1);
217  fg->inputs[0]->ist = ist;
218  fg->inputs[0]->graph = fg;
219  fg->inputs[0]->format = -1;
220 
221  fg->inputs[0]->frame_queue = av_fifo_alloc(8 * sizeof(AVFrame*));
222  if (!fg->inputs[0]->frame_queue)
223  exit_program(1);
224 
225  GROW_ARRAY(ist->filters, ist->nb_filters);
226  ist->filters[ist->nb_filters - 1] = fg->inputs[0];
227 
228     GROW_ARRAY(filtergraphs, nb_filtergraphs);
229     filtergraphs[nb_filtergraphs - 1] = fg;
230 
231  return 0;
232 }
233 
234 static char *describe_filter_link(FilterGraph *fg, AVFilterInOut *inout, int in)
235 {
236  AVFilterContext *ctx = inout->filter_ctx;
237  AVFilterPad *pads = in ? ctx->input_pads : ctx->output_pads;
238  int nb_pads = in ? ctx->nb_inputs : ctx->nb_outputs;
239  AVIOContext *pb;
240  uint8_t *res = NULL;
241 
242  if (avio_open_dyn_buf(&pb) < 0)
243  exit_program(1);
244 
245  avio_printf(pb, "%s", ctx->filter->name);
246  if (nb_pads > 1)
247  avio_printf(pb, ":%s", avfilter_pad_get_name(pads, inout->pad_idx));
248  avio_w8(pb, 0);
249  avio_close_dyn_buf(pb, &res);
250  return res;
251 }
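An illustration of the string describe_filter_link() produces (pad names here are assumptions, not taken from a real graph):

/* e.g. for an AVFilterInOut pointing at output pad 1 of a "split" instance,
 * the result looks like "split:output1"; for a filter with a single pad on
 * that side it is just the filter name, e.g. "overlay". */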
252 
253 static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
254 {
255  InputStream *ist = NULL;
256     enum AVMediaType type = avfilter_pad_get_type(in->filter_ctx->input_pads, in->pad_idx);
257     int i;
258 
259  // TODO: support other filter types
260  if (type != AVMEDIA_TYPE_VIDEO && type != AVMEDIA_TYPE_AUDIO) {
261  av_log(NULL, AV_LOG_FATAL, "Only video and audio filters supported "
262  "currently.\n");
263  exit_program(1);
264  }
265 
266  if (in->name) {
267         AVFormatContext *s;
268         AVStream       *st = NULL;
269  char *p;
270  int file_idx = strtol(in->name, &p, 0);
271 
272  if (file_idx < 0 || file_idx >= nb_input_files) {
273  av_log(NULL, AV_LOG_FATAL, "Invalid file index %d in filtergraph description %s.\n",
274  file_idx, fg->graph_desc);
275  exit_program(1);
276  }
277  s = input_files[file_idx]->ctx;
278 
279  for (i = 0; i < s->nb_streams; i++) {
280  enum AVMediaType stream_type = s->streams[i]->codecpar->codec_type;
281  if (stream_type != type &&
282  !(stream_type == AVMEDIA_TYPE_SUBTITLE &&
283  type == AVMEDIA_TYPE_VIDEO /* sub2video hack */))
284  continue;
285  if (check_stream_specifier(s, s->streams[i], *p == ':' ? p + 1 : p) == 1) {
286  st = s->streams[i];
287  break;
288  }
289  }
290  if (!st) {
291  av_log(NULL, AV_LOG_FATAL, "Stream specifier '%s' in filtergraph description %s "
292  "matches no streams.\n", p, fg->graph_desc);
293  exit_program(1);
294  }
295  ist = input_streams[input_files[file_idx]->ist_index + st->index];
296  if (ist->user_set_discard == AVDISCARD_ALL) {
297  av_log(NULL, AV_LOG_FATAL, "Stream specifier '%s' in filtergraph description %s "
298  "matches a disabled input stream.\n", p, fg->graph_desc);
299  exit_program(1);
300  }
301  } else {
302  /* find the first unused stream of corresponding type */
303  for (i = 0; i < nb_input_streams; i++) {
304  ist = input_streams[i];
305  if (ist->user_set_discard == AVDISCARD_ALL)
306  continue;
307  if (ist->dec_ctx->codec_type == type && ist->discard)
308  break;
309  }
310  if (i == nb_input_streams) {
311  av_log(NULL, AV_LOG_FATAL, "Cannot find a matching stream for "
312  "unlabeled input pad %d on filter %s\n", in->pad_idx,
313  in->filter_ctx->name);
314  exit_program(1);
315  }
316  }
317  av_assert0(ist);
318 
319  ist->discard = 0;
320     ist->decoding_needed |= DECODING_FOR_FILTER;
321     ist->st->discard = AVDISCARD_NONE;
322 
323  GROW_ARRAY(fg->inputs, fg->nb_inputs);
324  if (!(fg->inputs[fg->nb_inputs - 1] = av_mallocz(sizeof(*fg->inputs[0]))))
325  exit_program(1);
326  fg->inputs[fg->nb_inputs - 1]->ist = ist;
327  fg->inputs[fg->nb_inputs - 1]->graph = fg;
328  fg->inputs[fg->nb_inputs - 1]->format = -1;
329  fg->inputs[fg->nb_inputs - 1]->type = ist->st->codecpar->codec_type;
330  fg->inputs[fg->nb_inputs - 1]->name = describe_filter_link(fg, in, 1);
331 
332  fg->inputs[fg->nb_inputs - 1]->frame_queue = av_fifo_alloc(8 * sizeof(AVFrame*));
333  if (!fg->inputs[fg->nb_inputs - 1]->frame_queue)
334  exit_program(1);
335 
336  GROW_ARRAY(ist->filters, ist->nb_filters);
337  ist->filters[ist->nb_filters - 1] = fg->inputs[fg->nb_inputs - 1];
338 }
339 
340 int init_complex_filtergraph(FilterGraph *fg)
341 {
342  AVFilterInOut *inputs, *outputs, *cur;
343  AVFilterGraph *graph;
344  int ret = 0;
345 
346  /* this graph is only used for determining the kinds of inputs
347  * and outputs we have, and is discarded on exit from this function */
348  graph = avfilter_graph_alloc();
349  if (!graph)
350  return AVERROR(ENOMEM);
351  graph->nb_threads = 1;
352 
353  ret = avfilter_graph_parse2(graph, fg->graph_desc, &inputs, &outputs);
354  if (ret < 0)
355  goto fail;
356 
357  for (cur = inputs; cur; cur = cur->next)
358  init_input_filter(fg, cur);
359 
360  for (cur = outputs; cur;) {
361  GROW_ARRAY(fg->outputs, fg->nb_outputs);
362  fg->outputs[fg->nb_outputs - 1] = av_mallocz(sizeof(*fg->outputs[0]));
363  if (!fg->outputs[fg->nb_outputs - 1])
364  exit_program(1);
365 
366  fg->outputs[fg->nb_outputs - 1]->graph = fg;
367         fg->outputs[fg->nb_outputs - 1]->out_tmp = cur;
368         fg->outputs[fg->nb_outputs - 1]->type    = avfilter_pad_get_type(cur->filter_ctx->output_pads,
369                                                                          cur->pad_idx);
370  fg->outputs[fg->nb_outputs - 1]->name = describe_filter_link(fg, cur, 0);
371  cur = cur->next;
372  fg->outputs[fg->nb_outputs - 1]->out_tmp->next = NULL;
373  }
374 
375 fail:
376  avfilter_inout_free(&inputs);
377  avfilter_graph_free(&graph);
378  return ret;
379 }
380 
381 static int insert_trim(int64_t start_time, int64_t duration,
382  AVFilterContext **last_filter, int *pad_idx,
383  const char *filter_name)
384 {
385  AVFilterGraph *graph = (*last_filter)->graph;
386     AVFilterContext *ctx;
387     const AVFilter *trim;
388  enum AVMediaType type = avfilter_pad_get_type((*last_filter)->output_pads, *pad_idx);
389  const char *name = (type == AVMEDIA_TYPE_VIDEO) ? "trim" : "atrim";
390  int ret = 0;
391 
392  if (duration == INT64_MAX && start_time == AV_NOPTS_VALUE)
393  return 0;
394 
395  trim = avfilter_get_by_name(name);
396  if (!trim) {
397  av_log(NULL, AV_LOG_ERROR, "%s filter not present, cannot limit "
398                "recording time.\n", name);
399         return AVERROR_FILTER_NOT_FOUND;
400     }
401 
402  ctx = avfilter_graph_alloc_filter(graph, trim, filter_name);
403  if (!ctx)
404  return AVERROR(ENOMEM);
405 
406  if (duration != INT64_MAX) {
407         ret = av_opt_set_int(ctx, "durationi", duration,
408                              AV_OPT_SEARCH_CHILDREN);
409     }
410  if (ret >= 0 && start_time != AV_NOPTS_VALUE) {
411         ret = av_opt_set_int(ctx, "starti", start_time,
412                              AV_OPT_SEARCH_CHILDREN);
413     }
414  if (ret < 0) {
415  av_log(ctx, AV_LOG_ERROR, "Error configuring the %s filter", name);
416  return ret;
417  }
418 
419  ret = avfilter_init_str(ctx, NULL);
420  if (ret < 0)
421  return ret;
422 
423  ret = avfilter_link(*last_filter, *pad_idx, ctx, 0);
424  if (ret < 0)
425  return ret;
426 
427  *last_filter = ctx;
428  *pad_idx = 0;
429  return 0;
430 }
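A usage sketch for insert_trim() with invented values; OutputFile.start_time and recording_time are in AV_TIME_BASE units (microseconds), which is what the "starti"/"durationi" options above receive.

/* Limit an output to 10 seconds starting at 2 seconds:
 *
 *     ret = insert_trim(2 * AV_TIME_BASE, 10 * AV_TIME_BASE,
 *                       &last_filter, &pad_idx, "trim_out_0_0");
 *
 * This instantiates "trim" (video) or "atrim" (audio) depending on the type
 * of *last_filter's selected output pad, links it after *last_filter, and
 * resets *pad_idx to 0. */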
431 
432 static int insert_filter(AVFilterContext **last_filter, int *pad_idx,
433  const char *filter_name, const char *args)
434 {
435  AVFilterGraph *graph = (*last_filter)->graph;
436     AVFilterContext *ctx;
437     int ret;
438 
439  ret = avfilter_graph_create_filter(&ctx,
440  avfilter_get_by_name(filter_name),
441  filter_name, args, NULL, graph);
442  if (ret < 0)
443  return ret;
444 
445  ret = avfilter_link(*last_filter, *pad_idx, ctx, 0);
446  if (ret < 0)
447  return ret;
448 
449  *last_filter = ctx;
450  *pad_idx = 0;
451  return 0;
452 }
453 
454 static int configure_output_video_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
455 {
456  char *pix_fmts;
457  OutputStream *ost = ofilter->ost;
458  OutputFile *of = output_files[ost->file_index];
459  AVFilterContext *last_filter = out->filter_ctx;
460  int pad_idx = out->pad_idx;
461  int ret;
462  char name[255];
463 
464  snprintf(name, sizeof(name), "out_%d_%d", ost->file_index, ost->index);
465  ret = avfilter_graph_create_filter(&ofilter->filter,
466  avfilter_get_by_name("buffersink"),
467  name, NULL, NULL, fg->graph);
468 
469  if (ret < 0)
470  return ret;
471 
472  if (ofilter->width || ofilter->height) {
473         char args[255];
474         AVFilterContext *filter;
475         AVDictionaryEntry *e = NULL;
476 
477  snprintf(args, sizeof(args), "%d:%d",
478  ofilter->width, ofilter->height);
479 
480         while ((e = av_dict_get(ost->sws_dict, "", e,
481                                 AV_DICT_IGNORE_SUFFIX))) {
482  av_strlcatf(args, sizeof(args), ":%s=%s", e->key, e->value);
483  }
484 
485  snprintf(name, sizeof(name), "scaler_out_%d_%d",
486  ost->file_index, ost->index);
487  if ((ret = avfilter_graph_create_filter(&filter, avfilter_get_by_name("scale"),
488  name, args, NULL, fg->graph)) < 0)
489  return ret;
490  if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
491  return ret;
492 
493  last_filter = filter;
494  pad_idx = 0;
495  }
496 
497     if ((pix_fmts = choose_pix_fmts(ofilter))) {
498         AVFilterContext *filter;
499  snprintf(name, sizeof(name), "format_out_%d_%d",
500  ost->file_index, ost->index);
501  ret = avfilter_graph_create_filter(&filter,
502  avfilter_get_by_name("format"),
503  "format", pix_fmts, NULL, fg->graph);
504  av_freep(&pix_fmts);
505  if (ret < 0)
506  return ret;
507  if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
508  return ret;
509 
510  last_filter = filter;
511  pad_idx = 0;
512  }
513 
514  if (ost->frame_rate.num && 0) {
515  AVFilterContext *fps;
516  char args[255];
517 
518  snprintf(args, sizeof(args), "fps=%d/%d", ost->frame_rate.num,
519  ost->frame_rate.den);
520  snprintf(name, sizeof(name), "fps_out_%d_%d",
521  ost->file_index, ost->index);
522         ret = avfilter_graph_create_filter(&fps, avfilter_get_by_name("fps"),
523                                            name, args, NULL, fg->graph);
524  if (ret < 0)
525  return ret;
526 
527  ret = avfilter_link(last_filter, pad_idx, fps, 0);
528  if (ret < 0)
529  return ret;
530  last_filter = fps;
531  pad_idx = 0;
532  }
533 
534  snprintf(name, sizeof(name), "trim_out_%d_%d",
535  ost->file_index, ost->index);
536  ret = insert_trim(of->start_time, of->recording_time,
537  &last_filter, &pad_idx, name);
538  if (ret < 0)
539  return ret;
540 
541 
542  if ((ret = avfilter_link(last_filter, pad_idx, ofilter->filter, 0)) < 0)
543  return ret;
544 
545  return 0;
546 }
547 
548 static int configure_output_audio_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
549 {
550  OutputStream *ost = ofilter->ost;
551  OutputFile *of = output_files[ost->file_index];
552  AVCodecContext *codec = ost->enc_ctx;
553  AVFilterContext *last_filter = out->filter_ctx;
554     int pad_idx = out->pad_idx;
555     char *sample_fmts, *sample_rates, *channel_layouts;
556     char name[255];
557  int ret;
558 
559  snprintf(name, sizeof(name), "out_%d_%d", ost->file_index, ost->index);
560  ret = avfilter_graph_create_filter(&ofilter->filter,
561  avfilter_get_by_name("abuffersink"),
562  name, NULL, NULL, fg->graph);
563  if (ret < 0)
564  return ret;
565  if ((ret = av_opt_set_int(ofilter->filter, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
566  return ret;
567 
568 #define AUTO_INSERT_FILTER(opt_name, filter_name, arg) do { \
569  AVFilterContext *filt_ctx; \
570  \
571  av_log(NULL, AV_LOG_INFO, opt_name " is forwarded to lavfi " \
572  "similarly to -af " filter_name "=%s.\n", arg); \
573  \
574  ret = avfilter_graph_create_filter(&filt_ctx, \
575  avfilter_get_by_name(filter_name), \
576  filter_name, arg, NULL, fg->graph); \
577  if (ret < 0) \
578  return ret; \
579  \
580  ret = avfilter_link(last_filter, pad_idx, filt_ctx, 0); \
581  if (ret < 0) \
582  return ret; \
583  \
584  last_filter = filt_ctx; \
585  pad_idx = 0; \
586 } while (0)
587  if (ost->audio_channels_mapped) {
588  int i;
589  AVBPrint pan_buf;
590  av_bprint_init(&pan_buf, 256, 8192);
591         av_bprintf(&pan_buf, "0x%"PRIx64,
592                    av_get_default_channel_layout(ost->audio_channels_mapped));
593  for (i = 0; i < ost->audio_channels_mapped; i++)
594  if (ost->audio_channels_map[i] != -1)
595  av_bprintf(&pan_buf, "|c%d=c%d", i, ost->audio_channels_map[i]);
596 
597  AUTO_INSERT_FILTER("-map_channel", "pan", pan_buf.str);
598  av_bprint_finalize(&pan_buf, NULL);
599  }
600 
601     if (codec->channels && !codec->channel_layout)
602         codec->channel_layout = av_get_default_channel_layout(codec->channels);
603 
604  sample_fmts = choose_sample_fmts(ofilter);
605  sample_rates = choose_sample_rates(ofilter);
606  channel_layouts = choose_channel_layouts(ofilter);
607     if (sample_fmts || sample_rates || channel_layouts) {
608         AVFilterContext *format;
609  char args[256];
610  args[0] = 0;
611 
612  if (sample_fmts)
613  av_strlcatf(args, sizeof(args), "sample_fmts=%s:",
614  sample_fmts);
615  if (sample_rates)
616  av_strlcatf(args, sizeof(args), "sample_rates=%s:",
617  sample_rates);
618  if (channel_layouts)
619  av_strlcatf(args, sizeof(args), "channel_layouts=%s:",
620  channel_layouts);
621 
622  av_freep(&sample_fmts);
623  av_freep(&sample_rates);
624  av_freep(&channel_layouts);
625 
626  snprintf(name, sizeof(name), "format_out_%d_%d",
627  ost->file_index, ost->index);
628  ret = avfilter_graph_create_filter(&format,
629  avfilter_get_by_name("aformat"),
630  name, args, NULL, fg->graph);
631  if (ret < 0)
632  return ret;
633 
634  ret = avfilter_link(last_filter, pad_idx, format, 0);
635  if (ret < 0)
636  return ret;
637 
638  last_filter = format;
639  pad_idx = 0;
640  }
641 
642  if (audio_volume != 256 && 0) {
643  char args[256];
644 
645  snprintf(args, sizeof(args), "%f", audio_volume / 256.);
646  AUTO_INSERT_FILTER("-vol", "volume", args);
647  }
648 
649  if (ost->apad && of->shortest) {
650  char args[256];
651  int i;
652 
653         for (i=0; i<of->ctx->nb_streams; i++)
654             if (of->ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
655                 break;
656 
657  if (i<of->ctx->nb_streams) {
658  snprintf(args, sizeof(args), "%s", ost->apad);
659  AUTO_INSERT_FILTER("-apad", "apad", args);
660  }
661  }
662 
663  snprintf(name, sizeof(name), "trim for output stream %d:%d",
664  ost->file_index, ost->index);
665  ret = insert_trim(of->start_time, of->recording_time,
666  &last_filter, &pad_idx, name);
667  if (ret < 0)
668  return ret;
669 
670  if ((ret = avfilter_link(last_filter, pad_idx, ofilter->filter, 0)) < 0)
671  return ret;
672 
673  return 0;
674 }
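A hypothetical example of the aformat argument string assembled above (values invented):

/* With sample_fmts = "fltp", sample_rates = "44100|48000" and
 * channel_layouts = "0x3", args ends up as
 *
 *     "sample_fmts=fltp:sample_rates=44100|48000:channel_layouts=0x3:"
 *
 * note that the code keeps the trailing ':' produced by the last
 * av_strlcatf() call. */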
675 
676 int configure_output_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
677 {
678  if (!ofilter->ost) {
679  av_log(NULL, AV_LOG_FATAL, "Filter %s has an unconnected output\n", ofilter->name);
680  exit_program(1);
681  }
682 
683  switch (avfilter_pad_get_type(out->filter_ctx->output_pads, out->pad_idx)) {
684  case AVMEDIA_TYPE_VIDEO: return configure_output_video_filter(fg, ofilter, out);
685  case AVMEDIA_TYPE_AUDIO: return configure_output_audio_filter(fg, ofilter, out);
686  default: av_assert0(0);
687  }
688 }
689 
690 void check_filter_outputs(void)
691 {
692  int i;
693  for (i = 0; i < nb_filtergraphs; i++) {
694  int n;
695         for (n = 0; n < filtergraphs[i]->nb_outputs; n++) {
696             OutputFilter *output = filtergraphs[i]->outputs[n];
697  if (!output->ost) {
698  av_log(NULL, AV_LOG_FATAL, "Filter %s has an unconnected output\n", output->name);
699  exit_program(1);
700  }
701  }
702  }
703 }
704 
705 static int sub2video_prepare(InputStream *ist, InputFilter *ifilter)
706 {
707     AVFormatContext *avf = input_files[ist->file_index]->ctx;
708     int i, w, h;
709 
710  /* Compute the size of the canvas for the subtitles stream.
711  If the subtitles codecpar has set a size, use it. Otherwise use the
712  maximum dimensions of the video streams in the same file. */
713  w = ifilter->width;
714  h = ifilter->height;
715  if (!(w && h)) {
716  for (i = 0; i < avf->nb_streams; i++) {
717  if (avf->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
718  w = FFMAX(w, avf->streams[i]->codecpar->width);
719  h = FFMAX(h, avf->streams[i]->codecpar->height);
720  }
721  }
722  if (!(w && h)) {
723  w = FFMAX(w, 720);
724  h = FFMAX(h, 576);
725  }
726  av_log(avf, AV_LOG_INFO, "sub2video: using %dx%d canvas\n", w, h);
727  }
728  ist->sub2video.w = ifilter->width = w;
729  ist->sub2video.h = ifilter->height = h;
730 
731  ifilter->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
732  ifilter->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
733 
734  /* rectangles are AV_PIX_FMT_PAL8, but we have no guarantee that the
735  palettes for all rectangles are identical or compatible */
736  ifilter->format = AV_PIX_FMT_RGB32;
737 
738  ist->sub2video.frame = av_frame_alloc();
739  if (!ist->sub2video.frame)
740  return AVERROR(ENOMEM);
741  ist->sub2video.last_pts = INT64_MIN;
742  ist->sub2video.end_pts = INT64_MIN;
743  return 0;
744 }
745 
746 static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
747                                         AVFilterInOut *in)
748 {
749  AVFilterContext *last_filter;
750  const AVFilter *buffer_filt = avfilter_get_by_name("buffer");
751     InputStream *ist = ifilter->ist;
752     InputFile     *f = input_files[ist->file_index];
753  AVRational tb = ist->framerate.num ? av_inv_q(ist->framerate) :
754  ist->st->time_base;
755  AVRational fr = ist->framerate;
756  AVRational sar;
757  AVBPrint args;
758  char name[255];
759  int ret, pad_idx = 0;
760     int64_t tsoffset = 0;
761     AVBufferSrcParameters *par = av_buffersrc_parameters_alloc();
762 
763  if (!par)
764  return AVERROR(ENOMEM);
765  memset(par, 0, sizeof(*par));
766  par->format = AV_PIX_FMT_NONE;
767 
768  if (ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
769  av_log(NULL, AV_LOG_ERROR, "Cannot connect video filter to audio input\n");
770  ret = AVERROR(EINVAL);
771  goto fail;
772  }
773 
774  if (!fr.num)
775  fr = av_guess_frame_rate(input_files[ist->file_index]->ctx, ist->st, NULL);
776 
777     if (ist->dec_ctx->codec_type == AVMEDIA_TYPE_SUBTITLE) {
778         ret = sub2video_prepare(ist, ifilter);
779  if (ret < 0)
780  goto fail;
781  }
782 
783  sar = ifilter->sample_aspect_ratio;
784  if(!sar.den)
785         sar = (AVRational){0,1};
786     av_bprint_init(&args, 0, AV_BPRINT_SIZE_AUTOMATIC);
787  av_bprintf(&args,
788  "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:"
789  "pixel_aspect=%d/%d:sws_param=flags=%d",
790  ifilter->width, ifilter->height, ifilter->format,
791                tb.num, tb.den, sar.num, sar.den,
792                SWS_BILINEAR + ((ist->dec_ctx->flags&AV_CODEC_FLAG_BITEXACT) ? SWS_BITEXACT:0));
793  if (fr.num && fr.den)
794  av_bprintf(&args, ":frame_rate=%d/%d", fr.num, fr.den);
795  snprintf(name, sizeof(name), "graph %d input from stream %d:%d", fg->index,
796  ist->file_index, ist->st->index);
797 
798 
799  if ((ret = avfilter_graph_create_filter(&ifilter->filter, buffer_filt, name,
800  args.str, NULL, fg->graph)) < 0)
801  goto fail;
802  par->hw_frames_ctx = ifilter->hw_frames_ctx;
803  ret = av_buffersrc_parameters_set(ifilter->filter, par);
804  if (ret < 0)
805  goto fail;
806  av_freep(&par);
807  last_filter = ifilter->filter;
808 
809  if (ist->autorotate) {
810  double theta = get_rotation(ist->st);
811 
812  if (fabs(theta - 90) < 1.0) {
813  ret = insert_filter(&last_filter, &pad_idx, "transpose", "clock");
814  } else if (fabs(theta - 180) < 1.0) {
815  ret = insert_filter(&last_filter, &pad_idx, "hflip", NULL);
816  if (ret < 0)
817  return ret;
818  ret = insert_filter(&last_filter, &pad_idx, "vflip", NULL);
819  } else if (fabs(theta - 270) < 1.0) {
820  ret = insert_filter(&last_filter, &pad_idx, "transpose", "cclock");
821  } else if (fabs(theta) > 1.0) {
822  char rotate_buf[64];
823  snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
824  ret = insert_filter(&last_filter, &pad_idx, "rotate", rotate_buf);
825  }
826  if (ret < 0)
827  return ret;
828  }
829 
830  if (do_deinterlace) {
831  AVFilterContext *yadif;
832 
833  snprintf(name, sizeof(name), "deinterlace_in_%d_%d",
834  ist->file_index, ist->st->index);
835  if ((ret = avfilter_graph_create_filter(&yadif,
836  avfilter_get_by_name("yadif"),
837  name, "", NULL,
838  fg->graph)) < 0)
839  return ret;
840 
841  if ((ret = avfilter_link(last_filter, 0, yadif, 0)) < 0)
842  return ret;
843 
844  last_filter = yadif;
845  }
846 
847  snprintf(name, sizeof(name), "trim_in_%d_%d",
848  ist->file_index, ist->st->index);
849  if (copy_ts) {
850         tsoffset = f->start_time == AV_NOPTS_VALUE ? 0 : f->start_time;
851         if (!start_at_zero && f->ctx->start_time != AV_NOPTS_VALUE)
852  tsoffset += f->ctx->start_time;
853  }
854  ret = insert_trim(((f->start_time == AV_NOPTS_VALUE) || !f->accurate_seek) ?
855  AV_NOPTS_VALUE : tsoffset, f->recording_time,
856  &last_filter, &pad_idx, name);
857  if (ret < 0)
858  return ret;
859 
860  if ((ret = avfilter_link(last_filter, 0, in->filter_ctx, in->pad_idx)) < 0)
861  return ret;
862  return 0;
863 fail:
864  av_freep(&par);
865 
866  return ret;
867 }
868 
869 static int configure_input_audio_filter(FilterGraph *fg, InputFilter *ifilter,
870                                         AVFilterInOut *in)
871 {
872  AVFilterContext *last_filter;
873  const AVFilter *abuffer_filt = avfilter_get_by_name("abuffer");
874     InputStream *ist = ifilter->ist;
875     InputFile     *f = input_files[ist->file_index];
876  AVBPrint args;
877  char name[255];
878  int ret, pad_idx = 0;
879  int64_t tsoffset = 0;
880 
881  if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_AUDIO) {
882  av_log(NULL, AV_LOG_ERROR, "Cannot connect audio filter to non audio input\n");
883  return AVERROR(EINVAL);
884  }
885 
886     av_bprint_init(&args, 0, AV_BPRINT_SIZE_AUTOMATIC);
887     av_bprintf(&args, "time_base=%d/%d:sample_rate=%d:sample_fmt=%s",
888  1, ifilter->sample_rate,
889  ifilter->sample_rate,
890  av_get_sample_fmt_name(ifilter->format));
891  if (ifilter->channel_layout)
892  av_bprintf(&args, ":channel_layout=0x%"PRIx64,
893  ifilter->channel_layout);
894  else
895  av_bprintf(&args, ":channels=%d", ifilter->channels);
896  snprintf(name, sizeof(name), "graph_%d_in_%d_%d", fg->index,
897  ist->file_index, ist->st->index);
898 
899  if ((ret = avfilter_graph_create_filter(&ifilter->filter, abuffer_filt,
900  name, args.str, NULL,
901  fg->graph)) < 0)
902  return ret;
903  last_filter = ifilter->filter;
904 
905 #define AUTO_INSERT_FILTER_INPUT(opt_name, filter_name, arg) do { \
906  AVFilterContext *filt_ctx; \
907  \
908  av_log(NULL, AV_LOG_INFO, opt_name " is forwarded to lavfi " \
909  "similarly to -af " filter_name "=%s.\n", arg); \
910  \
911  snprintf(name, sizeof(name), "graph_%d_%s_in_%d_%d", \
912  fg->index, filter_name, ist->file_index, ist->st->index); \
913  ret = avfilter_graph_create_filter(&filt_ctx, \
914  avfilter_get_by_name(filter_name), \
915  name, arg, NULL, fg->graph); \
916  if (ret < 0) \
917  return ret; \
918  \
919  ret = avfilter_link(last_filter, 0, filt_ctx, 0); \
920  if (ret < 0) \
921  return ret; \
922  \
923  last_filter = filt_ctx; \
924 } while (0)
925 
926  if (audio_sync_method > 0) {
927  char args[256] = {0};
928 
929  av_strlcatf(args, sizeof(args), "async=%d", audio_sync_method);
930  if (audio_drift_threshold != 0.1)
931  av_strlcatf(args, sizeof(args), ":min_hard_comp=%f", audio_drift_threshold);
932  if (!fg->reconfiguration)
933  av_strlcatf(args, sizeof(args), ":first_pts=0");
934  AUTO_INSERT_FILTER_INPUT("-async", "aresample", args);
935  }
936 
937 // if (ost->audio_channels_mapped) {
938 // int i;
939 // AVBPrint pan_buf;
940 // av_bprint_init(&pan_buf, 256, 8192);
941 // av_bprintf(&pan_buf, "0x%"PRIx64,
942 // av_get_default_channel_layout(ost->audio_channels_mapped));
943 // for (i = 0; i < ost->audio_channels_mapped; i++)
944 // if (ost->audio_channels_map[i] != -1)
945 // av_bprintf(&pan_buf, ":c%d=c%d", i, ost->audio_channels_map[i]);
946 // AUTO_INSERT_FILTER_INPUT("-map_channel", "pan", pan_buf.str);
947 // av_bprint_finalize(&pan_buf, NULL);
948 // }
949 
950  if (audio_volume != 256) {
951  char args[256];
952 
953  av_log(NULL, AV_LOG_WARNING, "-vol has been deprecated. Use the volume "
954  "audio filter instead.\n");
955 
956  snprintf(args, sizeof(args), "%f", audio_volume / 256.);
957  AUTO_INSERT_FILTER_INPUT("-vol", "volume", args);
958  }
959 
960  snprintf(name, sizeof(name), "trim for input stream %d:%d",
961  ist->file_index, ist->st->index);
962  if (copy_ts) {
963         tsoffset = f->start_time == AV_NOPTS_VALUE ? 0 : f->start_time;
964         if (!start_at_zero && f->ctx->start_time != AV_NOPTS_VALUE)
965  tsoffset += f->ctx->start_time;
966  }
967  ret = insert_trim(((f->start_time == AV_NOPTS_VALUE) || !f->accurate_seek) ?
968  AV_NOPTS_VALUE : tsoffset, f->recording_time,
969  &last_filter, &pad_idx, name);
970  if (ret < 0)
971  return ret;
972 
973  if ((ret = avfilter_link(last_filter, 0, in->filter_ctx, in->pad_idx)) < 0)
974  return ret;
975 
976  return 0;
977 }
978 
979 static int configure_input_filter(FilterGraph *fg, InputFilter *ifilter,
980                                   AVFilterInOut *in)
981 {
982     if (!ifilter->ist->dec) {
983         av_log(NULL, AV_LOG_ERROR,
984                "No decoder for stream #%d:%d, filtering impossible\n",
985                ifilter->ist->file_index, ifilter->ist->st->index);
986         return AVERROR_DECODER_NOT_FOUND;
987     }
988  switch (avfilter_pad_get_type(in->filter_ctx->input_pads, in->pad_idx)) {
989  case AVMEDIA_TYPE_VIDEO: return configure_input_video_filter(fg, ifilter, in);
990  case AVMEDIA_TYPE_AUDIO: return configure_input_audio_filter(fg, ifilter, in);
991  default: av_assert0(0);
992  }
993 }
994 
995 static void cleanup_filtergraph(FilterGraph *fg)
996 {
997  int i;
998  for (i = 0; i < fg->nb_outputs; i++)
999  fg->outputs[i]->filter = (AVFilterContext *)NULL;
1000  for (i = 0; i < fg->nb_inputs; i++)
1001  fg->inputs[i]->filter = (AVFilterContext *)NULL;
1002  avfilter_graph_free(&fg->graph);
1003 }
1004 
1005 int configure_filtergraph(FilterGraph *fg)
1006 {
1007  AVFilterInOut *inputs, *outputs, *cur;
1008  int ret, i, simple = filtergraph_is_simple(fg);
1009  const char *graph_desc = simple ? fg->outputs[0]->ost->avfilter :
1010  fg->graph_desc;
1011 
1012  cleanup_filtergraph(fg);
1013  if (!(fg->graph = avfilter_graph_alloc()))
1014  return AVERROR(ENOMEM);
1015 
1016  if (simple) {
1017  OutputStream *ost = fg->outputs[0]->ost;
1018  char args[512];
1019         AVDictionaryEntry *e = NULL;
1020 
1021         fg->graph->nb_threads = filter_nbthreads;
1022 
1023  args[0] = 0;
1024         while ((e = av_dict_get(ost->sws_dict, "", e,
1025                                 AV_DICT_IGNORE_SUFFIX))) {
1026  av_strlcatf(args, sizeof(args), "%s=%s:", e->key, e->value);
1027  }
1028  if (strlen(args))
1029  args[strlen(args)-1] = 0;
1030  fg->graph->scale_sws_opts = av_strdup(args);
1031 
1032  args[0] = 0;
1033         while ((e = av_dict_get(ost->swr_opts, "", e,
1034                                 AV_DICT_IGNORE_SUFFIX))) {
1035  av_strlcatf(args, sizeof(args), "%s=%s:", e->key, e->value);
1036  }
1037  if (strlen(args))
1038  args[strlen(args)-1] = 0;
1039  av_opt_set(fg->graph, "aresample_swr_opts", args, 0);
1040 
1041  args[0] = '\0';
1042         while ((e = av_dict_get(fg->outputs[0]->ost->resample_opts, "", e,
1043                                 AV_DICT_IGNORE_SUFFIX))) {
1044  av_strlcatf(args, sizeof(args), "%s=%s:", e->key, e->value);
1045  }
1046  if (strlen(args))
1047  args[strlen(args) - 1] = '\0';
1048 
1049  e = av_dict_get(ost->encoder_opts, "threads", NULL, 0);
1050  if (e)
1051  av_opt_set(fg->graph, "threads", e->value, 0);
1052     } else {
1053         fg->graph->nb_threads = filter_complex_nbthreads;
1054  }
1055 
1056  if ((ret = avfilter_graph_parse2(fg->graph, graph_desc, &inputs, &outputs)) < 0)
1057  goto fail;
1058 
1059     if (filter_hw_device || hw_device_ctx) {
1060         AVBufferRef *device = filter_hw_device ? filter_hw_device->device_ref
1061                                                : hw_device_ctx;
1062  for (i = 0; i < fg->graph->nb_filters; i++) {
1063  fg->graph->filters[i]->hw_device_ctx = av_buffer_ref(device);
1064  if (!fg->graph->filters[i]->hw_device_ctx) {
1065  ret = AVERROR(ENOMEM);
1066  goto fail;
1067  }
1068  }
1069  }
1070 
1071  if (simple && (!inputs || inputs->next || !outputs || outputs->next)) {
1072  const char *num_inputs;
1073  const char *num_outputs;
1074  if (!outputs) {
1075  num_outputs = "0";
1076  } else if (outputs->next) {
1077  num_outputs = ">1";
1078  } else {
1079  num_outputs = "1";
1080  }
1081  if (!inputs) {
1082  num_inputs = "0";
1083  } else if (inputs->next) {
1084  num_inputs = ">1";
1085  } else {
1086  num_inputs = "1";
1087  }
1088  av_log(NULL, AV_LOG_ERROR, "Simple filtergraph '%s' was expected "
1089  "to have exactly 1 input and 1 output."
1090  " However, it had %s input(s) and %s output(s)."
1091  " Please adjust, or use a complex filtergraph (-filter_complex) instead.\n",
1092  graph_desc, num_inputs, num_outputs);
1093  ret = AVERROR(EINVAL);
1094  goto fail;
1095  }
1096 
1097  for (cur = inputs, i = 0; cur; cur = cur->next, i++)
1098  if ((ret = configure_input_filter(fg, fg->inputs[i], cur)) < 0) {
1099  avfilter_inout_free(&inputs);
1100  avfilter_inout_free(&outputs);
1101  goto fail;
1102  }
1103  avfilter_inout_free(&inputs);
1104 
1105  for (cur = outputs, i = 0; cur; cur = cur->next, i++)
1106  configure_output_filter(fg, fg->outputs[i], cur);
1107  avfilter_inout_free(&outputs);
1108 
1109  if ((ret = avfilter_graph_config(fg->graph, NULL)) < 0)
1110  goto fail;
1111 
1112  /* limit the lists of allowed formats to the ones selected, to
1113  * make sure they stay the same if the filtergraph is reconfigured later */
1114  for (i = 0; i < fg->nb_outputs; i++) {
1115  OutputFilter *ofilter = fg->outputs[i];
1116  AVFilterContext *sink = ofilter->filter;
1117 
1118  ofilter->format = av_buffersink_get_format(sink);
1119 
1120  ofilter->width = av_buffersink_get_w(sink);
1121  ofilter->height = av_buffersink_get_h(sink);
1122 
1123         ofilter->sample_rate    = av_buffersink_get_sample_rate(sink);
1124         ofilter->channel_layout = av_buffersink_get_channel_layout(sink);
1125  }
1126 
1127  fg->reconfiguration = 1;
1128 
1129  for (i = 0; i < fg->nb_outputs; i++) {
1130  OutputStream *ost = fg->outputs[i]->ost;
1131  if (!ost->enc) {
1132  /* identical to the same check in ffmpeg.c, needed because
1133  complex filter graphs are initialized earlier */
1134  av_log(NULL, AV_LOG_ERROR, "Encoder (codec %s) not found for output stream #%d:%d\n",
1135  avcodec_get_name(ost->st->codecpar->codec_id), ost->file_index, ost->index);
1136  ret = AVERROR(EINVAL);
1137  goto fail;
1138  }
1139  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
1140             !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
1141             av_buffersink_set_frame_size(fg->outputs[i]->filter,
1142  ost->enc_ctx->frame_size);
1143  }
1144 
1145  for (i = 0; i < fg->nb_inputs; i++) {
1146  while (av_fifo_size(fg->inputs[i]->frame_queue)) {
1147  AVFrame *tmp;
1148  av_fifo_generic_read(fg->inputs[i]->frame_queue, &tmp, sizeof(tmp), NULL);
1149  ret = av_buffersrc_add_frame(fg->inputs[i]->filter, tmp);
1150  av_frame_free(&tmp);
1151  if (ret < 0)
1152  goto fail;
1153  }
1154  }
1155 
1156  /* send the EOFs for the finished inputs */
1157  for (i = 0; i < fg->nb_inputs; i++) {
1158  if (fg->inputs[i]->eof) {
1159  ret = av_buffersrc_add_frame(fg->inputs[i]->filter, NULL);
1160  if (ret < 0)
1161  goto fail;
1162  }
1163  }
1164 
1165  /* process queued up subtitle packets */
1166  for (i = 0; i < fg->nb_inputs; i++) {
1167  InputStream *ist = fg->inputs[i]->ist;
1168  if (ist->sub2video.sub_queue && ist->sub2video.frame) {
1169  while (av_fifo_size(ist->sub2video.sub_queue)) {
1170  AVSubtitle tmp;
1171  av_fifo_generic_read(ist->sub2video.sub_queue, &tmp, sizeof(tmp), NULL);
1172  sub2video_update(ist, &tmp);
1173  avsubtitle_free(&tmp);
1174  }
1175  }
1176  }
1177 
1178  return 0;
1179 
1180 fail:
1181  cleanup_filtergraph(fg);
1182  return ret;
1183 }
1184 
1185 int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame)
1186 {
1187  av_buffer_unref(&ifilter->hw_frames_ctx);
1188 
1189  ifilter->format = frame->format;
1190 
1191  ifilter->width = frame->width;
1192  ifilter->height = frame->height;
1193  ifilter->sample_aspect_ratio = frame->sample_aspect_ratio;
1194 
1195  ifilter->sample_rate = frame->sample_rate;
1196  ifilter->channels = frame->channels;
1197  ifilter->channel_layout = frame->channel_layout;
1198 
1199  if (frame->hw_frames_ctx) {
1200  ifilter->hw_frames_ctx = av_buffer_ref(frame->hw_frames_ctx);
1201  if (!ifilter->hw_frames_ctx)
1202  return AVERROR(ENOMEM);
1203  }
1204 
1205  return 0;
1206 }
1207 
1208 int ist_in_filtergraph(FilterGraph *fg, InputStream *ist)
1209 {
1210  int i;
1211  for (i = 0; i < fg->nb_inputs; i++)
1212  if (fg->inputs[i]->ist == ist)
1213  return 1;
1214  return 0;
1215 }
1216 
1217 int filtergraph_is_simple(FilterGraph *fg)
1218 {
1219  return !fg->graph_desc;
1220 }
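The functions above wire decoded streams into a parsed filtergraph and back out through buffersink/abuffersink. As a self-contained reference for that flow, here is a minimal sketch using only public libavfilter calls; the chain, dimensions and instance names are invented, error handling is collapsed into one ret check, and it assumes the parsed chain has exactly one open input and one open output.

#include <libavfilter/avfilter.h>

static AVFilterGraph *build_example_graph(AVFilterContext **src,
                                          AVFilterContext **sink)
{
    AVFilterGraph *graph  = avfilter_graph_alloc();
    AVFilterInOut *inputs = NULL, *outputs = NULL;
    int ret = graph ? 0 : AVERROR(ENOMEM);

    /* video source feeding the graph: 1280x720 yuv420p, 1/25 time base */
    if (ret >= 0)
        ret = avfilter_graph_create_filter(src, avfilter_get_by_name("buffer"),
                                           "in",
                                           "video_size=1280x720:pix_fmt=yuv420p:"
                                           "time_base=1/25:pixel_aspect=1/1",
                                           NULL, graph);
    /* sink from which filtered frames are pulled */
    if (ret >= 0)
        ret = avfilter_graph_create_filter(sink,
                                           avfilter_get_by_name("buffersink"),
                                           "out", NULL, NULL, graph);
    /* parse a user-style chain, then hook its open ends to the two endpoints,
     * the same way configure_filtergraph() wires fg->inputs / fg->outputs */
    if (ret >= 0)
        ret = avfilter_graph_parse2(graph, "scale=640:360,format=yuv420p",
                                    &inputs, &outputs);
    if (ret >= 0)
        ret = avfilter_link(*src, 0, inputs->filter_ctx, inputs->pad_idx);
    if (ret >= 0)
        ret = avfilter_link(outputs->filter_ctx, outputs->pad_idx, *sink, 0);
    if (ret >= 0)
        ret = avfilter_graph_config(graph, NULL);

    avfilter_inout_free(&inputs);
    avfilter_inout_free(&outputs);
    if (ret < 0)
        avfilter_graph_free(&graph);
    return graph;
}

Frames would then be pushed with av_buffersrc_add_frame() and pulled with av_buffersink_get_frame(), which is how ffmpeg.c drives the graphs configured in this file.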
AVFilterContext ** filters
Definition: avfilter.h:842
void avfilter_graph_set_auto_convert(AVFilterGraph *graph, unsigned flags)
Enable or disable automatic format conversion inside the graph.
#define NULL
Definition: coverity.c:32
int width
Definition: ffmpeg.h:270
int keep_pix_fmt
Definition: ffmpeg.h:528
Bytestream IO Context.
Definition: avio.h:161
int64_t recording_time
desired length of the resulting file in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:558
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:94
uint8_t * name
Definition: ffmpeg.h:263
int nb_outputs
Definition: ffmpeg.h:292
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it...
Definition: buffer.c:125
AVDictionary * swr_opts
Definition: ffmpeg.h:509
#define DECODING_FOR_FILTER
Definition: ffmpeg.h:302
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2522
This structure describes decoded (raw) audio or video data.
Definition: frame.h:295
static int configure_input_filter(FilterGraph *fg, InputFilter *ifilter, AVFilterInOut *in)
int avio_close_dyn_buf(AVIOContext *s, uint8_t **pbuffer)
Return the written size and a pointer to the buffer.
Definition: aviobuf.c:1459
AVRational frame_rate
Definition: ffmpeg.h:477
HWDevice * filter_hw_device
Definition: ffmpeg_opt.c:82
double get_rotation(AVStream *st)
Definition: cmdutils.c:2182
int accurate_seek
Definition: ffmpeg.h:413
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
misc image utilities
AVFilterGraph * avfilter_graph_alloc(void)
Allocate a filter graph.
Definition: avfiltergraph.c:83
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
Main libavfilter public API header.
Memory buffer source API.
const char * desc
Definition: nvenc.c:68
AVRational framerate
Definition: ffmpeg.h:333
int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx)
Check validity and configure all the links and formats in the graph.
void avfilter_inout_free(AVFilterInOut **inout)
Free the supplied list of AVFilterInOut and set *inout to NULL.
Definition: graphparser.c:203
struct AVFilterInOut * next
next input/input in the list, NULL if this is the last
Definition: avfilter.h:1014
int height
Definition: ffmpeg.h:247
int check_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec)
Check if the given stream matches a stream specifier.
Definition: cmdutils.c:2078
enum AVCodecID codec_id
Definition: qsv.c:77
AVFilterInOut * out_tmp
Definition: ffmpeg.h:266
enum AVMediaType avfilter_pad_get_type(const AVFilterPad *pads, int pad_idx)
Get the type of an AVFilterPad.
Definition: avfilter.c:1052
int decoding_needed
Definition: ffmpeg.h:300
enum AVCodecID codec_id
Specific type of the encoded data (the codec used).
Definition: avcodec.h:3968
static int insert_trim(int64_t start_time, int64_t duration, AVFilterContext **last_filter, int *pad_idx, const char *filter_name)
void sub2video_update(InputStream *ist, AVSubtitle *sub)
Definition: ffmpeg.c:240
int num
Numerator.
Definition: rational.h:59
static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
int index
stream index in AVFormatContext
Definition: avformat.h:882
int init_complex_filtergraph(FilterGraph *fg)
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
AVBufferRef * hw_device_ctx
For filters which will create hardware frames, sets the device the filter should create them in...
Definition: avfilter.h:394
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1778
GLint GLenum type
Definition: opengl_enc.c:104
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:236
discard all
Definition: avcodec.h:814
int nb_input_streams
Definition: ffmpeg.c:148
#define DEF_CHOOSE_FORMAT(suffix, type, var, supported_list, none, get_name)
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample format(the sample packing is implied by the sample format) and sample rate.The lists are not just lists
static enum AVPixelFormat * get_compliance_unofficial_pix_fmts(enum AVCodecID codec_id, const enum AVPixelFormat default_formats[])
Definition: ffmpeg_filter.c:42
AVCodec.
Definition: avcodec.h:3492
int avio_open_dyn_buf(AVIOContext **s)
Open a write only memory stream.
Definition: aviobuf.c:1430
int64_t start_time
start time in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:559
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:235
int index
Definition: ffmpeg.h:283
char * scale_sws_opts
sws options to use for the auto-inserted scale filters
Definition: avfilter.h:845
struct FilterGraph * graph
Definition: ffmpeg.h:238
AVBufferRef * hw_frames_ctx
For hwaccel-format frames, this should be a reference to the AVHWFramesContext describing the frame...
Definition: frame.h:634
Format I/O context.
Definition: avformat.h:1358
AVRational av_guess_frame_rate(AVFormatContext *ctx, AVStream *stream, AVFrame *frame)
Guess the frame rate, based on both the container and codec information.
Definition: utils.c:5126
#define SWS_BILINEAR
Definition: swscale.h:59
int configure_filtergraph(FilterGraph *fg)
memory buffer sink API for audio and video
enum AVPixelFormat choose_pixel_fmt(AVStream *st, AVCodecContext *enc_ctx, AVCodec *codec, enum AVPixelFormat target)
Definition: ffmpeg_filter.c:63
struct InputStream * ist
Definition: ffmpeg.h:237
char * name
name of this filter instance
Definition: avfilter.h:343
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
AVFilterGraph * graph
Definition: ffmpeg.h:286
int av_buffersink_get_sample_rate(const AVFilterContext *ctx)
int avfilter_link(AVFilterContext *src, unsigned srcpad, AVFilterContext *dst, unsigned dstpad)
Link two filters together.
Definition: avfilter.c:135
AVFilterPad * output_pads
array of output pads
Definition: avfilter.h:349
int user_set_discard
Definition: ffmpeg.h:299
static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter, AVFilterInOut *in)
static int64_t start_time
Definition: ffplay.c:331
uint8_t
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:189
AVDictionary * sws_dict
Definition: ffmpeg.h:508
int width
Video only.
Definition: avcodec.h:4034
void check_filter_outputs(void)
AVOptions.
#define f(width, name)
Definition: cbs_vp9.c:255
static int configure_output_video_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce output
int configure_output_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
int audio_sync_method
Definition: ffmpeg_opt.c:92
int shortest
Definition: ffmpeg.h:562
AVStream ** streams
A list of all streams in the file.
Definition: avformat.h:1426
int64_t duration
Definition: movenc.c:63
int avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt, const char *name, const char *args, void *opaque, AVFilterGraph *graph_ctx)
Create and add a filter instance into an existing graph.
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:40
enum AVMediaType type
Definition: ffmpeg.h:240
int nb_threads
Maximum number of threads used by filters in this graph.
Definition: avfilter.h:869
AVDictionary * resample_opts
Definition: ffmpeg.h:510
AVFilterContext * filter
Definition: ffmpeg.h:260
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:79
int nb_input_files
Definition: ffmpeg.c:150
AVCodec * dec
Definition: ffmpeg.h:305
int avfilter_graph_parse2(AVFilterGraph *graph, const char *filters, AVFilterInOut **inputs, AVFilterInOut **outputs)
Add a graph described by a string to a graph.
Definition: graphparser.c:407
int ist_in_filtergraph(FilterGraph *fg, InputStream *ist)
int file_index
Definition: ffmpeg.h:296
int av_buffersink_get_h(const AVFilterContext *ctx)
struct InputStream::sub2video sub2video
int av_buffersink_get_format(const AVFilterContext *ctx)
#define av_log(a,...)
int filter_complex_nbthreads
Definition: ffmpeg_opt.c:112
#define FF_COMPLIANCE_UNOFFICIAL
Allow unofficial extensions.
Definition: avcodec.h:2635
A filter pad used for either input or output.
Definition: internal.h:54
all automatic conversions disabled
Definition: avfilter.h:976
uint64_t channel_layout
Definition: ffmpeg.h:274
AVFifoBuffer * sub_queue
queue of AVSubtitle* before filter init
Definition: ffmpeg.h:349
static int insert_filter(AVFilterContext **last_filter, int *pad_idx, const char *filter_name, const char *args)
AVFilterPad * input_pads
array of input pads
Definition: avfilter.h:345
AVRational sample_aspect_ratio
Definition: ffmpeg.h:248
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:259
AVCodecID
Identify the syntax and semantics of the bitstream.
Definition: avcodec.h:215
int width
Definition: frame.h:353
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
#define GET_CH_LAYOUT_NAME(ch_layout)
Definition: cmdutils.h:638
int sample_rate
Definition: ffmpeg.h:250
static AVBufferRef * hw_device_ctx
Definition: hw_decode.c:45
FilterGraph ** filtergraphs
Definition: ffmpeg.c:157
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce then the filter should push the output frames on the output link immediately As an exception to the previous rule if the input frame is enough to produce several output frames then the filter needs output only at least one per link The additional frames can be left buffered in the filter
AVFilterContext * filter
Definition: ffmpeg.h:236
int format
Definition: ffmpeg.h:245
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:202
unsigned nb_outputs
number of output pads
Definition: avfilter.h:351
Display matrix.
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:69
packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
Definition: pixfmt.h:95
int capabilities
Codec capabilities.
Definition: avcodec.h:3511
int av_fifo_generic_read(AVFifoBuffer *f, void *dest, int buf_size, void(*func)(void *, void *, int))
Feed data from an AVFifoBuffer to a user-supplied callback.
Definition: fifo.c:213
enum AVMediaType codec_type
General type of the encoded data.
Definition: avcodec.h:3964
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:1648
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
Definition: opt.c:568
simple assert() macros that are a bit more flexible than ISO C assert().
const char * name
Name of the codec implementation.
Definition: avcodec.h:3499
const char * av_get_sample_fmt_name(enum AVSampleFormat sample_fmt)
Return the name of sample_fmt, or NULL if sample_fmt is not recognized.
Definition: samplefmt.c:49
int eof
Definition: ffmpeg.h:256
int format
video: the pixel format, value corresponds to enum AVPixelFormat audio: the sample format...
Definition: buffersrc.h:78
#define FFMAX(a, b)
Definition: common.h:94
#define fail()
Definition: checkasm.h:122
#define AV_CODEC_CAP_VARIABLE_FRAME_SIZE
Audio encoder supports receiving a different number of samples in each call.
Definition: avcodec.h:1056
static int configure_output_audio_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
uint64_t channel_layout
Audio channel layout.
Definition: avcodec.h:2279
int filter_nbthreads
Definition: ffmpeg_opt.c:111
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
OutputFilter * filter
Definition: ffmpeg.h:502
uint64_t channel_layout
Channel layout of the audio data.
Definition: frame.h:472
const AVFilter * avfilter_get_by_name(const char *name)
Get a filter definition matching the given name.
Definition: allfilters.c:498
unsigned int nb_streams
Number of elements in AVFormatContext.streams.
Definition: avformat.h:1414
uint8_t nb_components
The number of components each pixel has, (1-4)
Definition: pixdesc.h:83
int channels
number of audio channels, only used for audio.
Definition: frame.h:601
audio channel layout utility functions
enum AVPixelFormat * pix_fmts
array of supported pixel formats, or NULL if unknown, array is terminated by -1
Definition: avcodec.h:3513
This structure contains the parameters describing the frames that will be passed to this filter...
Definition: buffersrc.h:73
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:911
unsigned nb_inputs
number of input pads
Definition: avfilter.h:347
external API header
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:78
#define AV_OPT_SEARCH_CHILDREN
Search in possible children of the given object first.
Definition: opt.h:557
struct OutputStream * ost
Definition: ffmpeg.h:261
int width
picture width / height.
Definition: avcodec.h:1741
uint8_t w
Definition: llviddspenc.c:38
char * apad
Definition: ffmpeg.h:511
AVS_Value args
Definition: avisynth_c.h:775
int width
Definition: ffmpeg.h:247
AVFormatContext * ctx
Definition: movenc.c:48
int nb_filtergraphs
Definition: ffmpeg.c:158
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
#define s(width, name)
Definition: cbs_vp9.c:257
int audio_channels_mapped
Definition: ffmpeg.h:497
int n
Definition: avisynth_c.h:760
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:69
int height
Definition: ffmpeg.h:270
int sample_rate
Definition: ffmpeg.h:273
#define GET_SAMPLE_FMT_NAME(sample_fmt)
Definition: cmdutils.h:631
AVFilterContext * filter_ctx
filter context associated to this input/output
Definition: avfilter.h:1008
static const AVFilterPad outputs[]
Definition: af_acontrast.c:203
const char * avcodec_get_name(enum AVCodecID id)
Get the name of a codec.
Definition: utils.c:1166
void exit_program(int ret)
Wraps exit with a program-specific cleanup routine.
Definition: cmdutils.c:136
AVCodecContext * enc
Definition: muxing.c:55
int start_at_zero
Definition: ffmpeg_opt.c:101
int avfilter_init_str(AVFilterContext *filter, const char *args)
Initialize a filter with the supplied parameters.
Definition: avfilter.c:937
int audio_volume
Definition: ffmpeg_opt.c:91
Stream structure.
Definition: avformat.h:881
static char * describe_filter_link(FilterGraph *fg, AVFilterInOut *inout, int in)
A linked-list of the inputs/outputs of the filter chain.
Definition: avfilter.h:1003
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:368
InputFilter ** filters
Definition: ffmpeg.h:358
enum AVPixelFormat avcodec_find_best_pix_fmt_of_2(enum AVPixelFormat dst_pix_fmt1, enum AVPixelFormat dst_pix_fmt2, enum AVPixelFormat src_pix_fmt, int has_alpha, int *loss_ptr)
Definition: imgconvert.c:54
#define GET_SAMPLE_RATE_NAME(rate)
Definition: cmdutils.h:634
int64_t recording_time
Definition: ffmpeg.h:408
int frame_size
Number of samples per channel in an audio frame.
Definition: avcodec.h:2248
AVBufferRef * hw_frames_ctx
Video with a hwaccel pixel format only.
Definition: buffersrc.h:106
AVStream * st
Definition: ffmpeg.h:297
sample_rate
#define AV_LOG_INFO
Standard information.
Definition: log.h:187
#define AV_BPRINT_SIZE_AUTOMATIC
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:251
static int configure_input_audio_filter(FilterGraph *fg, InputFilter *ifilter, AVFilterInOut *in)
enum AVMediaType codec_type
Definition: avcodec.h:1576
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:58
enum AVCodecID codec_id
Definition: avcodec.h:1578
int av_fifo_size(const AVFifoBuffer *f)
Return the amount of data in bytes in the AVFifoBuffer, that is the amount of data you can read from ...
Definition: fifo.c:77
int ist_index
Definition: ffmpeg.h:397
const char * graph_desc
Definition: ffmpeg.h:284
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
uint64_t av_buffersink_get_channel_layout(const AVFilterContext *ctx)
int64_t start_time
Definition: ffmpeg.h:406
void avio_w8(AVIOContext *s, int b)
Definition: aviobuf.c:196
main external API structure.
Definition: avcodec.h:1568
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:360
void avsubtitle_free(AVSubtitle *sub)
Free all allocated data in the given subtitle struct.
Definition: utils.c:1081
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:383
int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame)
AVCodecContext * enc_ctx
Definition: ffmpeg.h:465
#define AUTO_INSERT_FILTER(opt_name, filter_name, arg)
AVBufferRef * hw_frames_ctx
Definition: ffmpeg.h:254
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(const int16_t *) pi >> 8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(const int32_t *) pi >> 24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31))))#define SET_CONV_FUNC_GROUP(ofmt, ifmt) static void set_generic_function(AudioConvert *ac){}void ff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, int sample_rate, int apply_map){AudioConvert *ac;int in_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) return NULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt) > 2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);return NULL;}return ac;}in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}else if(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;else ac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);return ac;}int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){int use_generic=1;int len=in->nb_samples;int p;if(ac->dc){av_log(ac->avr, AV_LOG_TRACE,"%d samples - audio_convert: %s to %s (dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));return ff_convert_dither(ac-> in
int channels
Definition: ffmpeg.h:251
int * audio_channels_map
Definition: ffmpeg.h:496
static const uint16_t channel_layouts[7]
Definition: dca_lbr.c:113
int sample_rate
Sample rate of the audio data.
Definition: frame.h:467
Filter definition.
Definition: avfilter.h:144
int pad_idx
index of the filt_ctx pad to use for linking
Definition: avfilter.h:1011
Rational number (pair of numerator and denominator).
Definition: rational.h:58
int file_index
Definition: ffmpeg.h:443
AVCodecContext * dec_ctx
Definition: ffmpeg.h:304
AVMediaType
Definition: avutil.h:199
size_t av_strlcatf(char *dst, size_t size, const char *fmt,...)
Definition: avstring.c:101
packed BGR 8:8:8, 32bpp, BGRXBGRX... X=unused/undefined
Definition: pixfmt.h:240
const char * name
Filter name.
Definition: avfilter.h:148
int av_buffersink_get_w(const AVFilterContext *ctx)
unsigned nb_filters
Definition: avfilter.h:843
int autorotate
Definition: ffmpeg.h:337
#define snprintf
Definition: snprintf.h:34
const char * avfilter_pad_get_name(const AVFilterPad *pads, int pad_idx)
Get the name of an AVFilterPad.
Definition: avfilter.c:1047
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:275
float audio_drift_threshold
Definition: ffmpeg_opt.c:87
char * name
unique name for this input/output in the list
Definition: avfilter.h:1005
int nb_filters
Definition: ffmpeg.h:359
#define SWS_BITEXACT
Definition: swscale.h:84
int64_t start_time
Position of the first frame of the component, in AV_TIME_BASE fractional seconds. ...
Definition: avformat.h:1463
#define AVERROR_FILTER_NOT_FOUND
Filter not found.
Definition: error.h:58
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
static int sub2video_prepare(InputStream *ist, InputFilter *ifilter)
A reference to a data buffer.
Definition: buffer.h:81
static AVStream * ost
int reconfiguration
Definition: ffmpeg.h:287
struct FilterGraph * graph
Definition: ffmpeg.h:262
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
sample_rates
void av_buffersink_set_frame_size(AVFilterContext *ctx, unsigned frame_size)
Set the frame size for an audio buffer sink.
Definition: buffersink.c:175
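A short sketch of the kind of call site av_buffersink_set_frame_size() is meant for, with illustrative names (sketch_sync_frame_size, sink_ctx, enc_ctx): once the graph is configured, the encoder's fixed frame size is pushed to the audio sink unless the encoder accepts variable frame sizes.

#include "libavcodec/avcodec.h"
#include "libavfilter/buffersink.h"

/* sink_ctx: an abuffersink instance; enc_ctx: the opened audio encoder context */
static void sketch_sync_frame_size(AVFilterContext *sink_ctx, AVCodecContext *enc_ctx)
{
    if (enc_ctx->frame_size &&
        !(enc_ctx->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
        av_buffersink_set_frame_size(sink_ctx, enc_ctx->frame_size);
}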
int av_buffersrc_parameters_set(AVFilterContext *ctx, AVBufferSrcParameters *param)
Initialize the buffersrc or abuffersrc filter with the provided parameters.
Definition: buffersrc.c:99
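A minimal, hypothetical usage sketch of the buffersrc parameter helpers referenced here (sketch_set_buffersrc_params and all field values are examples only; buffersrc_ctx is assumed to be a "buffer" filter instance created before graph configuration):

#include "libavfilter/buffersrc.h"
#include "libavutil/error.h"
#include "libavutil/mem.h"
#include "libavutil/pixfmt.h"
#include "libavutil/rational.h"

static int sketch_set_buffersrc_params(AVFilterContext *buffersrc_ctx)
{
    AVBufferSrcParameters *par = av_buffersrc_parameters_alloc();
    int ret;

    if (!par)
        return AVERROR(ENOMEM);

    par->format    = AV_PIX_FMT_YUV420P;      /* example values */
    par->width     = 1280;
    par->height    = 720;
    par->time_base = (AVRational){ 1, 25 };

    ret = av_buffersrc_parameters_set(buffersrc_ctx, par);
    av_freep(&par);                            /* parameters are copied, free our copy */
    return ret;
}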
AVStream * st
Definition: muxing.c:54
AVBufferRef * device_ref
Definition: ffmpeg.h:77
#define AV_CODEC_CAP_LOSSLESS
Codec is lossless.
Definition: avcodec.h:1074
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:93
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:80
char * key
Definition: dict.h:86
int den
Denominator.
Definition: rational.h:60
uint64_t channel_layout
Definition: ffmpeg.h:252
int copy_ts
Definition: ffmpeg_opt.c:100
AVFormatContext * ctx
Definition: ffmpeg.h:394
#define AVERROR_DECODER_NOT_FOUND
Decoder not found.
Definition: error.h:52
int do_deinterlace
Definition: ffmpeg_opt.c:95
#define GROW_ARRAY(array, nb_elems)
Definition: cmdutils.h:622
pixel format definitions
char * avfilter
Definition: ffmpeg.h:503
uint8_t * name
Definition: ffmpeg.h:239
char * value
Definition: dict.h:87
AVFifoBuffer * av_fifo_alloc(unsigned int size)
Initialize an AVFifoBuffer.
Definition: fifo.c:43
int len
AVFilterContext * avfilter_graph_alloc_filter(AVFilterGraph *graph, const AVFilter *filter, const char *name)
Create a new filter instance in a filter graph.
int channels
number of audio channels
Definition: avcodec.h:2229
OutputFilter ** outputs
Definition: ffmpeg.h:291
InputFile ** input_files
Definition: ffmpeg.c:149
AVBufferSrcParameters * av_buffersrc_parameters_alloc(void)
Allocate a new AVBufferSrcParameters instance.
Definition: buffersrc.c:88
AVFormatContext * ctx
Definition: ffmpeg.h:555
int filtergraph_is_simple(FilterGraph *fg)
An instance of a filter.
Definition: avfilter.h:338
static char * choose_pix_fmts(OutputFilter *ofilter)
static void cleanup_filtergraph(FilterGraph *fg)
AVDictionary * encoder_opts
Definition: ffmpeg.h:507
int64_t av_get_default_channel_layout(int nb_channels)
Return default channel layout for a given number of channels.
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:701
int height
Definition: frame.h:353
FILE * out
Definition: movenc.c:54
InputFilter ** inputs
Definition: ffmpeg.h:289
#define av_freep(p)
#define AV_DICT_IGNORE_SUFFIX
Return first entry in a dictionary whose first part corresponds to the search key, ignoring the suffix of the found key string.
Definition: dict.h:70
OutputFile ** output_files
Definition: ffmpeg.c:154
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:170
AVCodecParameters * codecpar
Codec parameters associated with this stream.
Definition: avformat.h:1028
enum AVSampleFormat * sample_fmts
array of supported sample formats, or NULL if unknown, array is terminated by -1
Definition: avcodec.h:3515
int format
Definition: ffmpeg.h:272
formats
Definition: signature.h:48
int init_simple_filtergraph(InputStream *ist, OutputStream *ost)
int discard
Definition: ffmpeg.h:298
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2438
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avformat.h:910
void choose_sample_fmt(AVStream *st, AVCodec *codec)
Definition: ffmpeg_filter.c:93
Filtering: the word "frame" indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format negotiation: for each input and each output, the list of supported formats. For video that means pixel format; for audio that means channel layout and sample format. The lists are references to shared objects: when the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection, and when a single format is eventually chosen for a link amongst the remaining ones, all references to the list are updated. That means that if a filter requires that its input and output have the same format amongst a supported list, all it has to do is use a reference to the same list of formats. query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references, ownership and permissions
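The "same shared list for input and output" trick described above is what libavfilter's internal ff_make_format_list()/ff_set_common_formats() helpers provide. A minimal sketch of a filter's query_formats callback (sketch_query_formats and the pixel formats are illustrative; these ff_ helpers are only usable from inside libavfilter):

#include "libavfilter/avfilter.h"
#include "libavfilter/formats.h"    /* ff_make_format_list(), ff_set_common_formats() */
#include "libavutil/error.h"
#include "libavutil/pixfmt.h"

static int sketch_query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat fmts[] = {
        AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_NONE
    };
    AVFilterFormats *list = ff_make_format_list(fmts);

    if (!list)
        return AVERROR(ENOMEM);
    /* attaching the same AVFilterFormats object to every input and output
     * is what constrains them all to negotiate the same format */
    return ff_set_common_formats(ctx, list);
}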
int nb_inputs
Definition: ffmpeg.h:290
enum AVDiscard discard
Selects which packets can be discarded at will and do not need to be demuxed.
Definition: avformat.h:936
int index
Definition: ffmpeg.h:444
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
enum AVMediaType type
Definition: ffmpeg.h:267
AVFifoBuffer * frame_queue
Definition: ffmpeg.h:242
#define AUTO_INSERT_FILTER_INPUT(opt_name, filter_name, arg)
int av_opt_set(void *obj, const char *name, const char *val, int search_flags)
Definition: opt.c:449
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
Definition: avcodec.h:2631
const AVFilter * filter
the AVFilter of which this is an instance
Definition: avfilter.h:341
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
#define tb
Definition: regdef.h:68
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
Definition: buffersrc.c:150
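Putting the buffersrc and buffersink entry points together, a hedged sketch of the per-frame feed/drain loop an application runs against a configured graph (sketch_push_and_pull, src, sink and the error handling are illustrative, not the exact logic of this file):

#include "libavfilter/buffersrc.h"
#include "libavfilter/buffersink.h"
#include "libavutil/error.h"
#include "libavutil/frame.h"

static int sketch_push_and_pull(AVFilterContext *src, AVFilterContext *sink,
                                AVFrame *decoded, AVFrame *filtered)
{
    /* for refcounted frames, buffersrc takes over the references and resets the frame */
    int ret = av_buffersrc_add_frame(src, decoded);
    if (ret < 0)
        return ret;

    /* drain everything the graph can produce right now */
    while ((ret = av_buffersink_get_frame(sink, filtered)) >= 0) {
        /* ... use the filtered frame ... */
        av_frame_unref(filtered);
    }
    /* AVERROR(EAGAIN) / AVERROR_EOF just mean "no more output for now" */
    return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
}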
InputStream ** input_streams
Definition: ffmpeg.c:147
int avio_printf(AVIOContext *s, const char *fmt,...) av_printf_format(2
Writes a formatted string to the context.
discard nothing
Definition: avcodec.h:808
const char * name
Definition: opengl_enc.c:102
static uint8_t tmp[11]
Definition: aes_ctr.c:26