FFmpeg
ffmpeg_filter.c
1 /*
2  * ffmpeg filter configuration
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <stdint.h>
22 
23 #include "ffmpeg.h"
24 
25 #include "libavfilter/avfilter.h"
26 #include "libavfilter/buffersink.h"
27 #include "libavfilter/buffersrc.h"
28 
30 
31 #include "libavutil/avassert.h"
32 #include "libavutil/avstring.h"
33 #include "libavutil/bprint.h"
34 #include "libavutil/channel_layout.h"
35 #include "libavutil/display.h"
36 #include "libavutil/opt.h"
37 #include "libavutil/pixdesc.h"
38 #include "libavutil/pixfmt.h"
39 #include "libavutil/imgutils.h"
40 #include "libavutil/samplefmt.h"
41 
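/* When the encoder runs with strict_std_compliance at FF_COMPLIANCE_UNOFFICIAL
 * or below, MJPEG and LJPEG accept full-range YUV/RGB pixel formats beyond
 * their default lists; this helper swaps in those extended lists. */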
42 static const enum AVPixelFormat *get_compliance_unofficial_pix_fmts(enum AVCodecID codec_id, const enum AVPixelFormat default_formats[])
43 {
44  static const enum AVPixelFormat mjpeg_formats[] =
45  { AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P,
46  AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P,
47  AV_PIX_FMT_NONE };
48  static const enum AVPixelFormat ljpeg_formats[] =
49  { AV_PIX_FMT_BGR24, AV_PIX_FMT_BGRA, AV_PIX_FMT_BGR0,
50  AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P,
51  AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P,
52  AV_PIX_FMT_NONE};
53 
54  if (codec_id == AV_CODEC_ID_MJPEG) {
55  return mjpeg_formats;
56  } else if (codec_id == AV_CODEC_ID_LJPEG) {
57  return ljpeg_formats;
58  } else {
59  return default_formats;
60  }
61 }
62 
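/* Pick the pixel format the encoder will actually accept: if the requested
 * format is not in codec->pix_fmts, fall back to the closest supported one
 * (via avcodec_find_best_pix_fmt_of_2) and warn about the substitution. */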
63 enum AVPixelFormat choose_pixel_fmt(AVStream *st, AVCodecContext *enc_ctx, AVCodec *codec, enum AVPixelFormat target)
64 {
65  if (codec && codec->pix_fmts) {
66  const enum AVPixelFormat *p = codec->pix_fmts;
67  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(target);
68  //FIXME: This should check for AV_PIX_FMT_FLAG_ALPHA after PAL8 pixel format without alpha is implemented
69  int has_alpha = desc ? desc->nb_components % 2 == 0 : 0;
70  enum AVPixelFormat best= AV_PIX_FMT_NONE;
71 
72  if (enc_ctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL) {
73  p = get_compliance_unofficial_pix_fmts(enc_ctx->codec_id, p);
74  }
75  for (; *p != AV_PIX_FMT_NONE; p++) {
76  best= avcodec_find_best_pix_fmt_of_2(best, *p, target, has_alpha, NULL);
77  if (*p == target)
78  break;
79  }
80  if (*p == AV_PIX_FMT_NONE) {
81  if (target != AV_PIX_FMT_NONE)
83  "Incompatible pixel format '%s' for codec '%s', auto-selecting format '%s'\n",
84  av_get_pix_fmt_name(target),
85  codec->name,
86  av_get_pix_fmt_name(best));
87  return best;
88  }
89  }
90  return target;
91 }
92 
93 void choose_sample_fmt(AVStream *st, AVCodec *codec)
94 {
95  if (codec && codec->sample_fmts) {
96  const enum AVSampleFormat *p = codec->sample_fmts;
97  for (; *p != -1; p++) {
98  if (*p == st->codecpar->format)
99  break;
100  }
101  if (*p == -1) {
102  const AVCodecDescriptor *desc = avcodec_descriptor_get(codec->id);
103  if (desc && (desc->props & AV_CODEC_PROP_LOSSLESS))
104  av_log(NULL, AV_LOG_ERROR, "Conversion will not be lossless.\n");
105 
106  av_log(NULL, AV_LOG_WARNING,
107  "Incompatible sample format '%s' for codec '%s', auto-selecting format '%s'\n",
108  av_get_sample_fmt_name(st->codecpar->format),
109  codec->name,
110  av_get_sample_fmt_name(codec->sample_fmts[0]));
111  st->codecpar->format = codec->sample_fmts[0];
112  }
113  }
114 }
115 
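/* Build the '|'-separated list of pixel format names that is handed to the
 * output "format" filter. Honors -strict, and when keep_pix_fmt is set it
 * disables automatic conversions in the graph so the format is preserved. */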
116 static char *choose_pix_fmts(OutputFilter *ofilter)
117 {
118  OutputStream *ost = ofilter->ost;
119  AVDictionaryEntry *strict_dict = av_dict_get(ost->encoder_opts, "strict", NULL, 0);
120  if (strict_dict)
121  // used by choose_pixel_fmt() and below
122  av_opt_set(ost->enc_ctx, "strict", strict_dict->value, 0);
123 
124  if (ost->keep_pix_fmt) {
125  avfilter_graph_set_auto_convert(ofilter->graph->graph,
126  AVFILTER_AUTO_CONVERT_NONE);
127  if (ost->enc_ctx->pix_fmt == AV_PIX_FMT_NONE)
128  return NULL;
129  return av_strdup(av_get_pix_fmt_name(ost->enc_ctx->pix_fmt));
130  }
131  if (ost->enc_ctx->pix_fmt != AV_PIX_FMT_NONE) {
132  return av_strdup(av_get_pix_fmt_name(choose_pixel_fmt(ost->st, ost->enc_ctx, ost->enc, ost->enc_ctx->pix_fmt)));
133  } else if (ost->enc && ost->enc->pix_fmts) {
134  const enum AVPixelFormat *p;
135  AVIOContext *s = NULL;
136  uint8_t *ret;
137  int len;
138 
139  if (avio_open_dyn_buf(&s) < 0)
140  exit_program(1);
141 
142  p = ost->enc->pix_fmts;
143  if (ost->enc_ctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL) {
144  p = get_compliance_unofficial_pix_fmts(ost->enc_ctx->codec_id, p);
145  }
146 
147  for (; *p != AV_PIX_FMT_NONE; p++) {
148  const char *name = av_get_pix_fmt_name(*p);
149  avio_printf(s, "%s|", name);
150  }
151  len = avio_close_dyn_buf(s, &ret);
152  ret[len - 1] = 0;
153  return ret;
154  } else
155  return NULL;
156 }
157 
158 /* Define a function for building a string containing a list of
159  * allowed formats. */
160 #define DEF_CHOOSE_FORMAT(suffix, type, var, supported_list, none, get_name) \
161 static char *choose_ ## suffix (OutputFilter *ofilter) \
162 { \
163  if (ofilter->var != none) { \
164  get_name(ofilter->var); \
165  return av_strdup(name); \
166  } else if (ofilter->supported_list) { \
167  const type *p; \
168  AVIOContext *s = NULL; \
169  uint8_t *ret; \
170  int len; \
171  \
172  if (avio_open_dyn_buf(&s) < 0) \
173  exit_program(1); \
174  \
175  for (p = ofilter->supported_list; *p != none; p++) { \
176  get_name(*p); \
177  avio_printf(s, "%s|", name); \
178  } \
179  len = avio_close_dyn_buf(s, &ret); \
180  ret[len - 1] = 0; \
181  return ret; \
182  } else \
183  return NULL; \
184 }
185 
186 //DEF_CHOOSE_FORMAT(pix_fmts, enum AVPixelFormat, format, formats, AV_PIX_FMT_NONE,
187 // GET_PIX_FMT_NAME)
188 
189 DEF_CHOOSE_FORMAT(sample_fmts, enum AVSampleFormat, format, formats,
190  AV_SAMPLE_FMT_NONE, GET_SAMPLE_FMT_NAME)
191 
192 DEF_CHOOSE_FORMAT(sample_rates, int, sample_rate, sample_rates, 0,
193  GET_SAMPLE_RATE_NAME)
194 
195 DEF_CHOOSE_FORMAT(channel_layouts, uint64_t, channel_layout, channel_layouts, 0,
196  GET_CH_LAYOUT_NAME)
197 
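/* Create a trivial filtergraph with exactly one input and one output,
 * binding input stream ist to output stream ost. */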
198 int init_simple_filtergraph(InputStream *ist, OutputStream *ost)
199 {
200  FilterGraph *fg = av_mallocz(sizeof(*fg));
201 
202  if (!fg)
203  exit_program(1);
204  fg->index = nb_filtergraphs;
205 
206  GROW_ARRAY(fg->outputs, fg->nb_outputs);
207  if (!(fg->outputs[0] = av_mallocz(sizeof(*fg->outputs[0]))))
208  exit_program(1);
209  fg->outputs[0]->ost = ost;
210  fg->outputs[0]->graph = fg;
211  fg->outputs[0]->format = -1;
212 
213  ost->filter = fg->outputs[0];
214 
215  GROW_ARRAY(fg->inputs, fg->nb_inputs);
216  if (!(fg->inputs[0] = av_mallocz(sizeof(*fg->inputs[0]))))
217  exit_program(1);
218  fg->inputs[0]->ist = ist;
219  fg->inputs[0]->graph = fg;
220  fg->inputs[0]->format = -1;
221 
222  fg->inputs[0]->frame_queue = av_fifo_alloc(8 * sizeof(AVFrame*));
223  if (!fg->inputs[0]->frame_queue)
224  exit_program(1);
225 
226  GROW_ARRAY(ist->filters, ist->nb_filters);
227  ist->filters[ist->nb_filters - 1] = fg->inputs[0];
228 
229  GROW_ARRAY(filtergraphs, nb_filtergraphs);
230  filtergraphs[nb_filtergraphs - 1] = fg;
231 
232  return 0;
233 }
234 
235 static char *describe_filter_link(FilterGraph *fg, AVFilterInOut *inout, int in)
236 {
237  AVFilterContext *ctx = inout->filter_ctx;
238  AVFilterPad *pads = in ? ctx->input_pads : ctx->output_pads;
239  int nb_pads = in ? ctx->nb_inputs : ctx->nb_outputs;
240  AVIOContext *pb;
241  uint8_t *res = NULL;
242 
243  if (avio_open_dyn_buf(&pb) < 0)
244  exit_program(1);
245 
246  avio_printf(pb, "%s", ctx->filter->name);
247  if (nb_pads > 1)
248  avio_printf(pb, ":%s", avfilter_pad_get_name(pads, inout->pad_idx));
249  avio_w8(pb, 0);
250  avio_close_dyn_buf(pb, &res);
251  return res;
252 }
253 
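/* Bind one (possibly labeled) filtergraph input pad to an input stream:
 * a "file_index:stream_specifier" label selects the stream explicitly,
 * otherwise the first not-yet-used stream of the matching type is taken. */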
254 static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
255 {
256  InputStream *ist = NULL;
257  enum AVMediaType type = avfilter_pad_get_type(in->filter_ctx->input_pads, in->pad_idx);
258  int i;
259 
260  // TODO: support other filter types
261  if (type != AVMEDIA_TYPE_VIDEO && type != AVMEDIA_TYPE_AUDIO) {
262  av_log(NULL, AV_LOG_FATAL, "Only video and audio filters supported "
263  "currently.\n");
264  exit_program(1);
265  }
266 
267  if (in->name) {
268  AVFormatContext *s;
269  AVStream *st = NULL;
270  char *p;
271  int file_idx = strtol(in->name, &p, 0);
272 
273  if (file_idx < 0 || file_idx >= nb_input_files) {
274  av_log(NULL, AV_LOG_FATAL, "Invalid file index %d in filtergraph description %s.\n",
275  file_idx, fg->graph_desc);
276  exit_program(1);
277  }
278  s = input_files[file_idx]->ctx;
279 
280  for (i = 0; i < s->nb_streams; i++) {
281  enum AVMediaType stream_type = s->streams[i]->codecpar->codec_type;
282  if (stream_type != type &&
283  !(stream_type == AVMEDIA_TYPE_SUBTITLE &&
284  type == AVMEDIA_TYPE_VIDEO /* sub2video hack */))
285  continue;
286  if (check_stream_specifier(s, s->streams[i], *p == ':' ? p + 1 : p) == 1) {
287  st = s->streams[i];
288  break;
289  }
290  }
291  if (!st) {
292  av_log(NULL, AV_LOG_FATAL, "Stream specifier '%s' in filtergraph description %s "
293  "matches no streams.\n", p, fg->graph_desc);
294  exit_program(1);
295  }
296  ist = input_streams[input_files[file_idx]->ist_index + st->index];
297  if (ist->user_set_discard == AVDISCARD_ALL) {
298  av_log(NULL, AV_LOG_FATAL, "Stream specifier '%s' in filtergraph description %s "
299  "matches a disabled input stream.\n", p, fg->graph_desc);
300  exit_program(1);
301  }
302  } else {
303  /* find the first unused stream of corresponding type */
304  for (i = 0; i < nb_input_streams; i++) {
305  ist = input_streams[i];
306  if (ist->user_set_discard == AVDISCARD_ALL)
307  continue;
308  if (ist->dec_ctx->codec_type == type && ist->discard)
309  break;
310  }
311  if (i == nb_input_streams) {
312  av_log(NULL, AV_LOG_FATAL, "Cannot find a matching stream for "
313  "unlabeled input pad %d on filter %s\n", in->pad_idx,
314  in->filter_ctx->name);
315  exit_program(1);
316  }
317  }
318  av_assert0(ist);
319 
320  ist->discard = 0;
321  ist->decoding_needed |= DECODING_FOR_FILTER;
322  ist->st->discard = AVDISCARD_NONE;
323 
324  GROW_ARRAY(fg->inputs, fg->nb_inputs);
325  if (!(fg->inputs[fg->nb_inputs - 1] = av_mallocz(sizeof(*fg->inputs[0]))))
326  exit_program(1);
327  fg->inputs[fg->nb_inputs - 1]->ist = ist;
328  fg->inputs[fg->nb_inputs - 1]->graph = fg;
329  fg->inputs[fg->nb_inputs - 1]->format = -1;
330  fg->inputs[fg->nb_inputs - 1]->type = ist->st->codecpar->codec_type;
331  fg->inputs[fg->nb_inputs - 1]->name = describe_filter_link(fg, in, 1);
332 
333  fg->inputs[fg->nb_inputs - 1]->frame_queue = av_fifo_alloc(8 * sizeof(AVFrame*));
334  if (!fg->inputs[fg->nb_inputs - 1]->frame_queue)
335  exit_program(1);
336 
337  GROW_ARRAY(ist->filters, ist->nb_filters);
338  ist->filters[ist->nb_filters - 1] = fg->inputs[fg->nb_inputs - 1];
339 }
340 
341 int init_complex_filtergraph(FilterGraph *fg)
342 {
343  AVFilterInOut *inputs, *outputs, *cur;
344  AVFilterGraph *graph;
345  int ret = 0;
346 
347  /* this graph is only used for determining the kinds of inputs
348  * and outputs we have, and is discarded on exit from this function */
349  graph = avfilter_graph_alloc();
350  if (!graph)
351  return AVERROR(ENOMEM);
352  graph->nb_threads = 1;
353 
354  ret = avfilter_graph_parse2(graph, fg->graph_desc, &inputs, &outputs);
355  if (ret < 0)
356  goto fail;
357 
358  for (cur = inputs; cur; cur = cur->next)
359  init_input_filter(fg, cur);
360 
361  for (cur = outputs; cur;) {
362  GROW_ARRAY(fg->outputs, fg->nb_outputs);
363  fg->outputs[fg->nb_outputs - 1] = av_mallocz(sizeof(*fg->outputs[0]));
364  if (!fg->outputs[fg->nb_outputs - 1])
365  exit_program(1);
366 
367  fg->outputs[fg->nb_outputs - 1]->graph = fg;
368  fg->outputs[fg->nb_outputs - 1]->out_tmp = cur;
369  fg->outputs[fg->nb_outputs - 1]->type = avfilter_pad_get_type(cur->filter_ctx->output_pads,
370  cur->pad_idx);
371  fg->outputs[fg->nb_outputs - 1]->name = describe_filter_link(fg, cur, 0);
372  cur = cur->next;
373  fg->outputs[fg->nb_outputs - 1]->out_tmp->next = NULL;
374  }
375 
376 fail:
377  avfilter_inout_free(&inputs);
378  avfilter_graph_free(&graph);
379  return ret;
380 }
381 
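/* Append a trim (video) or atrim (audio) filter after *last_filter so that
 * the stream honors the requested start time and duration. */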
382 static int insert_trim(int64_t start_time, int64_t duration,
383  AVFilterContext **last_filter, int *pad_idx,
384  const char *filter_name)
385 {
386  AVFilterGraph *graph = (*last_filter)->graph;
387  AVFilterContext *ctx;
388  const AVFilter *trim;
389  enum AVMediaType type = avfilter_pad_get_type((*last_filter)->output_pads, *pad_idx);
390  const char *name = (type == AVMEDIA_TYPE_VIDEO) ? "trim" : "atrim";
391  int ret = 0;
392 
393  if (duration == INT64_MAX && start_time == AV_NOPTS_VALUE)
394  return 0;
395 
396  trim = avfilter_get_by_name(name);
397  if (!trim) {
398  av_log(NULL, AV_LOG_ERROR, "%s filter not present, cannot limit "
399  "recording time.\n", name);
400  return AVERROR_FILTER_NOT_FOUND;
401  }
402 
403  ctx = avfilter_graph_alloc_filter(graph, trim, filter_name);
404  if (!ctx)
405  return AVERROR(ENOMEM);
406 
407  if (duration != INT64_MAX) {
408  ret = av_opt_set_int(ctx, "durationi", duration,
409  AV_OPT_SEARCH_CHILDREN);
410  }
411  if (ret >= 0 && start_time != AV_NOPTS_VALUE) {
412  ret = av_opt_set_int(ctx, "starti", start_time,
413  AV_OPT_SEARCH_CHILDREN);
414  }
415  if (ret < 0) {
416  av_log(ctx, AV_LOG_ERROR, "Error configuring the %s filter", name);
417  return ret;
418  }
419 
420  ret = avfilter_init_str(ctx, NULL);
421  if (ret < 0)
422  return ret;
423 
424  ret = avfilter_link(*last_filter, *pad_idx, ctx, 0);
425  if (ret < 0)
426  return ret;
427 
428  *last_filter = ctx;
429  *pad_idx = 0;
430  return 0;
431 }
432 
433 static int insert_filter(AVFilterContext **last_filter, int *pad_idx,
434  const char *filter_name, const char *args)
435 {
436  AVFilterGraph *graph = (*last_filter)->graph;
437  AVFilterContext *ctx;
438  int ret;
439 
440  ret = avfilter_graph_create_filter(&ctx,
441  avfilter_get_by_name(filter_name),
442  filter_name, args, NULL, graph);
443  if (ret < 0)
444  return ret;
445 
446  ret = avfilter_link(*last_filter, *pad_idx, ctx, 0);
447  if (ret < 0)
448  return ret;
449 
450  *last_filter = ctx;
451  *pad_idx = 0;
452  return 0;
453 }
454 
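/* Terminate a video output of the graph: create the buffersink, then insert
 * (as needed) a scaler, a pixel-format constraint and a trim filter between
 * the last filter of the user graph and that sink. */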
455 static int configure_output_video_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
456 {
457  char *pix_fmts;
458  OutputStream *ost = ofilter->ost;
459  OutputFile *of = output_files[ost->file_index];
460  AVFilterContext *last_filter = out->filter_ctx;
461  int pad_idx = out->pad_idx;
462  int ret;
463  char name[255];
464 
465  snprintf(name, sizeof(name), "out_%d_%d", ost->file_index, ost->index);
466  ret = avfilter_graph_create_filter(&ofilter->filter,
467  avfilter_get_by_name("buffersink"),
468  name, NULL, NULL, fg->graph);
469 
470  if (ret < 0)
471  return ret;
472 
473  if ((ofilter->width || ofilter->height) && ofilter->ost->autoscale) {
474  char args[255];
475  AVFilterContext *filter;
476  AVDictionaryEntry *e = NULL;
477 
478  snprintf(args, sizeof(args), "%d:%d",
479  ofilter->width, ofilter->height);
480 
481  while ((e = av_dict_get(ost->sws_dict, "", e,
482  AV_DICT_IGNORE_SUFFIX))) {
483  av_strlcatf(args, sizeof(args), ":%s=%s", e->key, e->value);
484  }
485 
486  snprintf(name, sizeof(name), "scaler_out_%d_%d",
487  ost->file_index, ost->index);
488  if ((ret = avfilter_graph_create_filter(&filter, avfilter_get_by_name("scale"),
489  name, args, NULL, fg->graph)) < 0)
490  return ret;
491  if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
492  return ret;
493 
494  last_filter = filter;
495  pad_idx = 0;
496  }
497 
498  if ((pix_fmts = choose_pix_fmts(ofilter))) {
499  AVFilterContext *filter;
500  snprintf(name, sizeof(name), "format_out_%d_%d",
501  ost->file_index, ost->index);
502  ret = avfilter_graph_create_filter(&filter,
503  avfilter_get_by_name("format"),
504  "format", pix_fmts, NULL, fg->graph);
505  av_freep(&pix_fmts);
506  if (ret < 0)
507  return ret;
508  if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
509  return ret;
510 
511  last_filter = filter;
512  pad_idx = 0;
513  }
514 
515  if (ost->frame_rate.num && 0) {
516  AVFilterContext *fps;
517  char args[255];
518 
519  snprintf(args, sizeof(args), "fps=%d/%d", ost->frame_rate.num,
520  ost->frame_rate.den);
521  snprintf(name, sizeof(name), "fps_out_%d_%d",
522  ost->file_index, ost->index);
523  ret = avfilter_graph_create_filter(&fps, avfilter_get_by_name("fps"),
524  name, args, NULL, fg->graph);
525  if (ret < 0)
526  return ret;
527 
528  ret = avfilter_link(last_filter, pad_idx, fps, 0);
529  if (ret < 0)
530  return ret;
531  last_filter = fps;
532  pad_idx = 0;
533  }
534 
535  snprintf(name, sizeof(name), "trim_out_%d_%d",
536  ost->file_index, ost->index);
537  ret = insert_trim(of->start_time, of->recording_time,
538  &last_filter, &pad_idx, name);
539  if (ret < 0)
540  return ret;
541 
542 
543  if ((ret = avfilter_link(last_filter, pad_idx, ofilter->filter, 0)) < 0)
544  return ret;
545 
546  return 0;
547 }
548 
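/* Terminate an audio output: create an abuffersink and, between the user's
 * last filter and the sink, insert pan (for -map_channel), aformat for the
 * sample format/rate/channel-layout constraints, an optional apad and a trim. */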
549 static int configure_output_audio_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
550 {
551  OutputStream *ost = ofilter->ost;
552  OutputFile *of = output_files[ost->file_index];
553  AVCodecContext *codec = ost->enc_ctx;
554  AVFilterContext *last_filter = out->filter_ctx;
555  int pad_idx = out->pad_idx;
556  char *sample_fmts, *sample_rates, *channel_layouts;
557  char name[255];
558  int ret;
559 
560  snprintf(name, sizeof(name), "out_%d_%d", ost->file_index, ost->index);
561  ret = avfilter_graph_create_filter(&ofilter->filter,
562  avfilter_get_by_name("abuffersink"),
563  name, NULL, NULL, fg->graph);
564  if (ret < 0)
565  return ret;
566  if ((ret = av_opt_set_int(ofilter->filter, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
567  return ret;
568 
569 #define AUTO_INSERT_FILTER(opt_name, filter_name, arg) do { \
570  AVFilterContext *filt_ctx; \
571  \
572  av_log(NULL, AV_LOG_INFO, opt_name " is forwarded to lavfi " \
573  "similarly to -af " filter_name "=%s.\n", arg); \
574  \
575  ret = avfilter_graph_create_filter(&filt_ctx, \
576  avfilter_get_by_name(filter_name), \
577  filter_name, arg, NULL, fg->graph); \
578  if (ret < 0) \
579  return ret; \
580  \
581  ret = avfilter_link(last_filter, pad_idx, filt_ctx, 0); \
582  if (ret < 0) \
583  return ret; \
584  \
585  last_filter = filt_ctx; \
586  pad_idx = 0; \
587 } while (0)
588  if (ost->audio_channels_mapped) {
589  int i;
590  AVBPrint pan_buf;
591  av_bprint_init(&pan_buf, 256, 8192);
592  av_bprintf(&pan_buf, "0x%"PRIx64,
593  av_get_default_channel_layout(ost->audio_channels_mapped));
594  for (i = 0; i < ost->audio_channels_mapped; i++)
595  if (ost->audio_channels_map[i] != -1)
596  av_bprintf(&pan_buf, "|c%d=c%d", i, ost->audio_channels_map[i]);
597 
598  AUTO_INSERT_FILTER("-map_channel", "pan", pan_buf.str);
599  av_bprint_finalize(&pan_buf, NULL);
600  }
601 
602  if (codec->channels && !codec->channel_layout)
603  codec->channel_layout = av_get_default_channel_layout(codec->channels);
604 
605  sample_fmts = choose_sample_fmts(ofilter);
606  sample_rates = choose_sample_rates(ofilter);
607  channel_layouts = choose_channel_layouts(ofilter);
608  if (sample_fmts || sample_rates || channel_layouts) {
609  AVFilterContext *format;
610  char args[256];
611  args[0] = 0;
612 
613  if (sample_fmts)
614  av_strlcatf(args, sizeof(args), "sample_fmts=%s:",
615  sample_fmts);
616  if (sample_rates)
617  av_strlcatf(args, sizeof(args), "sample_rates=%s:",
618  sample_rates);
619  if (channel_layouts)
620  av_strlcatf(args, sizeof(args), "channel_layouts=%s:",
621  channel_layouts);
622 
623  av_freep(&sample_fmts);
624  av_freep(&sample_rates);
625  av_freep(&channel_layouts);
626 
627  snprintf(name, sizeof(name), "format_out_%d_%d",
628  ost->file_index, ost->index);
629  ret = avfilter_graph_create_filter(&format,
630  avfilter_get_by_name("aformat"),
631  name, args, NULL, fg->graph);
632  if (ret < 0)
633  return ret;
634 
635  ret = avfilter_link(last_filter, pad_idx, format, 0);
636  if (ret < 0)
637  return ret;
638 
639  last_filter = format;
640  pad_idx = 0;
641  }
642 
643  if (audio_volume != 256 && 0) {
644  char args[256];
645 
646  snprintf(args, sizeof(args), "%f", audio_volume / 256.);
647  AUTO_INSERT_FILTER("-vol", "volume", args);
648  }
649 
650  if (ost->apad && of->shortest) {
651  char args[256];
652  int i;
653 
654  for (i=0; i<of->ctx->nb_streams; i++)
655  if (of->ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
656  break;
657 
658  if (i<of->ctx->nb_streams) {
659  snprintf(args, sizeof(args), "%s", ost->apad);
660  AUTO_INSERT_FILTER("-apad", "apad", args);
661  }
662  }
663 
664  snprintf(name, sizeof(name), "trim for output stream %d:%d",
665  ost->file_index, ost->index);
666  ret = insert_trim(of->start_time, of->recording_time,
667  &last_filter, &pad_idx, name);
668  if (ret < 0)
669  return ret;
670 
671  if ((ret = avfilter_link(last_filter, pad_idx, ofilter->filter, 0)) < 0)
672  return ret;
673 
674  return 0;
675 }
676 
677 int configure_output_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
678 {
679  if (!ofilter->ost) {
680  av_log(NULL, AV_LOG_FATAL, "Filter %s has an unconnected output\n", ofilter->name);
681  exit_program(1);
682  }
683 
684  switch (avfilter_pad_get_type(out->filter_ctx->output_pads, out->pad_idx)) {
685  case AVMEDIA_TYPE_VIDEO: return configure_output_video_filter(fg, ofilter, out);
686  case AVMEDIA_TYPE_AUDIO: return configure_output_audio_filter(fg, ofilter, out);
687  default: av_assert0(0);
688  }
689 }
690 
691 void check_filter_outputs(void)
692 {
693  int i;
694  for (i = 0; i < nb_filtergraphs; i++) {
695  int n;
696  for (n = 0; n < filtergraphs[i]->nb_outputs; n++) {
697  OutputFilter *output = filtergraphs[i]->outputs[n];
698  if (!output->ost) {
699  av_log(NULL, AV_LOG_FATAL, "Filter %s has an unconnected output\n", output->name);
700  exit_program(1);
701  }
702  }
703  }
704 }
705 
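/* Prepare an input stream for "sub2video": decoded subtitles are rendered
 * onto a video canvas and fed to the filtergraph as RGB32 frames. */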
706 static int sub2video_prepare(InputStream *ist, InputFilter *ifilter)
707 {
708  AVFormatContext *avf = input_files[ist->file_index]->ctx;
709  int i, w, h;
710 
711  /* Compute the size of the canvas for the subtitles stream.
712  If the subtitles codecpar has set a size, use it. Otherwise use the
713  maximum dimensions of the video streams in the same file. */
714  w = ifilter->width;
715  h = ifilter->height;
716  if (!(w && h)) {
717  for (i = 0; i < avf->nb_streams; i++) {
718  if (avf->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
719  w = FFMAX(w, avf->streams[i]->codecpar->width);
720  h = FFMAX(h, avf->streams[i]->codecpar->height);
721  }
722  }
723  if (!(w && h)) {
724  w = FFMAX(w, 720);
725  h = FFMAX(h, 576);
726  }
727  av_log(avf, AV_LOG_INFO, "sub2video: using %dx%d canvas\n", w, h);
728  }
729  ist->sub2video.w = ifilter->width = w;
730  ist->sub2video.h = ifilter->height = h;
731 
732  ifilter->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
733  ifilter->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
734 
735  /* rectangles are AV_PIX_FMT_PAL8, but we have no guarantee that the
736  palettes for all rectangles are identical or compatible */
737  ifilter->format = AV_PIX_FMT_RGB32;
738 
739  ist->sub2video.frame = av_frame_alloc();
740  if (!ist->sub2video.frame)
741  return AVERROR(ENOMEM);
742  ist->sub2video.last_pts = INT64_MIN;
743  ist->sub2video.end_pts = INT64_MIN;
744 
745  /* sub2video structure has been (re-)initialized.
746  Mark it as such so that the system will be
747  initialized with the first received heartbeat. */
748  ist->sub2video.initialize = 1;
749 
750  return 0;
751 }
752 
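/* Feed one video (or sub2video) input: create a buffer source described by
 * size, pixel format, time base, SAR and frame rate, then optionally insert
 * autorotate (transpose/hflip/vflip/rotate), deinterlace and trim filters
 * before linking to the user's graph. */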
753 static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
754  AVFilterInOut *in)
755 {
756  AVFilterContext *last_filter;
757  const AVFilter *buffer_filt = avfilter_get_by_name("buffer");
758  InputStream *ist = ifilter->ist;
759  InputFile *f = input_files[ist->file_index];
760  AVRational tb = ist->framerate.num ? av_inv_q(ist->framerate) :
761  ist->st->time_base;
762  AVRational fr = ist->framerate;
763  AVRational sar;
764  AVBPrint args;
765  char name[255];
766  int ret, pad_idx = 0;
767  int64_t tsoffset = 0;
768  AVBufferSrcParameters *par = av_buffersrc_parameters_alloc();
769 
770  if (!par)
771  return AVERROR(ENOMEM);
772  memset(par, 0, sizeof(*par));
773  par->format = AV_PIX_FMT_NONE;
774 
775  if (ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
776  av_log(NULL, AV_LOG_ERROR, "Cannot connect video filter to audio input\n");
777  ret = AVERROR(EINVAL);
778  goto fail;
779  }
780 
781  if (!fr.num)
782  fr = av_guess_frame_rate(input_files[ist->file_index]->ctx, ist->st, NULL);
783 
784  if (ist->dec_ctx->codec_type == AVMEDIA_TYPE_SUBTITLE) {
785  ret = sub2video_prepare(ist, ifilter);
786  if (ret < 0)
787  goto fail;
788  }
789 
790  sar = ifilter->sample_aspect_ratio;
791  if(!sar.den)
792  sar = (AVRational){0,1};
793  av_bprint_init(&args, 0, AV_BPRINT_SIZE_AUTOMATIC);
794  av_bprintf(&args,
795  "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:"
796  "pixel_aspect=%d/%d",
797  ifilter->width, ifilter->height, ifilter->format,
798  tb.num, tb.den, sar.num, sar.den);
799  if (fr.num && fr.den)
800  av_bprintf(&args, ":frame_rate=%d/%d", fr.num, fr.den);
801  snprintf(name, sizeof(name), "graph %d input from stream %d:%d", fg->index,
802  ist->file_index, ist->st->index);
803 
804 
805  if ((ret = avfilter_graph_create_filter(&ifilter->filter, buffer_filt, name,
806  args.str, NULL, fg->graph)) < 0)
807  goto fail;
808  par->hw_frames_ctx = ifilter->hw_frames_ctx;
809  ret = av_buffersrc_parameters_set(ifilter->filter, par);
810  if (ret < 0)
811  goto fail;
812  av_freep(&par);
813  last_filter = ifilter->filter;
814 
815  if (ist->autorotate) {
816  double theta = get_rotation(ist->st);
817 
818  if (fabs(theta - 90) < 1.0) {
819  ret = insert_filter(&last_filter, &pad_idx, "transpose", "clock");
820  } else if (fabs(theta - 180) < 1.0) {
821  ret = insert_filter(&last_filter, &pad_idx, "hflip", NULL);
822  if (ret < 0)
823  return ret;
824  ret = insert_filter(&last_filter, &pad_idx, "vflip", NULL);
825  } else if (fabs(theta - 270) < 1.0) {
826  ret = insert_filter(&last_filter, &pad_idx, "transpose", "cclock");
827  } else if (fabs(theta) > 1.0) {
828  char rotate_buf[64];
829  snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
830  ret = insert_filter(&last_filter, &pad_idx, "rotate", rotate_buf);
831  }
832  if (ret < 0)
833  return ret;
834  }
835 
836  if (do_deinterlace) {
837  AVFilterContext *yadif;
838 
839  snprintf(name, sizeof(name), "deinterlace_in_%d_%d",
840  ist->file_index, ist->st->index);
841  if ((ret = avfilter_graph_create_filter(&yadif,
842  avfilter_get_by_name("yadif"),
843  name, "", NULL,
844  fg->graph)) < 0)
845  return ret;
846 
847  if ((ret = avfilter_link(last_filter, 0, yadif, 0)) < 0)
848  return ret;
849 
850  last_filter = yadif;
851  }
852 
853  snprintf(name, sizeof(name), "trim_in_%d_%d",
854  ist->file_index, ist->st->index);
855  if (copy_ts) {
856  tsoffset = f->start_time == AV_NOPTS_VALUE ? 0 : f->start_time;
857  if (!start_at_zero && f->ctx->start_time != AV_NOPTS_VALUE)
858  tsoffset += f->ctx->start_time;
859  }
860  ret = insert_trim(((f->start_time == AV_NOPTS_VALUE) || !f->accurate_seek) ?
861  AV_NOPTS_VALUE : tsoffset, f->recording_time,
862  &last_filter, &pad_idx, name);
863  if (ret < 0)
864  return ret;
865 
866  if ((ret = avfilter_link(last_filter, 0, in->filter_ctx, in->pad_idx)) < 0)
867  return ret;
868  return 0;
869 fail:
870  av_freep(&par);
871 
872  return ret;
873 }
874 
875 static int configure_input_audio_filter(FilterGraph *fg, InputFilter *ifilter,
876  AVFilterInOut *in)
877 {
878  AVFilterContext *last_filter;
879  const AVFilter *abuffer_filt = avfilter_get_by_name("abuffer");
880  InputStream *ist = ifilter->ist;
881  InputFile *f = input_files[ist->file_index];
882  AVBPrint args;
883  char name[255];
884  int ret, pad_idx = 0;
885  int64_t tsoffset = 0;
886 
887  if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_AUDIO) {
888  av_log(NULL, AV_LOG_ERROR, "Cannot connect audio filter to non audio input\n");
889  return AVERROR(EINVAL);
890  }
891 
892  av_bprint_init(&args, 0, AV_BPRINT_SIZE_AUTOMATIC);
893  av_bprintf(&args, "time_base=%d/%d:sample_rate=%d:sample_fmt=%s",
894  1, ifilter->sample_rate,
895  ifilter->sample_rate,
896  av_get_sample_fmt_name(ifilter->format));
897  if (ifilter->channel_layout)
898  av_bprintf(&args, ":channel_layout=0x%"PRIx64,
899  ifilter->channel_layout);
900  else
901  av_bprintf(&args, ":channels=%d", ifilter->channels);
902  snprintf(name, sizeof(name), "graph_%d_in_%d_%d", fg->index,
903  ist->file_index, ist->st->index);
904 
905  if ((ret = avfilter_graph_create_filter(&ifilter->filter, abuffer_filt,
906  name, args.str, NULL,
907  fg->graph)) < 0)
908  return ret;
909  last_filter = ifilter->filter;
910 
911 #define AUTO_INSERT_FILTER_INPUT(opt_name, filter_name, arg) do { \
912  AVFilterContext *filt_ctx; \
913  \
914  av_log(NULL, AV_LOG_INFO, opt_name " is forwarded to lavfi " \
915  "similarly to -af " filter_name "=%s.\n", arg); \
916  \
917  snprintf(name, sizeof(name), "graph_%d_%s_in_%d_%d", \
918  fg->index, filter_name, ist->file_index, ist->st->index); \
919  ret = avfilter_graph_create_filter(&filt_ctx, \
920  avfilter_get_by_name(filter_name), \
921  name, arg, NULL, fg->graph); \
922  if (ret < 0) \
923  return ret; \
924  \
925  ret = avfilter_link(last_filter, 0, filt_ctx, 0); \
926  if (ret < 0) \
927  return ret; \
928  \
929  last_filter = filt_ctx; \
930 } while (0)
931 
932  if (audio_sync_method > 0) {
933  char args[256] = {0};
934 
935  av_strlcatf(args, sizeof(args), "async=%d", audio_sync_method);
936  if (audio_drift_threshold != 0.1)
937  av_strlcatf(args, sizeof(args), ":min_hard_comp=%f", audio_drift_threshold);
938  if (!fg->reconfiguration)
939  av_strlcatf(args, sizeof(args), ":first_pts=0");
940  AUTO_INSERT_FILTER_INPUT("-async", "aresample", args);
941  }
942 
943 // if (ost->audio_channels_mapped) {
944 // int i;
945 // AVBPrint pan_buf;
946 // av_bprint_init(&pan_buf, 256, 8192);
947 // av_bprintf(&pan_buf, "0x%"PRIx64,
948 // av_get_default_channel_layout(ost->audio_channels_mapped));
949 // for (i = 0; i < ost->audio_channels_mapped; i++)
950 // if (ost->audio_channels_map[i] != -1)
951 // av_bprintf(&pan_buf, ":c%d=c%d", i, ost->audio_channels_map[i]);
952 // AUTO_INSERT_FILTER_INPUT("-map_channel", "pan", pan_buf.str);
953 // av_bprint_finalize(&pan_buf, NULL);
954 // }
955 
956  if (audio_volume != 256) {
957  char args[256];
958 
959  av_log(NULL, AV_LOG_WARNING, "-vol has been deprecated. Use the volume "
960  "audio filter instead.\n");
961 
962  snprintf(args, sizeof(args), "%f", audio_volume / 256.);
963  AUTO_INSERT_FILTER_INPUT("-vol", "volume", args);
964  }
965 
966  snprintf(name, sizeof(name), "trim for input stream %d:%d",
967  ist->file_index, ist->st->index);
968  if (copy_ts) {
969  tsoffset = f->start_time == AV_NOPTS_VALUE ? 0 : f->start_time;
970  if (!start_at_zero && f->ctx->start_time != AV_NOPTS_VALUE)
971  tsoffset += f->ctx->start_time;
972  }
973  ret = insert_trim(((f->start_time == AV_NOPTS_VALUE) || !f->accurate_seek) ?
974  AV_NOPTS_VALUE : tsoffset, f->recording_time,
975  &last_filter, &pad_idx, name);
976  if (ret < 0)
977  return ret;
978 
979  if ((ret = avfilter_link(last_filter, 0, in->filter_ctx, in->pad_idx)) < 0)
980  return ret;
981 
982  return 0;
983 }
984 
985 static int configure_input_filter(FilterGraph *fg, InputFilter *ifilter,
986  AVFilterInOut *in)
987 {
988  if (!ifilter->ist->dec) {
990  "No decoder for stream #%d:%d, filtering impossible\n",
991  ifilter->ist->file_index, ifilter->ist->st->index);
992  return AVERROR_DECODER_NOT_FOUND;
993  }
994  switch (avfilter_pad_get_type(in->filter_ctx->input_pads, in->pad_idx)) {
995  case AVMEDIA_TYPE_VIDEO: return configure_input_video_filter(fg, ifilter, in);
996  case AVMEDIA_TYPE_AUDIO: return configure_input_audio_filter(fg, ifilter, in);
997  default: av_assert0(0);
998  }
999 }
1000 
1001 static void cleanup_filtergraph(FilterGraph *fg)
1002 {
1003  int i;
1004  for (i = 0; i < fg->nb_outputs; i++)
1005  fg->outputs[i]->filter = (AVFilterContext *)NULL;
1006  for (i = 0; i < fg->nb_inputs; i++)
1007  fg->inputs[i]->filter = (AVFilterContext *)NULL;
1008  avfilter_graph_free(&fg->graph);
1009 }
1010 
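/* (Re)build the whole filtergraph: parse the description, configure every
 * input and output, and for simple graphs apply the per-stream sws/swr and
 * threads options. Afterwards flush any frames, EOFs and queued subtitles
 * that arrived while the graph was not yet set up. */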
1011 int configure_filtergraph(FilterGraph *fg)
1012 {
1013  AVFilterInOut *inputs, *outputs, *cur;
1014  int ret, i, simple = filtergraph_is_simple(fg);
1015  const char *graph_desc = simple ? fg->outputs[0]->ost->avfilter :
1016  fg->graph_desc;
1017 
1018  cleanup_filtergraph(fg);
1019  if (!(fg->graph = avfilter_graph_alloc()))
1020  return AVERROR(ENOMEM);
1021 
1022  if (simple) {
1023  OutputStream *ost = fg->outputs[0]->ost;
1024  char args[512];
1025  AVDictionaryEntry *e = NULL;
1026 
1027  fg->graph->nb_threads = filter_nbthreads;
1028 
1029  args[0] = 0;
1030  while ((e = av_dict_get(ost->sws_dict, "", e,
1031  AV_DICT_IGNORE_SUFFIX))) {
1032  av_strlcatf(args, sizeof(args), "%s=%s:", e->key, e->value);
1033  }
1034  if (strlen(args))
1035  args[strlen(args)-1] = 0;
1036  fg->graph->scale_sws_opts = av_strdup(args);
1037 
1038  args[0] = 0;
1039  while ((e = av_dict_get(ost->swr_opts, "", e,
1040  AV_DICT_IGNORE_SUFFIX))) {
1041  av_strlcatf(args, sizeof(args), "%s=%s:", e->key, e->value);
1042  }
1043  if (strlen(args))
1044  args[strlen(args)-1] = 0;
1045  av_opt_set(fg->graph, "aresample_swr_opts", args, 0);
1046 
1047  args[0] = '\0';
1048  while ((e = av_dict_get(fg->outputs[0]->ost->resample_opts, "", e,
1049  AV_DICT_IGNORE_SUFFIX))) {
1050  av_strlcatf(args, sizeof(args), "%s=%s:", e->key, e->value);
1051  }
1052  if (strlen(args))
1053  args[strlen(args) - 1] = '\0';
1054 
1055  e = av_dict_get(ost->encoder_opts, "threads", NULL, 0);
1056  if (e)
1057  av_opt_set(fg->graph, "threads", e->value, 0);
1058  } else {
1059  fg->graph->nb_threads = filter_complex_nbthreads;
1060  }
1061 
1062  if ((ret = avfilter_graph_parse2(fg->graph, graph_desc, &inputs, &outputs)) < 0)
1063  goto fail;
1064 
1065  ret = hw_device_setup_for_filter(fg);
1066  if (ret < 0)
1067  goto fail;
1068 
1069  if (simple && (!inputs || inputs->next || !outputs || outputs->next)) {
1070  const char *num_inputs;
1071  const char *num_outputs;
1072  if (!outputs) {
1073  num_outputs = "0";
1074  } else if (outputs->next) {
1075  num_outputs = ">1";
1076  } else {
1077  num_outputs = "1";
1078  }
1079  if (!inputs) {
1080  num_inputs = "0";
1081  } else if (inputs->next) {
1082  num_inputs = ">1";
1083  } else {
1084  num_inputs = "1";
1085  }
1086  av_log(NULL, AV_LOG_ERROR, "Simple filtergraph '%s' was expected "
1087  "to have exactly 1 input and 1 output."
1088  " However, it had %s input(s) and %s output(s)."
1089  " Please adjust, or use a complex filtergraph (-filter_complex) instead.\n",
1090  graph_desc, num_inputs, num_outputs);
1091  ret = AVERROR(EINVAL);
1092  goto fail;
1093  }
1094 
1095  for (cur = inputs, i = 0; cur; cur = cur->next, i++)
1096  if ((ret = configure_input_filter(fg, fg->inputs[i], cur)) < 0) {
1097  avfilter_inout_free(&inputs);
1098  avfilter_inout_free(&outputs);
1099  goto fail;
1100  }
1101  avfilter_inout_free(&inputs);
1102 
1103  for (cur = outputs, i = 0; cur; cur = cur->next, i++)
1104  configure_output_filter(fg, fg->outputs[i], cur);
1105  avfilter_inout_free(&outputs);
1106 
1107  if ((ret = avfilter_graph_config(fg->graph, NULL)) < 0)
1108  goto fail;
1109 
1110  /* limit the lists of allowed formats to the ones selected, to
1111  * make sure they stay the same if the filtergraph is reconfigured later */
1112  for (i = 0; i < fg->nb_outputs; i++) {
1113  OutputFilter *ofilter = fg->outputs[i];
1114  AVFilterContext *sink = ofilter->filter;
1115 
1116  ofilter->format = av_buffersink_get_format(sink);
1117 
1118  ofilter->width = av_buffersink_get_w(sink);
1119  ofilter->height = av_buffersink_get_h(sink);
1120 
1121  ofilter->sample_rate = av_buffersink_get_sample_rate(sink);
1122  ofilter->channel_layout = av_buffersink_get_channel_layout(sink);
1123  }
1124 
1125  fg->reconfiguration = 1;
1126 
1127  for (i = 0; i < fg->nb_outputs; i++) {
1128  OutputStream *ost = fg->outputs[i]->ost;
1129  if (!ost->enc) {
1130  /* identical to the same check in ffmpeg.c, needed because
1131  complex filter graphs are initialized earlier */
1132  av_log(NULL, AV_LOG_ERROR, "Encoder (codec %s) not found for output stream #%d:%d\n",
1133  avcodec_get_name(ost->st->codecpar->codec_id), ost->file_index, ost->index);
1134  ret = AVERROR(EINVAL);
1135  goto fail;
1136  }
1137  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
1138  !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
1139  av_buffersink_set_frame_size(ofilter->filter,
1140  ost->enc_ctx->frame_size);
1141  }
1142 
1143  for (i = 0; i < fg->nb_inputs; i++) {
1144  while (av_fifo_size(fg->inputs[i]->frame_queue)) {
1145  AVFrame *tmp;
1146  av_fifo_generic_read(fg->inputs[i]->frame_queue, &tmp, sizeof(tmp), NULL);
1147  ret = av_buffersrc_add_frame(fg->inputs[i]->filter, tmp);
1148  av_frame_free(&tmp);
1149  if (ret < 0)
1150  goto fail;
1151  }
1152  }
1153 
1154  /* send the EOFs for the finished inputs */
1155  for (i = 0; i < fg->nb_inputs; i++) {
1156  if (fg->inputs[i]->eof) {
1157  ret = av_buffersrc_add_frame(fg->inputs[i]->filter, NULL);
1158  if (ret < 0)
1159  goto fail;
1160  }
1161  }
1162 
1163  /* process queued up subtitle packets */
1164  for (i = 0; i < fg->nb_inputs; i++) {
1165  InputStream *ist = fg->inputs[i]->ist;
1166  if (ist->sub2video.sub_queue && ist->sub2video.frame) {
1167  while (av_fifo_size(ist->sub2video.sub_queue)) {
1168  AVSubtitle tmp;
1169  av_fifo_generic_read(ist->sub2video.sub_queue, &tmp, sizeof(tmp), NULL);
1170  sub2video_update(ist, INT64_MIN, &tmp);
1171  avsubtitle_free(&tmp);
1172  }
1173  }
1174  }
1175 
1176  return 0;
1177 
1178 fail:
1179  cleanup_filtergraph(fg);
1180  return ret;
1181 }
1182 
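/* Record the properties of a decoded frame (dimensions, formats, channel
 * layout, hw frames context) on the InputFilter, so the buffer source can be
 * configured to match before the frame is filtered. */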
1183 int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame)
1184 {
1185  av_buffer_unref(&ifilter->hw_frames_ctx);
1186 
1187  ifilter->format = frame->format;
1188 
1189  ifilter->width = frame->width;
1190  ifilter->height = frame->height;
1191  ifilter->sample_aspect_ratio = frame->sample_aspect_ratio;
1192 
1193  ifilter->sample_rate = frame->sample_rate;
1194  ifilter->channels = frame->channels;
1195  ifilter->channel_layout = frame->channel_layout;
1196 
1197  if (frame->hw_frames_ctx) {
1198  ifilter->hw_frames_ctx = av_buffer_ref(frame->hw_frames_ctx);
1199  if (!ifilter->hw_frames_ctx)
1200  return AVERROR(ENOMEM);
1201  }
1202 
1203  return 0;
1204 }
1205 
1206 int ist_in_filtergraph(FilterGraph *fg, InputStream *ist)
1207 {
1208  int i;
1209  for (i = 0; i < fg->nb_inputs; i++)
1210  if (fg->inputs[i]->ist == ist)
1211  return 1;
1212  return 0;
1213 }
1214 
1215 int filtergraph_is_simple(FilterGraph *fg)
1216 {
1217  return !fg->graph_desc;
1218 }
void avfilter_graph_set_auto_convert(AVFilterGraph *graph, unsigned flags)
Enable or disable automatic format conversion inside the graph.
#define NULL
Definition: coverity.c:32
int width
Definition: ffmpeg.h:271
int keep_pix_fmt
Definition: ffmpeg.h:531
Bytestream IO Context.
Definition: avio.h:161
int64_t recording_time
desired length of the resulting file in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:561
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:94
uint8_t * name
Definition: ffmpeg.h:264
int nb_outputs
Definition: ffmpeg.h:293
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it...
Definition: buffer.c:125
AVDictionary * swr_opts
Definition: ffmpeg.h:512
#define DECODING_FOR_FILTER
Definition: ffmpeg.h:303
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2573
This structure describes decoded (raw) audio or video data.
Definition: frame.h:308
static int configure_input_filter(FilterGraph *fg, InputFilter *ifilter, AVFilterInOut *in)
int avio_close_dyn_buf(AVIOContext *s, uint8_t **pbuffer)
Return the written size and a pointer to the buffer.
Definition: aviobuf.c:1414
AVRational frame_rate
Definition: ffmpeg.h:479
double get_rotation(AVStream *st)
Definition: cmdutils.c:2201
int accurate_seek
Definition: ffmpeg.h:415
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
misc image utilities
AVFilterGraph * avfilter_graph_alloc(void)
Allocate a filter graph.
Definition: avfiltergraph.c:83
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:200
Main libavfilter public API header.
Memory buffer source API.
const char * desc
Definition: nvenc.c:87
AVRational framerate
Definition: ffmpeg.h:334
int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx)
Check validity and configure all the links and formats in the graph.
void avfilter_inout_free(AVFilterInOut **inout)
Free the supplied list of AVFilterInOut and set *inout to NULL.
Definition: graphparser.c:203
struct AVFilterInOut * next
next input/input in the list, NULL if this is the last
Definition: avfilter.h:1014
int height
Definition: ffmpeg.h:248
int check_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec)
Check if the given stream matches a stream specifier.
Definition: cmdutils.c:2097
AVFilterInOut * out_tmp
Definition: ffmpeg.h:267
enum AVMediaType avfilter_pad_get_type(const AVFilterPad *pads, int pad_idx)
Get the type of an AVFilterPad.
Definition: avfilter.c:1050
int decoding_needed
Definition: ffmpeg.h:301
enum AVCodecID codec_id
Specific type of the encoded data (the codec used).
Definition: codec_par.h:60
static int insert_trim(int64_t start_time, int64_t duration, AVFilterContext **last_filter, int *pad_idx, const char *filter_name)
int num
Numerator.
Definition: rational.h:59
static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
int index
stream index in AVFormatContext
Definition: avformat.h:877
int init_complex_filtergraph(FilterGraph *fg)
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:736
GLint GLenum type
Definition: opengl_enc.c:104
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:237
discard all
Definition: avcodec.h:236
int nb_input_streams
Definition: ffmpeg.c:148
#define DEF_CHOOSE_FORMAT(suffix, type, var, supported_list, none, get_name)
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample format(the sample packing is implied by the sample format) and sample rate.The lists are not just lists
static enum AVPixelFormat * get_compliance_unofficial_pix_fmts(enum AVCodecID codec_id, const enum AVPixelFormat default_formats[])
Definition: ffmpeg_filter.c:42
AVCodec.
Definition: codec.h:190
int avio_open_dyn_buf(AVIOContext **s)
Open a write only memory stream.
Definition: aviobuf.c:1369
int64_t start_time
start time in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:562
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:235
int index
Definition: ffmpeg.h:284
char * scale_sws_opts
sws options to use for the auto-inserted scale filters
Definition: avfilter.h:845
struct FilterGraph * graph
Definition: ffmpeg.h:239
AVBufferRef * hw_frames_ctx
For hwaccel-format frames, this should be a reference to the AVHWFramesContext describing the frame...
Definition: frame.h:647
Format I/O context.
Definition: avformat.h:1351
AVRational av_guess_frame_rate(AVFormatContext *ctx, AVStream *stream, AVFrame *frame)
Guess the frame rate, based on both the container and codec information.
Definition: utils.c:5135
int configure_filtergraph(FilterGraph *fg)
memory buffer sink API for audio and video
enum AVPixelFormat choose_pixel_fmt(AVStream *st, AVCodecContext *enc_ctx, AVCodec *codec, enum AVPixelFormat target)
Definition: ffmpeg_filter.c:63
struct InputStream * ist
Definition: ffmpeg.h:238
char * name
name of this filter instance
Definition: avfilter.h:343
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
AVFilterGraph * graph
Definition: ffmpeg.h:287
int av_buffersink_get_sample_rate(const AVFilterContext *ctx)
int avfilter_link(AVFilterContext *src, unsigned srcpad, AVFilterContext *dst, unsigned dstpad)
Link two filters together.
Definition: avfilter.c:135
AVFilterPad * output_pads
array of output pads
Definition: avfilter.h:349
int user_set_discard
Definition: ffmpeg.h:300
static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter, AVFilterInOut *in)
static int64_t start_time
Definition: ffplay.c:332
uint8_t
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:190
AVDictionary * sws_dict
Definition: ffmpeg.h:511
int width
Video only.
Definition: codec_par.h:126
void check_filter_outputs(void)
AVOptions.
#define f(width, name)
Definition: cbs_vp9.c:255
static int configure_output_video_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
#define AV_CODEC_PROP_LOSSLESS
Codec supports lossless compression.
Definition: codec_desc.h:82
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce output
int configure_output_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
int audio_sync_method
Definition: ffmpeg_opt.c:153
int shortest
Definition: ffmpeg.h:565
AVStream ** streams
A list of all streams in the file.
Definition: avformat.h:1419
int64_t duration
Definition: movenc.c:63
int avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt, const char *name, const char *args, void *opaque, AVFilterGraph *graph_ctx)
Create and add a filter instance into an existing graph.
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:40
enum AVMediaType type
Definition: ffmpeg.h:241
int nb_threads
Maximum number of threads used by filters in this graph.
Definition: avfilter.h:869
AVDictionary * resample_opts
Definition: ffmpeg.h:513
AVFilterContext * filter
Definition: ffmpeg.h:261
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:79
int nb_input_files
Definition: ffmpeg.c:150
AVCodec * dec
Definition: ffmpeg.h:306
int avfilter_graph_parse2(AVFilterGraph *graph, const char *filters, AVFilterInOut **inputs, AVFilterInOut **outputs)
Add a graph described by a string to a graph.
Definition: graphparser.c:407
int ist_in_filtergraph(FilterGraph *fg, InputStream *ist)
int file_index
Definition: ffmpeg.h:297
int av_buffersink_get_h(const AVFilterContext *ctx)
struct InputStream::sub2video sub2video
int av_buffersink_get_format(const AVFilterContext *ctx)
#define av_log(a,...)
int filter_complex_nbthreads
Definition: ffmpeg_opt.c:173
void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub)
Definition: ffmpeg.c:240
#define FF_COMPLIANCE_UNOFFICIAL
Allow unofficial extensions.
Definition: avcodec.h:1593
A filter pad used for either input or output.
Definition: internal.h:54
unsigned int initialize
marks if sub2video_update should force an initialization
Definition: ffmpeg.h:353
uint64_t channel_layout
Definition: ffmpeg.h:275
AVFifoBuffer * sub_queue
queue of AVSubtitle* before filter init
Definition: ffmpeg.h:350
static int insert_filter(AVFilterContext **last_filter, int *pad_idx, const char *filter_name, const char *args)
enum AVCodecID id
Definition: codec.h:204
AVFilterPad * input_pads
array of input pads
Definition: avfilter.h:345
int hw_device_setup_for_filter(FilterGraph *fg)
Definition: ffmpeg_hw.c:525
AVRational sample_aspect_ratio
Definition: ffmpeg.h:249
AVCodecID
Identify the syntax and semantics of the bitstream.
Definition: codec_id.h:46
int width
Definition: frame.h:366
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:194
#define GET_CH_LAYOUT_NAME(ch_layout)
Definition: cmdutils.h:638
int sample_rate
Definition: ffmpeg.h:251
FilterGraph ** filtergraphs
Definition: ffmpeg.c:157
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce then the filter should push the output frames on the output link immediately As an exception to the previous rule if the input frame is enough to produce several output frames then the filter needs output only at least one per link The additional frames can be left buffered in the filter
AVFilterContext * filter
Definition: ffmpeg.h:237
int format
Definition: ffmpeg.h:246
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:203
unsigned nb_outputs
number of output pads
Definition: avfilter.h:351
Display matrix.
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:69
packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
Definition: pixfmt.h:95
int av_fifo_generic_read(AVFifoBuffer *f, void *dest, int buf_size, void(*func)(void *, void *, int))
Feed data from an AVFifoBuffer to a user-supplied callback.
Definition: fifo.c:213
enum AVMediaType codec_type
General type of the encoded data.
Definition: codec_par.h:56
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
Definition: opt.c:586
simple assert() macros that are a bit more flexible than ISO C assert().
const char * name
Name of the codec implementation.
Definition: codec.h:197
const char * av_get_sample_fmt_name(enum AVSampleFormat sample_fmt)
Return the name of sample_fmt, or NULL if sample_fmt is not recognized.
Definition: samplefmt.c:49
int eof
Definition: ffmpeg.h:257
int format
video: the pixel format, value corresponds to enum AVPixelFormat audio: the sample format...
Definition: buffersrc.h:78
#define FFMAX(a, b)
Definition: common.h:94
#define fail()
Definition: checkasm.h:123
#define AV_CODEC_CAP_VARIABLE_FRAME_SIZE
Audio encoder supports receiving a different number of samples in each call.
Definition: codec.h:122
static int configure_output_audio_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
uint64_t channel_layout
Audio channel layout.
Definition: avcodec.h:1237
int filter_nbthreads
Definition: ffmpeg_opt.c:172
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
OutputFilter * filter
Definition: ffmpeg.h:505
uint64_t channel_layout
Channel layout of the audio data.
Definition: frame.h:485
int props
Codec properties, a combination of AV_CODEC_PROP_* flags.
Definition: codec_desc.h:54
const AVFilter * avfilter_get_by_name(const char *name)
Get a filter definition matching the given name.
Definition: allfilters.c:520
unsigned int nb_streams
Number of elements in AVFormatContext.streams.
Definition: avformat.h:1407
uint8_t nb_components
The number of components each pixel has, (1-4)
Definition: pixdesc.h:83
int channels
number of audio channels, only used for audio.
Definition: frame.h:614
audio channel layout utility functions
enum AVPixelFormat * pix_fmts
array of supported pixel formats, or NULL if unknown, array is terminated by -1
Definition: codec.h:211
This structure contains the parameters describing the frames that will be passed to this filter...
Definition: buffersrc.h:73
unsigned nb_inputs
number of input pads
Definition: avfilter.h:347
external API header
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:78
#define AV_OPT_SEARCH_CHILDREN
Search in possible children of the given object first.
Definition: opt.h:560
struct OutputStream * ost
Definition: ffmpeg.h:262
int width
picture width / height.
Definition: avcodec.h:699
uint8_t w
Definition: llviddspenc.c:38
char * apad
Definition: ffmpeg.h:514
int width
Definition: ffmpeg.h:248
AVFormatContext * ctx
Definition: movenc.c:48
int nb_filtergraphs
Definition: ffmpeg.c:158
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
#define s(width, name)
Definition: cbs_vp9.c:257
int audio_channels_mapped
Definition: ffmpeg.h:500
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:69
enum AVCodecID codec_id
Definition: vaapi_decode.c:369
int height
Definition: ffmpeg.h:271
int sample_rate
Definition: ffmpeg.h:274
#define GET_SAMPLE_FMT_NAME(sample_fmt)
Definition: cmdutils.h:631
AVFilterContext * filter_ctx
filter context associated to this input/output
Definition: avfilter.h:1008
static const AVFilterPad outputs[]
Definition: af_acontrast.c:203
void exit_program(int ret)
Wraps exit with a program-specific cleanup routine.
Definition: cmdutils.c:133
AVCodecContext * enc
Definition: muxing.c:55
int start_at_zero
Definition: ffmpeg_opt.c:162
int avfilter_init_str(AVFilterContext *filter, const char *args)
Initialize a filter with the supplied parameters.
Definition: avfilter.c:935
int audio_volume
Definition: ffmpeg_opt.c:152
Stream structure.
Definition: avformat.h:876
static char * describe_filter_link(FilterGraph *fg, AVFilterInOut *inout, int in)
A linked-list of the inputs/outputs of the filter chain.
Definition: avfilter.h:1003
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:381
InputFilter ** filters
Definition: ffmpeg.h:360
enum AVPixelFormat avcodec_find_best_pix_fmt_of_2(enum AVPixelFormat dst_pix_fmt1, enum AVPixelFormat dst_pix_fmt2, enum AVPixelFormat src_pix_fmt, int has_alpha, int *loss_ptr)
Definition: imgconvert.c:54
#define GET_SAMPLE_RATE_NAME(rate)
Definition: cmdutils.h:634
int64_t recording_time
Definition: ffmpeg.h:410
int frame_size
Number of samples per channel in an audio frame.
Definition: avcodec.h:1206
AVBufferRef * hw_frames_ctx
Video with a hwaccel pixel format only.
Definition: buffersrc.h:106
AVStream * st
Definition: ffmpeg.h:298
sample_rate
#define AV_LOG_INFO
Standard information.
Definition: log.h:205
#define AV_BPRINT_SIZE_AUTOMATIC
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:253
static int configure_input_audio_filter(FilterGraph *fg, InputFilter *ifilter, AVFilterInOut *in)
enum AVMediaType codec_type
Definition: avcodec.h:534
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:58
enum AVCodecID codec_id
Definition: avcodec.h:536
int av_fifo_size(const AVFifoBuffer *f)
Return the amount of data in bytes in the AVFifoBuffer, that is the amount of data you can read from ...
Definition: fifo.c:77
int ist_index
Definition: ffmpeg.h:399
const char * graph_desc
Definition: ffmpeg.h:285
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
uint64_t av_buffersink_get_channel_layout(const AVFilterContext *ctx)
int64_t start_time
Definition: ffmpeg.h:408
void avio_w8(AVIOContext *s, int b)
Definition: aviobuf.c:203
main external API structure.
Definition: avcodec.h:526
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:372
void avsubtitle_free(AVSubtitle *sub)
Free all allocated data in the given subtitle struct.
Definition: utils.c:1109
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:396
int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame)
AVCodecContext * enc_ctx
Definition: ffmpeg.h:467
#define AUTO_INSERT_FILTER(opt_name, filter_name, arg)
AVBufferRef * hw_frames_ctx
Definition: ffmpeg.h:255
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(const int16_t *) pi >> 8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(const int32_t *) pi >> 24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31))))#define SET_CONV_FUNC_GROUP(ofmt, ifmt) static void set_generic_function(AudioConvert *ac){}void ff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, int sample_rate, int apply_map){AudioConvert *ac;int in_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) return NULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt) > 2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);return NULL;}return ac;}in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}else if(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;else ac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);return ac;}int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){int use_generic=1;int len=in->nb_samples;int p;if(ac->dc){av_log(ac->avr, AV_LOG_TRACE,"%d samples - audio_convert: %s to %s (dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));return ff_convert_dither(ac-> in
int channels
Definition: ffmpeg.h:252
int * audio_channels_map
Definition: ffmpeg.h:499
static const uint16_t channel_layouts[7]
Definition: dca_lbr.c:113
int sample_rate
Sample rate of the audio data.
Definition: frame.h:480
Filter definition.
Definition: avfilter.h:144
int pad_idx
index of the filt_ctx pad to use for linking
Definition: avfilter.h:1011
Rational number (pair of numerator and denominator).
Definition: rational.h:58
int file_index
Definition: ffmpeg.h:446
AVCodecContext * dec_ctx
Definition: ffmpeg.h:305
AVMediaType
Definition: avutil.h:199
size_t av_strlcatf(char *dst, size_t size, const char *fmt,...)
Definition: avstring.c:101
packed BGR 8:8:8, 32bpp, BGRXBGRX... X=unused/undefined
Definition: pixfmt.h:240
const char * name
Filter name.
Definition: avfilter.h:148
int av_buffersink_get_w(const AVFilterContext *ctx)
int autorotate
Definition: ffmpeg.h:338
#define snprintf
Definition: snprintf.h:34
const char * avfilter_pad_get_name(const AVFilterPad *pads, int pad_idx)
Get the name of an AVFilterPad.
Definition: avfilter.c:1045
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:275
This struct describes the properties of a single codec described by an AVCodecID. ...
Definition: codec_desc.h:38
float audio_drift_threshold
Definition: ffmpeg_opt.c:148
char * name
unique name for this input/output in the list
Definition: avfilter.h:1005
int nb_filters
Definition: ffmpeg.h:361
all automatic conversions disabled
Definition: avfilter.h:976
int64_t start_time
Position of the first frame of the component, in AV_TIME_BASE fractional seconds. ...
Definition: avformat.h:1456
#define AVERROR_FILTER_NOT_FOUND
Filter not found.
Definition: error.h:58
int autoscale
Definition: ffmpeg.h:484
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
static int sub2video_prepare(InputStream *ist, InputFilter *ifilter)
static AVStream * ost
int reconfiguration
Definition: ffmpeg.h:288
struct FilterGraph * graph
Definition: ffmpeg.h:263
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
sample_rates
void av_buffersink_set_frame_size(AVFilterContext *ctx, unsigned frame_size)
Set the frame size for an audio buffer sink.
Definition: buffersink.c:175
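A short sketch of how av_buffersink_set_frame_size() is typically used when an audio sink feeds an encoder that requires fixed-size frames; enc_ctx and buffersink_ctx are assumed to be an opened audio encoder context and an "abuffersink" instance, respectively.

/* Sketch: encoders without AV_CODEC_CAP_VARIABLE_FRAME_SIZE need
 * fixed-size frames, so ask the sink to emit exactly frame_size samples.
 * enc_ctx and buffersink_ctx are assumed to exist. */
if (!(enc_ctx->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
    av_buffersink_set_frame_size(buffersink_ctx, enc_ctx->frame_size);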
int av_buffersrc_parameters_set(AVFilterContext *ctx, AVBufferSrcParameters *param)
Initialize the buffersrc or abuffersrc filter with the provided parameters.
Definition: buffersrc.c:98
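For context, a minimal sketch (assumed names, not this file's code) of the intended call pattern: allocate the parameter block, fill in the stream properties, hand it to a freshly allocated but not yet initialized "buffer" source, and free it afterwards.

#include "libavfilter/avfilter.h"
#include "libavfilter/buffersrc.h"

/* Sketch: pre-configure a not-yet-initialized "buffer" source; the concrete
 * property values below are placeholders. */
static int set_src_parameters(AVFilterContext *buffersrc_ctx)
{
    AVBufferSrcParameters *par = av_buffersrc_parameters_alloc();
    int ret;

    if (!par)
        return AVERROR(ENOMEM);
    par->format              = AV_PIX_FMT_YUV420P;    /* placeholder */
    par->width               = 1280;                  /* placeholder */
    par->height              = 720;                   /* placeholder */
    par->time_base           = (AVRational){ 1, 25 }; /* placeholder */
    par->sample_aspect_ratio = (AVRational){ 1, 1 };
    ret = av_buffersrc_parameters_set(buffersrc_ctx, par);
    av_freep(&par);
    return ret;
}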
AVStream * st
Definition: muxing.c:54
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:93
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting color_range
Definition: pixfmt.h:80
const char * avcodec_get_name(enum AVCodecID id)
Get the name of a codec.
Definition: utils.c:1193
char * key
Definition: dict.h:86
int den
Denominator.
Definition: rational.h:60
uint64_t channel_layout
Definition: ffmpeg.h:253
int copy_ts
Definition: ffmpeg_opt.c:161
AVFormatContext * ctx
Definition: ffmpeg.h:396
#define AVERROR_DECODER_NOT_FOUND
Decoder not found.
Definition: error.h:52
int do_deinterlace
Definition: ffmpeg_opt.c:156
#define GROW_ARRAY(array, nb_elems)
Definition: cmdutils.h:622
pixel format definitions
char * avfilter
Definition: ffmpeg.h:506
uint8_t * name
Definition: ffmpeg.h:240
char * value
Definition: dict.h:87
AVFifoBuffer * av_fifo_alloc(unsigned int size)
Initialize an AVFifoBuffer.
Definition: fifo.c:43
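As a quick illustration of this (older) FIFO API, here is a hedged sketch of queueing and draining AVFrame pointers, roughly in the spirit of the frame_queue used in this file; the queue depth and the use of av_frame_alloc() are illustrative only.

#include "libavutil/fifo.h"
#include "libavutil/frame.h"

/* Sketch: store AVFrame pointers (not frame data) in an AVFifoBuffer. */
static int fifo_demo(void)
{
    AVFifoBuffer *fifo = av_fifo_alloc(8 * sizeof(AVFrame *)); /* illustrative size */
    AVFrame *frame = av_frame_alloc();

    if (!fifo || !frame) {
        av_frame_free(&frame);
        av_fifo_freep(&fifo);
        return AVERROR(ENOMEM);
    }
    av_fifo_generic_write(fifo, &frame, sizeof(frame), NULL);  /* push the pointer */
    while (av_fifo_size(fifo) >= sizeof(frame)) {              /* drain the queue */
        AVFrame *queued;
        av_fifo_generic_read(fifo, &queued, sizeof(queued), NULL);
        av_frame_free(&queued);
    }
    av_fifo_freep(&fifo);
    return 0;
}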
int len
AVFilterContext * avfilter_graph_alloc_filter(AVFilterGraph *graph, const AVFilter *filter, const char *name)
Create a new filter instance in a filter graph.
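A hedged sketch of building one filter instance by hand (the filter choice, instance name and the "320:240" option string are illustrative): look the filter up, allocate it in an existing graph with avfilter_graph_alloc_filter(), then initialize it with avfilter_init_str().

#include "libavfilter/avfilter.h"

/* Sketch: instantiate and initialize a "scale" filter inside an existing
 * graph by hand, instead of going through avfilter_graph_parse*(). */
static int add_scale_filter(AVFilterGraph *graph, AVFilterContext **out_ctx)
{
    const AVFilter *scale = avfilter_get_by_name("scale");
    AVFilterContext *ctx;

    if (!scale)
        return AVERROR_FILTER_NOT_FOUND;
    ctx = avfilter_graph_alloc_filter(graph, scale, "my_scale");
    if (!ctx)
        return AVERROR(ENOMEM);
    *out_ctx = ctx;
    /* "320:240" is a placeholder option string (output width:height) */
    return avfilter_init_str(ctx, "320:240");
}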
int channels
number of audio channels
Definition: avcodec.h:1187
OutputFilter ** outputs
Definition: ffmpeg.h:292
InputFile ** input_files
Definition: ffmpeg.c:149
AVBufferSrcParameters * av_buffersrc_parameters_alloc(void)
Allocate a new AVBufferSrcParameters instance.
Definition: buffersrc.c:87
AVFormatContext * ctx
Definition: ffmpeg.h:558
const AVCodecDescriptor * avcodec_descriptor_get(enum AVCodecID id)
Definition: codec_desc.c:3401
int filtergraph_is_simple(FilterGraph *fg)
An instance of a filter.
Definition: avfilter.h:338
static char * choose_pix_fmts(OutputFilter *ofilter)
static void cleanup_filtergraph(FilterGraph *fg)
AVDictionary * encoder_opts
Definition: ffmpeg.h:510
int64_t av_get_default_channel_layout(int nb_channels)
Return default channel layout for a given number of channels.
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:723
int height
Definition: frame.h:366
FILE * out
Definition: movenc.c:54
InputFilter ** inputs
Definition: ffmpeg.h:290
#define av_freep(p)
#define AV_DICT_IGNORE_SUFFIX
Return first entry in a dictionary whose first part corresponds to the search key, ignoring the suffix of the found key string.
Definition: dict.h:70
OutputFile ** output_files
Definition: ffmpeg.c:154
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:188
AVCodecParameters * codecpar
Codec parameters associated with this stream.
Definition: avformat.h:1023
enum AVSampleFormat * sample_fmts
array of supported sample formats, or NULL if unknown, array is terminated by -1
Definition: codec.h:213
int format
Definition: ffmpeg.h:273
formats
Definition: signature.h:48
int init_simple_filtergraph(InputStream *ist, OutputStream *ost)
int discard
Definition: ffmpeg.h:299
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2489
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avformat.h:905
void choose_sample_fmt(AVStream *st, AVCodec *codec)
Definition: ffmpeg_filter.c:93
Filter frames: the word “frame” indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Formats: for each input and each output, the list of supported formats. For video that means the pixel format; for audio that means the channel layout, sample format and sample rate. The lists are not plain lists, they are references to shared objects: when the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection, and when a single format is eventually chosen for a link amongst the remaining ones, all references to the list are updated. That means that if a filter requires that its input and output have the same format amongst a supported list, all it has to do is use a reference to the same list of formats. query_formats() can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later; that can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references, ownership and permissions.
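To make the negotiation mechanics above concrete, here is a hedged sketch of a query_formats() callback for a hypothetical filter that supports a single pixel format on every link. ff_make_format_list() and ff_set_common_formats() are libavfilter-internal helpers (libavfilter/formats.h); publishing one shared list on all links is what forces input and output to end up with the same format.

#include "libavfilter/avfilter.h"
#include "libavfilter/formats.h"
#include "libavutil/pixfmt.h"

/* Sketch: a query_formats() callback that accepts only YUV420P on every
 * input and output by sharing one AVFilterFormats list across all links. */
static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE
    };
    AVFilterFormats *fmts = ff_make_format_list(pix_fmts);

    if (!fmts)
        return AVERROR(ENOMEM);
    return ff_set_common_formats(ctx, fmts);
}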
int nb_inputs
Definition: ffmpeg.h:291
enum AVDiscard discard
Selects which packets can be discarded at will and do not need to be demuxed.
Definition: avformat.h:931
int index
Definition: ffmpeg.h:447
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
enum AVMediaType type
Definition: ffmpeg.h:268
AVFifoBuffer * frame_queue
Definition: ffmpeg.h:243
#define AUTO_INSERT_FILTER_INPUT(opt_name, filter_name, arg)
int av_opt_set(void *obj, const char *name, const char *val, int search_flags)
Definition: opt.c:465
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
Definition: avcodec.h:1589
const AVFilter * filter
the AVFilter of which this is an instance
Definition: avfilter.h:341
int i
Definition: input.c:406
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
#define tb
Definition: regdef.h:68
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
Definition: buffersrc.c:147
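A sketch of the usual push/pull pattern around a configured graph (assumed, illustrative names): push a decoded frame into the source with av_buffersrc_add_frame(), then drain whatever the graph can currently produce with av_buffersink_get_frame().

#include "libavfilter/buffersrc.h"
#include "libavfilter/buffersink.h"
#include "libavutil/frame.h"

/* Sketch: feed one decoded frame and drain all output currently available.
 * buffersrc_ctx, buffersink_ctx and the frames are assumed to exist. */
static int push_and_drain(AVFilterContext *buffersrc_ctx,
                          AVFilterContext *buffersink_ctx,
                          AVFrame *dec_frame, AVFrame *filt_frame)
{
    int ret = av_buffersrc_add_frame(buffersrc_ctx, dec_frame);
    if (ret < 0)
        return ret;
    while ((ret = av_buffersink_get_frame(buffersink_ctx, filt_frame)) >= 0) {
        /* ... hand filt_frame to the encoder or another consumer ... */
        av_frame_unref(filt_frame);
    }
    /* EAGAIN/EOF just mean the graph has nothing more to give right now */
    return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
}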
InputStream ** input_streams
Definition: ffmpeg.c:147
int avio_printf(AVIOContext *s, const char *fmt,...) av_printf_format(2, 3)
Writes a formatted string to the context.
discard nothing
Definition: avcodec.h:230
const char * name
Definition: opengl_enc.c:102
static uint8_t tmp[11]
Definition: aes_ctr.c:26