FFmpeg
ffmpeg_filter.c
1 /*
2  * ffmpeg filter configuration
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <stdint.h>
22 
23 #include "ffmpeg.h"
24 
25 #include "libavfilter/avfilter.h"
26 #include "libavfilter/buffersink.h"
27 #include "libavfilter/buffersrc.h"
28 
30 
31 #include "libavutil/avassert.h"
32 #include "libavutil/avstring.h"
33 #include "libavutil/bprint.h"
34 #include "libavutil/channel_layout.h"
35 #include "libavutil/display.h"
36 #include "libavutil/opt.h"
37 #include "libavutil/pixdesc.h"
38 #include "libavutil/pixfmt.h"
39 #include "libavutil/imgutils.h"
40 #include "libavutil/samplefmt.h"
41 
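/* Return the extended pixel format list that MJPEG/LJPEG are allowed to use
 * when the encoder runs with -strict unofficial (or lower); any other codec
 * gets the caller's default list back unchanged. */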
42 static const enum AVPixelFormat *get_compliance_unofficial_pix_fmts(enum AVCodecID codec_id, const enum AVPixelFormat default_formats[])
43 {
44  static const enum AVPixelFormat mjpeg_formats[] =
45  { AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P,
46  AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P,
47  AV_PIX_FMT_NONE };
48  static const enum AVPixelFormat ljpeg_formats[] =
49  { AV_PIX_FMT_BGR24, AV_PIX_FMT_BGRA, AV_PIX_FMT_BGR0,
50  AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P,
51  AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P,
52  AV_PIX_FMT_NONE};
53 
54  if (codec_id == AV_CODEC_ID_MJPEG) {
55  return mjpeg_formats;
56  } else if (codec_id == AV_CODEC_ID_LJPEG) {
57  return ljpeg_formats;
58  } else {
59  return default_formats;
60  }
61 }
62 
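/* Pick the pixel format the encoder actually supports that is closest to the
 * requested target, logging a warning when the target itself is unsupported. */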
63 static enum AVPixelFormat choose_pixel_fmt(AVStream *st, AVCodecContext *enc_ctx,
64  const AVCodec *codec, enum AVPixelFormat target)
65 {
66  if (codec && codec->pix_fmts) {
67  const enum AVPixelFormat *p = codec->pix_fmts;
68  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(target);
69  //FIXME: This should check for AV_PIX_FMT_FLAG_ALPHA after PAL8 pixel format without alpha is implemented
70  int has_alpha = desc ? desc->nb_components % 2 == 0 : 0;
71  enum AVPixelFormat best= AV_PIX_FMT_NONE;
72 
73  if (enc_ctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL) {
74  p = get_compliance_unofficial_pix_fmts(enc_ctx->codec_id, p);
75  }
76  for (; *p != AV_PIX_FMT_NONE; p++) {
77  best = av_find_best_pix_fmt_of_2(best, *p, target, has_alpha, NULL);
78  if (*p == target)
79  break;
80  }
81  if (*p == AV_PIX_FMT_NONE) {
82  if (target != AV_PIX_FMT_NONE)
84  "Incompatible pixel format '%s' for codec '%s', auto-selecting format '%s'\n",
85  av_get_pix_fmt_name(target),
86  codec->name,
87  av_get_pix_fmt_name(best));
88  return best;
89  }
90  }
91  return target;
92 }
93 
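/* Build a '|'-separated string of candidate pixel formats for the output
 * filter: the format forced on the encoder context if there is one, otherwise
 * every format the encoder supports. The caller owns the returned string. */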
94 static char *choose_pix_fmts(OutputFilter *ofilter)
95 {
96  OutputStream *ost = ofilter->ost;
97  AVDictionaryEntry *strict_dict = av_dict_get(ost->encoder_opts, "strict", NULL, 0);
98  if (strict_dict)
99  // used by choose_pixel_fmt() and below
100  av_opt_set(ost->enc_ctx, "strict", strict_dict->value, 0);
101 
102  if (ost->keep_pix_fmt) {
103  avfilter_graph_set_auto_convert(ofilter->graph->graph,
104  AVFILTER_AUTO_CONVERT_NONE);
105  if (ost->enc_ctx->pix_fmt == AV_PIX_FMT_NONE)
106  return NULL;
107  return av_strdup(av_get_pix_fmt_name(ost->enc_ctx->pix_fmt));
108  }
109  if (ost->enc_ctx->pix_fmt != AV_PIX_FMT_NONE) {
110  return av_strdup(av_get_pix_fmt_name(choose_pixel_fmt(ost->st, ost->enc_ctx, ost->enc, ost->enc_ctx->pix_fmt)));
111  } else if (ost->enc && ost->enc->pix_fmts) {
112  const enum AVPixelFormat *p;
113  AVIOContext *s = NULL;
114  uint8_t *ret;
115  int len;
116 
117  if (avio_open_dyn_buf(&s) < 0)
118  exit_program(1);
119 
120  p = ost->enc->pix_fmts;
121  if (ost->enc_ctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL) {
122  p = get_compliance_unofficial_pix_fmts(ost->enc_ctx->codec_id, p);
123  }
124 
125  for (; *p != AV_PIX_FMT_NONE; p++) {
126  const char *name = av_get_pix_fmt_name(*p);
127  avio_printf(s, "%s|", name);
128  }
129  len = avio_close_dyn_buf(s, &ret);
130  ret[len - 1] = 0;
131  return ret;
132  } else
133  return NULL;
134 }
135 
136 /* Define a function for building a string containing a list of
137  * allowed formats. */
138 #define DEF_CHOOSE_FORMAT(suffix, type, var, supported_list, none, get_name) \
139 static char *choose_ ## suffix (OutputFilter *ofilter) \
140 { \
141  if (ofilter->var != none) { \
142  get_name(ofilter->var); \
143  return av_strdup(name); \
144  } else if (ofilter->supported_list) { \
145  const type *p; \
146  AVIOContext *s = NULL; \
147  uint8_t *ret; \
148  int len; \
149  \
150  if (avio_open_dyn_buf(&s) < 0) \
151  exit_program(1); \
152  \
153  for (p = ofilter->supported_list; *p != none; p++) { \
154  get_name(*p); \
155  avio_printf(s, "%s|", name); \
156  } \
157  len = avio_close_dyn_buf(s, &ret); \
158  ret[len - 1] = 0; \
159  return ret; \
160  } else \
161  return NULL; \
162 }
163 
164 //DEF_CHOOSE_FORMAT(pix_fmts, enum AVPixelFormat, format, formats, AV_PIX_FMT_NONE,
165 // GET_PIX_FMT_NAME)
166 
167 DEF_CHOOSE_FORMAT(sample_fmts, enum AVSampleFormat, format, formats,
168  AV_SAMPLE_FMT_NONE, GET_SAMPLE_FMT_NAME)
169 
170 DEF_CHOOSE_FORMAT(sample_rates, int, sample_rate, sample_rates, 0,
171  GET_SAMPLE_RATE_NAME)
172 
173 DEF_CHOOSE_FORMAT(channel_layouts, uint64_t, channel_layout, channel_layouts, 0,
174  GET_CH_LAYOUT_NAME)
175 
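/* Create a 1-input/1-output filtergraph tying a single input stream to a
 * single output stream (the simple -vf/-af case) and register it in the
 * global filtergraph list. */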
176 int init_simple_filtergraph(InputStream *ist, OutputStream *ost)
177 {
178  FilterGraph *fg = av_mallocz(sizeof(*fg));
179 
180  if (!fg)
181  exit_program(1);
182  fg->index = nb_filtergraphs;
183 
184  GROW_ARRAY(fg->outputs, fg->nb_outputs);
185  if (!(fg->outputs[0] = av_mallocz(sizeof(*fg->outputs[0]))))
186  exit_program(1);
187  fg->outputs[0]->ost = ost;
188  fg->outputs[0]->graph = fg;
189  fg->outputs[0]->format = -1;
190 
191  ost->filter = fg->outputs[0];
192 
193  GROW_ARRAY(fg->inputs, fg->nb_inputs);
194  if (!(fg->inputs[0] = av_mallocz(sizeof(*fg->inputs[0]))))
195  exit_program(1);
196  fg->inputs[0]->ist = ist;
197  fg->inputs[0]->graph = fg;
198  fg->inputs[0]->format = -1;
199 
200  fg->inputs[0]->frame_queue = av_fifo_alloc(8 * sizeof(AVFrame*));
201  if (!fg->inputs[0]->frame_queue)
202  exit_program(1);
203 
204  GROW_ARRAY(ist->filters, ist->nb_filters);
205  ist->filters[ist->nb_filters - 1] = fg->inputs[0];
206 
207  GROW_ARRAY(filtergraphs, nb_filtergraphs);
208  filtergraphs[nb_filtergraphs - 1] = fg;
209 
210  return 0;
211 }
212 
213 static char *describe_filter_link(FilterGraph *fg, AVFilterInOut *inout, int in)
214 {
215  AVFilterContext *ctx = inout->filter_ctx;
216  AVFilterPad *pads = in ? ctx->input_pads : ctx->output_pads;
217  int nb_pads = in ? ctx->nb_inputs : ctx->nb_outputs;
218  AVIOContext *pb;
219  uint8_t *res = NULL;
220 
221  if (avio_open_dyn_buf(&pb) < 0)
222  exit_program(1);
223 
224  avio_printf(pb, "%s", ctx->filter->name);
225  if (nb_pads > 1)
226  avio_printf(pb, ":%s", avfilter_pad_get_name(pads, inout->pad_idx));
227  avio_w8(pb, 0);
228  avio_close_dyn_buf(pb, &res);
229  return res;
230 }
231 
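/* Bind one unconnected input pad of a complex filtergraph to an input stream:
 * either the stream addressed by the pad's label or the first unused stream
 * of the matching media type. */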
232 static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
233 {
234  InputStream *ist = NULL;
235  enum AVMediaType type = avfilter_pad_get_type(in->filter_ctx->input_pads, in->pad_idx);
236  int i;
237 
238  // TODO: support other filter types
239  if (type != AVMEDIA_TYPE_VIDEO && type != AVMEDIA_TYPE_AUDIO) {
240  av_log(NULL, AV_LOG_FATAL, "Only video and audio filters supported "
241  "currently.\n");
242  exit_program(1);
243  }
244 
245  if (in->name) {
246  AVFormatContext *s;
247  AVStream *st = NULL;
248  char *p;
249  int file_idx = strtol(in->name, &p, 0);
250 
251  if (file_idx < 0 || file_idx >= nb_input_files) {
252  av_log(NULL, AV_LOG_FATAL, "Invalid file index %d in filtergraph description %s.\n",
253  file_idx, fg->graph_desc);
254  exit_program(1);
255  }
256  s = input_files[file_idx]->ctx;
257 
258  for (i = 0; i < s->nb_streams; i++) {
259  enum AVMediaType stream_type = s->streams[i]->codecpar->codec_type;
260  if (stream_type != type &&
261  !(stream_type == AVMEDIA_TYPE_SUBTITLE &&
262  type == AVMEDIA_TYPE_VIDEO /* sub2video hack */))
263  continue;
264  if (check_stream_specifier(s, s->streams[i], *p == ':' ? p + 1 : p) == 1) {
265  st = s->streams[i];
266  break;
267  }
268  }
269  if (!st) {
270  av_log(NULL, AV_LOG_FATAL, "Stream specifier '%s' in filtergraph description %s "
271  "matches no streams.\n", p, fg->graph_desc);
272  exit_program(1);
273  }
274  ist = input_streams[input_files[file_idx]->ist_index + st->index];
275  if (ist->user_set_discard == AVDISCARD_ALL) {
276  av_log(NULL, AV_LOG_FATAL, "Stream specifier '%s' in filtergraph description %s "
277  "matches a disabled input stream.\n", p, fg->graph_desc);
278  exit_program(1);
279  }
280  } else {
281  /* find the first unused stream of corresponding type */
282  for (i = 0; i < nb_input_streams; i++) {
283  ist = input_streams[i];
284  if (ist->user_set_discard == AVDISCARD_ALL)
285  continue;
286  if (ist->dec_ctx->codec_type == type && ist->discard)
287  break;
288  }
289  if (i == nb_input_streams) {
290  av_log(NULL, AV_LOG_FATAL, "Cannot find a matching stream for "
291  "unlabeled input pad %d on filter %s\n", in->pad_idx,
292  in->filter_ctx->name);
293  exit_program(1);
294  }
295  }
296  av_assert0(ist);
297 
298  ist->discard = 0;
299  ist->decoding_needed |= DECODING_FOR_FILTER;
300  ist->st->discard = AVDISCARD_NONE;
301 
302  GROW_ARRAY(fg->inputs, fg->nb_inputs);
303  if (!(fg->inputs[fg->nb_inputs - 1] = av_mallocz(sizeof(*fg->inputs[0]))))
304  exit_program(1);
305  fg->inputs[fg->nb_inputs - 1]->ist = ist;
306  fg->inputs[fg->nb_inputs - 1]->graph = fg;
307  fg->inputs[fg->nb_inputs - 1]->format = -1;
308  fg->inputs[fg->nb_inputs - 1]->type = ist->st->codecpar->codec_type;
309  fg->inputs[fg->nb_inputs - 1]->name = describe_filter_link(fg, in, 1);
310 
311  fg->inputs[fg->nb_inputs - 1]->frame_queue = av_fifo_alloc(8 * sizeof(AVFrame*));
312  if (!fg->inputs[fg->nb_inputs - 1]->frame_queue)
313  exit_program(1);
314 
315  GROW_ARRAY(ist->filters, ist->nb_filters);
316  ist->filters[ist->nb_filters - 1] = fg->inputs[fg->nb_inputs - 1];
317 }
318 
319 int init_complex_filtergraph(FilterGraph *fg)
320 {
321  AVFilterInOut *inputs, *outputs, *cur;
322  AVFilterGraph *graph;
323  int ret = 0;
324 
325  /* this graph is only used for determining the kinds of inputs
326  * and outputs we have, and is discarded on exit from this function */
327  graph = avfilter_graph_alloc();
328  if (!graph)
329  return AVERROR(ENOMEM);
330  graph->nb_threads = 1;
331 
332  ret = avfilter_graph_parse2(graph, fg->graph_desc, &inputs, &outputs);
333  if (ret < 0)
334  goto fail;
335 
336  for (cur = inputs; cur; cur = cur->next)
337  init_input_filter(fg, cur);
338 
339  for (cur = outputs; cur;) {
340  GROW_ARRAY(fg->outputs, fg->nb_outputs);
341  fg->outputs[fg->nb_outputs - 1] = av_mallocz(sizeof(*fg->outputs[0]));
342  if (!fg->outputs[fg->nb_outputs - 1])
343  exit_program(1);
344 
345  fg->outputs[fg->nb_outputs - 1]->graph = fg;
346  fg->outputs[fg->nb_outputs - 1]->out_tmp = cur;
347  fg->outputs[fg->nb_outputs - 1]->type = avfilter_pad_get_type(cur->filter_ctx->output_pads,
348  cur->pad_idx);
349  fg->outputs[fg->nb_outputs - 1]->name = describe_filter_link(fg, cur, 0);
350  cur = cur->next;
351  fg->outputs[fg->nb_outputs - 1]->out_tmp->next = NULL;
352  }
353 
354 fail:
355  avfilter_inout_free(&inputs);
356  avfilter_graph_free(&graph);
357  return ret;
358 }
359 
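/* Append a trim (video) or atrim (audio) filter after *last_filter so that
 * the output file's -ss/-t limits are enforced inside the filtergraph;
 * updates *last_filter and *pad_idx to point at the new filter. */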
360 static int insert_trim(int64_t start_time, int64_t duration,
361  AVFilterContext **last_filter, int *pad_idx,
362  const char *filter_name)
363 {
364  AVFilterGraph *graph = (*last_filter)->graph;
365  AVFilterContext *ctx;
366  const AVFilter *trim;
367  enum AVMediaType type = avfilter_pad_get_type((*last_filter)->output_pads, *pad_idx);
368  const char *name = (type == AVMEDIA_TYPE_VIDEO) ? "trim" : "atrim";
369  int ret = 0;
370 
371  if (duration == INT64_MAX && start_time == AV_NOPTS_VALUE)
372  return 0;
373 
374  trim = avfilter_get_by_name(name);
375  if (!trim) {
376  av_log(NULL, AV_LOG_ERROR, "%s filter not present, cannot limit "
377  "recording time.\n", name);
378  return AVERROR_FILTER_NOT_FOUND;
379  }
380 
381  ctx = avfilter_graph_alloc_filter(graph, trim, filter_name);
382  if (!ctx)
383  return AVERROR(ENOMEM);
384 
385  if (duration != INT64_MAX) {
386  ret = av_opt_set_int(ctx, "durationi", duration,
387  AV_OPT_SEARCH_CHILDREN);
388  }
389  if (ret >= 0 && start_time != AV_NOPTS_VALUE) {
390  ret = av_opt_set_int(ctx, "starti", start_time,
391  AV_OPT_SEARCH_CHILDREN);
392  }
393  if (ret < 0) {
394  av_log(ctx, AV_LOG_ERROR, "Error configuring the %s filter", name);
395  return ret;
396  }
397 
398  ret = avfilter_init_str(ctx, NULL);
399  if (ret < 0)
400  return ret;
401 
402  ret = avfilter_link(*last_filter, *pad_idx, ctx, 0);
403  if (ret < 0)
404  return ret;
405 
406  *last_filter = ctx;
407  *pad_idx = 0;
408  return 0;
409 }
410 
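/* Instantiate the named filter with the given arguments and link it after
 * *last_filter; updates *last_filter and *pad_idx. */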
411 static int insert_filter(AVFilterContext **last_filter, int *pad_idx,
412  const char *filter_name, const char *args)
413 {
414  AVFilterGraph *graph = (*last_filter)->graph;
415  AVFilterContext *ctx;
416  int ret;
417 
418  ret = avfilter_graph_create_filter(&ctx,
419  avfilter_get_by_name(filter_name),
420  filter_name, args, NULL, graph);
421  if (ret < 0)
422  return ret;
423 
424  ret = avfilter_link(*last_filter, *pad_idx, ctx, 0);
425  if (ret < 0)
426  return ret;
427 
428  *last_filter = ctx;
429  *pad_idx = 0;
430  return 0;
431 }
432 
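/* Build the output end of a video chain: a buffersink, an optional scaler
 * for the requested output size, a format filter constraining the pixel
 * format, and a trim filter, then link it to the graph's output pad. */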
433 static int configure_output_video_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
434 {
435  char *pix_fmts;
436  OutputStream *ost = ofilter->ost;
437  OutputFile *of = output_files[ost->file_index];
438  AVFilterContext *last_filter = out->filter_ctx;
439  int pad_idx = out->pad_idx;
440  int ret;
441  char name[255];
442 
443  snprintf(name, sizeof(name), "out_%d_%d", ost->file_index, ost->index);
444  ret = avfilter_graph_create_filter(&ofilter->filter,
445  avfilter_get_by_name("buffersink"),
446  name, NULL, NULL, fg->graph);
447 
448  if (ret < 0)
449  return ret;
450 
451  if ((ofilter->width || ofilter->height) && ofilter->ost->autoscale) {
452  char args[255];
453  AVFilterContext *filter;
454  AVDictionaryEntry *e = NULL;
455 
456  snprintf(args, sizeof(args), "%d:%d",
457  ofilter->width, ofilter->height);
458 
459  while ((e = av_dict_get(ost->sws_dict, "", e,
460  AV_DICT_IGNORE_SUFFIX))) {
461  av_strlcatf(args, sizeof(args), ":%s=%s", e->key, e->value);
462  }
463 
464  snprintf(name, sizeof(name), "scaler_out_%d_%d",
465  ost->file_index, ost->index);
466  if ((ret = avfilter_graph_create_filter(&filter, avfilter_get_by_name("scale"),
467  name, args, NULL, fg->graph)) < 0)
468  return ret;
469  if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
470  return ret;
471 
472  last_filter = filter;
473  pad_idx = 0;
474  }
475 
476  if ((pix_fmts = choose_pix_fmts(ofilter))) {
477  AVFilterContext *filter;
478  snprintf(name, sizeof(name), "format_out_%d_%d",
479  ost->file_index, ost->index);
480  ret = avfilter_graph_create_filter(&filter,
481  avfilter_get_by_name("format"),
482  "format", pix_fmts, NULL, fg->graph);
483  av_freep(&pix_fmts);
484  if (ret < 0)
485  return ret;
486  if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
487  return ret;
488 
489  last_filter = filter;
490  pad_idx = 0;
491  }
492 
493  if (ost->frame_rate.num && 0) {
494  AVFilterContext *fps;
495  char args[255];
496 
497  snprintf(args, sizeof(args), "fps=%d/%d", ost->frame_rate.num,
498  ost->frame_rate.den);
499  snprintf(name, sizeof(name), "fps_out_%d_%d",
500  ost->file_index, ost->index);
501  ret = avfilter_graph_create_filter(&fps, avfilter_get_by_name("fps"),
502  name, args, NULL, fg->graph);
503  if (ret < 0)
504  return ret;
505 
506  ret = avfilter_link(last_filter, pad_idx, fps, 0);
507  if (ret < 0)
508  return ret;
509  last_filter = fps;
510  pad_idx = 0;
511  }
512 
513  snprintf(name, sizeof(name), "trim_out_%d_%d",
514  ost->file_index, ost->index);
515  ret = insert_trim(of->start_time, of->recording_time,
516  &last_filter, &pad_idx, name);
517  if (ret < 0)
518  return ret;
519 
520 
521  if ((ret = avfilter_link(last_filter, pad_idx, ofilter->filter, 0)) < 0)
522  return ret;
523 
524  return 0;
525 }
526 
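/* Build the output end of an audio chain: an abuffersink, an optional pan
 * filter for -map_channel, an aformat filter constraining sample format,
 * rate and channel layout, apad when -shortest is in effect, and a trim
 * filter, then link it to the graph's output pad. */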
527 static int configure_output_audio_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
528 {
529  OutputStream *ost = ofilter->ost;
530  OutputFile *of = output_files[ost->file_index];
531  AVCodecContext *codec = ost->enc_ctx;
532  AVFilterContext *last_filter = out->filter_ctx;
533  int pad_idx = out->pad_idx;
534  char *sample_fmts, *sample_rates, *channel_layouts;
535  char name[255];
536  int ret;
537 
538  snprintf(name, sizeof(name), "out_%d_%d", ost->file_index, ost->index);
539  ret = avfilter_graph_create_filter(&ofilter->filter,
540  avfilter_get_by_name("abuffersink"),
541  name, NULL, NULL, fg->graph);
542  if (ret < 0)
543  return ret;
544  if ((ret = av_opt_set_int(ofilter->filter, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
545  return ret;
546 
547 #define AUTO_INSERT_FILTER(opt_name, filter_name, arg) do { \
548  AVFilterContext *filt_ctx; \
549  \
550  av_log(NULL, AV_LOG_INFO, opt_name " is forwarded to lavfi " \
551  "similarly to -af " filter_name "=%s.\n", arg); \
552  \
553  ret = avfilter_graph_create_filter(&filt_ctx, \
554  avfilter_get_by_name(filter_name), \
555  filter_name, arg, NULL, fg->graph); \
556  if (ret < 0) \
557  return ret; \
558  \
559  ret = avfilter_link(last_filter, pad_idx, filt_ctx, 0); \
560  if (ret < 0) \
561  return ret; \
562  \
563  last_filter = filt_ctx; \
564  pad_idx = 0; \
565 } while (0)
566  if (ost->audio_channels_mapped) {
567  int i;
568  AVBPrint pan_buf;
569  av_bprint_init(&pan_buf, 256, 8192);
570  av_bprintf(&pan_buf, "0x%"PRIx64,
571  av_get_default_channel_layout(ost->audio_channels_mapped));
572  for (i = 0; i < ost->audio_channels_mapped; i++)
573  if (ost->audio_channels_map[i] != -1)
574  av_bprintf(&pan_buf, "|c%d=c%d", i, ost->audio_channels_map[i]);
575 
576  AUTO_INSERT_FILTER("-map_channel", "pan", pan_buf.str);
577  av_bprint_finalize(&pan_buf, NULL);
578  }
579 
580  if (codec->channels && !codec->channel_layout)
581  codec->channel_layout = av_get_default_channel_layout(codec->channels);
582 
583  sample_fmts = choose_sample_fmts(ofilter);
584  sample_rates = choose_sample_rates(ofilter);
585  channel_layouts = choose_channel_layouts(ofilter);
586  if (sample_fmts || sample_rates || channel_layouts) {
587  AVFilterContext *format;
588  char args[256];
589  args[0] = 0;
590 
591  if (sample_fmts)
592  av_strlcatf(args, sizeof(args), "sample_fmts=%s:",
593  sample_fmts);
594  if (sample_rates)
595  av_strlcatf(args, sizeof(args), "sample_rates=%s:",
596  sample_rates);
597  if (channel_layouts)
598  av_strlcatf(args, sizeof(args), "channel_layouts=%s:",
599  channel_layouts);
600 
601  av_freep(&sample_fmts);
602  av_freep(&sample_rates);
603  av_freep(&channel_layouts);
604 
605  snprintf(name, sizeof(name), "format_out_%d_%d",
606  ost->file_index, ost->index);
607  ret = avfilter_graph_create_filter(&format,
608  avfilter_get_by_name("aformat"),
609  name, args, NULL, fg->graph);
610  if (ret < 0)
611  return ret;
612 
613  ret = avfilter_link(last_filter, pad_idx, format, 0);
614  if (ret < 0)
615  return ret;
616 
617  last_filter = format;
618  pad_idx = 0;
619  }
620 
621  if (ost->apad && of->shortest) {
622  char args[256];
623  int i;
624 
625  for (i=0; i<of->ctx->nb_streams; i++)
626  if (of->ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
627  break;
628 
629  if (i<of->ctx->nb_streams) {
630  snprintf(args, sizeof(args), "%s", ost->apad);
631  AUTO_INSERT_FILTER("-apad", "apad", args);
632  }
633  }
634 
635  snprintf(name, sizeof(name), "trim for output stream %d:%d",
636  ost->file_index, ost->index);
637  ret = insert_trim(of->start_time, of->recording_time,
638  &last_filter, &pad_idx, name);
639  if (ret < 0)
640  return ret;
641 
642  if ((ret = avfilter_link(last_filter, pad_idx, ofilter->filter, 0)) < 0)
643  return ret;
644 
645  return 0;
646 }
647 
648 static int configure_output_filter(FilterGraph *fg, OutputFilter *ofilter,
649  AVFilterInOut *out)
650 {
651  if (!ofilter->ost) {
652  av_log(NULL, AV_LOG_FATAL, "Filter %s has an unconnected output\n", ofilter->name);
653  exit_program(1);
654  }
655 
656  switch (avfilter_pad_get_type(out->filter_ctx->output_pads, out->pad_idx)) {
657  case AVMEDIA_TYPE_VIDEO: return configure_output_video_filter(fg, ofilter, out);
658  case AVMEDIA_TYPE_AUDIO: return configure_output_audio_filter(fg, ofilter, out);
659  default: av_assert0(0);
660  }
661 }
662 
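/* Abort if any filtergraph output was never mapped to an output stream. */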
663 void check_filter_outputs(void)
664 {
665  int i;
666  for (i = 0; i < nb_filtergraphs; i++) {
667  int n;
668  for (n = 0; n < filtergraphs[i]->nb_outputs; n++) {
669  OutputFilter *output = filtergraphs[i]->outputs[n];
670  if (!output->ost) {
671  av_log(NULL, AV_LOG_FATAL, "Filter %s has an unconnected output\n", output->name);
672  exit_program(1);
673  }
674  }
675  }
676 }
677 
678 static int sub2video_prepare(InputStream *ist, InputFilter *ifilter)
679 {
680  AVFormatContext *avf = input_files[ist->file_index]->ctx;
681  int i, w, h;
682 
683  /* Compute the size of the canvas for the subtitles stream.
684  If the subtitles codecpar has set a size, use it. Otherwise use the
685  maximum dimensions of the video streams in the same file. */
686  w = ifilter->width;
687  h = ifilter->height;
688  if (!(w && h)) {
689  for (i = 0; i < avf->nb_streams; i++) {
690  if (avf->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
691  w = FFMAX(w, avf->streams[i]->codecpar->width);
692  h = FFMAX(h, avf->streams[i]->codecpar->height);
693  }
694  }
695  if (!(w && h)) {
696  w = FFMAX(w, 720);
697  h = FFMAX(h, 576);
698  }
699  av_log(avf, AV_LOG_INFO, "sub2video: using %dx%d canvas\n", w, h);
700  }
701  ist->sub2video.w = ifilter->width = w;
702  ist->sub2video.h = ifilter->height = h;
703 
704  ifilter->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
705  ifilter->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
706 
707  /* rectangles are AV_PIX_FMT_PAL8, but we have no guarantee that the
708  palettes for all rectangles are identical or compatible */
709  ifilter->format = AV_PIX_FMT_RGB32;
710 
711  ist->sub2video.frame = av_frame_alloc();
712  if (!ist->sub2video.frame)
713  return AVERROR(ENOMEM);
714  ist->sub2video.last_pts = INT64_MIN;
715  ist->sub2video.end_pts = INT64_MIN;
716 
717  /* sub2video structure has been (re-)initialized.
718  Mark it as such so that the system will be
719  initialized with the first received heartbeat. */
720  ist->sub2video.initialize = 1;
721 
722  return 0;
723 }
724 
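/* Create the buffer source for one video input (including the sub2video
 * case), then insert autorotation, optional yadif deinterlacing and trim
 * filters before linking to the graph's input pad. */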
725 static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
726  AVFilterInOut *in)
727 {
728  AVFilterContext *last_filter;
729  const AVFilter *buffer_filt = avfilter_get_by_name("buffer");
730  InputStream *ist = ifilter->ist;
731  InputFile *f = input_files[ist->file_index];
732  AVRational tb = ist->framerate.num ? av_inv_q(ist->framerate) :
733  ist->st->time_base;
734  AVRational fr = ist->framerate;
735  AVRational sar;
736  AVBPrint args;
737  char name[255];
738  int ret, pad_idx = 0;
739  int64_t tsoffset = 0;
740  AVBufferSrcParameters *par = av_buffersrc_parameters_alloc();
741 
742  if (!par)
743  return AVERROR(ENOMEM);
744  memset(par, 0, sizeof(*par));
745  par->format = AV_PIX_FMT_NONE;
746 
747  if (ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
748  av_log(NULL, AV_LOG_ERROR, "Cannot connect video filter to audio input\n");
749  ret = AVERROR(EINVAL);
750  goto fail;
751  }
752 
753  if (!fr.num)
754  fr = av_guess_frame_rate(input_files[ist->file_index]->ctx, ist->st, NULL);
755 
756  if (ist->dec_ctx->codec_type == AVMEDIA_TYPE_SUBTITLE) {
757  ret = sub2video_prepare(ist, ifilter);
758  if (ret < 0)
759  goto fail;
760  }
761 
762  sar = ifilter->sample_aspect_ratio;
763  if(!sar.den)
764  sar = (AVRational){0,1};
765  av_bprint_init(&args, 0, AV_BPRINT_SIZE_AUTOMATIC);
766  av_bprintf(&args,
767  "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:"
768  "pixel_aspect=%d/%d",
769  ifilter->width, ifilter->height, ifilter->format,
770  tb.num, tb.den, sar.num, sar.den);
771  if (fr.num && fr.den)
772  av_bprintf(&args, ":frame_rate=%d/%d", fr.num, fr.den);
773  snprintf(name, sizeof(name), "graph %d input from stream %d:%d", fg->index,
774  ist->file_index, ist->st->index);
775 
776 
777  if ((ret = avfilter_graph_create_filter(&ifilter->filter, buffer_filt, name,
778  args.str, NULL, fg->graph)) < 0)
779  goto fail;
780  par->hw_frames_ctx = ifilter->hw_frames_ctx;
781  ret = av_buffersrc_parameters_set(ifilter->filter, par);
782  if (ret < 0)
783  goto fail;
784  av_freep(&par);
785  last_filter = ifilter->filter;
786 
787  if (ist->autorotate) {
788  double theta = get_rotation(ist->st);
789 
790  if (fabs(theta - 90) < 1.0) {
791  ret = insert_filter(&last_filter, &pad_idx, "transpose", "clock");
792  } else if (fabs(theta - 180) < 1.0) {
793  ret = insert_filter(&last_filter, &pad_idx, "hflip", NULL);
794  if (ret < 0)
795  return ret;
796  ret = insert_filter(&last_filter, &pad_idx, "vflip", NULL);
797  } else if (fabs(theta - 270) < 1.0) {
798  ret = insert_filter(&last_filter, &pad_idx, "transpose", "cclock");
799  } else if (fabs(theta) > 1.0) {
800  char rotate_buf[64];
801  snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
802  ret = insert_filter(&last_filter, &pad_idx, "rotate", rotate_buf);
803  }
804  if (ret < 0)
805  return ret;
806  }
807 
808  if (do_deinterlace) {
809  AVFilterContext *yadif;
810 
811  snprintf(name, sizeof(name), "deinterlace_in_%d_%d",
812  ist->file_index, ist->st->index);
813  if ((ret = avfilter_graph_create_filter(&yadif,
814  avfilter_get_by_name("yadif"),
815  name, "", NULL,
816  fg->graph)) < 0)
817  return ret;
818 
819  if ((ret = avfilter_link(last_filter, 0, yadif, 0)) < 0)
820  return ret;
821 
822  last_filter = yadif;
823  }
824 
825  snprintf(name, sizeof(name), "trim_in_%d_%d",
826  ist->file_index, ist->st->index);
827  if (copy_ts) {
828  tsoffset = f->start_time == AV_NOPTS_VALUE ? 0 : f->start_time;
829  if (!start_at_zero && f->ctx->start_time != AV_NOPTS_VALUE)
830  tsoffset += f->ctx->start_time;
831  }
832  ret = insert_trim(((f->start_time == AV_NOPTS_VALUE) || !f->accurate_seek) ?
833  AV_NOPTS_VALUE : tsoffset, f->recording_time,
834  &last_filter, &pad_idx, name);
835  if (ret < 0)
836  return ret;
837 
838  if ((ret = avfilter_link(last_filter, 0, in->filter_ctx, in->pad_idx)) < 0)
839  return ret;
840  return 0;
841 fail:
842  av_freep(&par);
843 
844  return ret;
845 }
846 
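/* Create the abuffer source for one audio input and insert aresample
 * (-async), volume (-vol) and trim filters before linking to the graph's
 * input pad. */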
847 static int configure_input_audio_filter(FilterGraph *fg, InputFilter *ifilter,
848  AVFilterInOut *in)
849 {
850  AVFilterContext *last_filter;
851  const AVFilter *abuffer_filt = avfilter_get_by_name("abuffer");
852  InputStream *ist = ifilter->ist;
853  InputFile *f = input_files[ist->file_index];
854  AVBPrint args;
855  char name[255];
856  int ret, pad_idx = 0;
857  int64_t tsoffset = 0;
858 
859  if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_AUDIO) {
860  av_log(NULL, AV_LOG_ERROR, "Cannot connect audio filter to non audio input\n");
861  return AVERROR(EINVAL);
862  }
863 
865  av_bprintf(&args, "time_base=%d/%d:sample_rate=%d:sample_fmt=%s",
866  1, ifilter->sample_rate,
867  ifilter->sample_rate,
868  av_get_sample_fmt_name(ifilter->format));
869  if (ifilter->channel_layout)
870  av_bprintf(&args, ":channel_layout=0x%"PRIx64,
871  ifilter->channel_layout);
872  else
873  av_bprintf(&args, ":channels=%d", ifilter->channels);
874  snprintf(name, sizeof(name), "graph_%d_in_%d_%d", fg->index,
875  ist->file_index, ist->st->index);
876 
877  if ((ret = avfilter_graph_create_filter(&ifilter->filter, abuffer_filt,
878  name, args.str, NULL,
879  fg->graph)) < 0)
880  return ret;
881  last_filter = ifilter->filter;
882 
883 #define AUTO_INSERT_FILTER_INPUT(opt_name, filter_name, arg) do { \
884  AVFilterContext *filt_ctx; \
885  \
886  av_log(NULL, AV_LOG_INFO, opt_name " is forwarded to lavfi " \
887  "similarly to -af " filter_name "=%s.\n", arg); \
888  \
889  snprintf(name, sizeof(name), "graph_%d_%s_in_%d_%d", \
890  fg->index, filter_name, ist->file_index, ist->st->index); \
891  ret = avfilter_graph_create_filter(&filt_ctx, \
892  avfilter_get_by_name(filter_name), \
893  name, arg, NULL, fg->graph); \
894  if (ret < 0) \
895  return ret; \
896  \
897  ret = avfilter_link(last_filter, 0, filt_ctx, 0); \
898  if (ret < 0) \
899  return ret; \
900  \
901  last_filter = filt_ctx; \
902 } while (0)
903 
904  if (audio_sync_method > 0) {
905  char args[256] = {0};
906 
907  av_strlcatf(args, sizeof(args), "async=%d", audio_sync_method);
908  if (audio_drift_threshold != 0.1)
909  av_strlcatf(args, sizeof(args), ":min_hard_comp=%f", audio_drift_threshold);
910  if (!fg->reconfiguration)
911  av_strlcatf(args, sizeof(args), ":first_pts=0");
912  AUTO_INSERT_FILTER_INPUT("-async", "aresample", args);
913  }
914 
915 // if (ost->audio_channels_mapped) {
916 // int i;
917 // AVBPrint pan_buf;
918 // av_bprint_init(&pan_buf, 256, 8192);
919 // av_bprintf(&pan_buf, "0x%"PRIx64,
920 // av_get_default_channel_layout(ost->audio_channels_mapped));
921 // for (i = 0; i < ost->audio_channels_mapped; i++)
922 // if (ost->audio_channels_map[i] != -1)
923 // av_bprintf(&pan_buf, ":c%d=c%d", i, ost->audio_channels_map[i]);
924 // AUTO_INSERT_FILTER_INPUT("-map_channel", "pan", pan_buf.str);
925 // av_bprint_finalize(&pan_buf, NULL);
926 // }
927 
928  if (audio_volume != 256) {
929  char args[256];
930 
931  av_log(NULL, AV_LOG_WARNING, "-vol has been deprecated. Use the volume "
932  "audio filter instead.\n");
933 
934  snprintf(args, sizeof(args), "%f", audio_volume / 256.);
935  AUTO_INSERT_FILTER_INPUT("-vol", "volume", args);
936  }
937 
938  snprintf(name, sizeof(name), "trim for input stream %d:%d",
939  ist->file_index, ist->st->index);
940  if (copy_ts) {
941  tsoffset = f->start_time == AV_NOPTS_VALUE ? 0 : f->start_time;
942  if (!start_at_zero && f->ctx->start_time != AV_NOPTS_VALUE)
943  tsoffset += f->ctx->start_time;
944  }
945  ret = insert_trim(((f->start_time == AV_NOPTS_VALUE) || !f->accurate_seek) ?
946  AV_NOPTS_VALUE : tsoffset, f->recording_time,
947  &last_filter, &pad_idx, name);
948  if (ret < 0)
949  return ret;
950 
951  if ((ret = avfilter_link(last_filter, 0, in->filter_ctx, in->pad_idx)) < 0)
952  return ret;
953 
954  return 0;
955 }
956 
957 static int configure_input_filter(FilterGraph *fg, InputFilter *ifilter,
958  AVFilterInOut *in)
959 {
960  if (!ifilter->ist->dec) {
962  "No decoder for stream #%d:%d, filtering impossible\n",
963  ifilter->ist->file_index, ifilter->ist->st->index);
965  }
966  switch (avfilter_pad_get_type(in->filter_ctx->input_pads, in->pad_idx)) {
967  case AVMEDIA_TYPE_VIDEO: return configure_input_video_filter(fg, ifilter, in);
968  case AVMEDIA_TYPE_AUDIO: return configure_input_audio_filter(fg, ifilter, in);
969  default: av_assert0(0);
970  }
971 }
972 
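/* Drop the filter references held by all inputs and outputs of the graph and
 * free the AVFilterGraph itself. */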
973 static void cleanup_filtergraph(FilterGraph *fg)
974 {
975  int i;
976  for (i = 0; i < fg->nb_outputs; i++)
977  fg->outputs[i]->filter = (AVFilterContext *)NULL;
978  for (i = 0; i < fg->nb_inputs; i++)
979  fg->inputs[i]->filter = (AVFilterContext *)NULL;
980  avfilter_graph_free(&fg->graph);
981 }
982 
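/* (Re)build and configure the whole filtergraph: parse the description,
 * configure every input and output, run avfilter_graph_config(), then flush
 * the frames and subtitles that were queued while the graph was not yet
 * initialized. */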
983 int configure_filtergraph(FilterGraph *fg)
984 {
985  AVFilterInOut *inputs, *outputs, *cur;
986  int ret, i, simple = filtergraph_is_simple(fg);
987  const char *graph_desc = simple ? fg->outputs[0]->ost->avfilter :
988  fg->graph_desc;
989 
990  cleanup_filtergraph(fg);
991  if (!(fg->graph = avfilter_graph_alloc()))
992  return AVERROR(ENOMEM);
993 
994  if (simple) {
995  OutputStream *ost = fg->outputs[0]->ost;
996  char args[512];
997  AVDictionaryEntry *e = NULL;
998 
999  fg->graph->nb_threads = filter_nbthreads;
1000 
1001  args[0] = 0;
1002  while ((e = av_dict_get(ost->sws_dict, "", e,
1003  AV_DICT_IGNORE_SUFFIX))) {
1004  av_strlcatf(args, sizeof(args), "%s=%s:", e->key, e->value);
1005  }
1006  if (strlen(args))
1007  args[strlen(args)-1] = 0;
1008  fg->graph->scale_sws_opts = av_strdup(args);
1009 
1010  args[0] = 0;
1011  while ((e = av_dict_get(ost->swr_opts, "", e,
1012  AV_DICT_IGNORE_SUFFIX))) {
1013  av_strlcatf(args, sizeof(args), "%s=%s:", e->key, e->value);
1014  }
1015  if (strlen(args))
1016  args[strlen(args)-1] = 0;
1017  av_opt_set(fg->graph, "aresample_swr_opts", args, 0);
1018 
1019  args[0] = '\0';
1020  while ((e = av_dict_get(fg->outputs[0]->ost->resample_opts, "", e,
1021  AV_DICT_IGNORE_SUFFIX))) {
1022  av_strlcatf(args, sizeof(args), "%s=%s:", e->key, e->value);
1023  }
1024  if (strlen(args))
1025  args[strlen(args) - 1] = '\0';
1026 
1027  e = av_dict_get(ost->encoder_opts, "threads", NULL, 0);
1028  if (e)
1029  av_opt_set(fg->graph, "threads", e->value, 0);
1030  } else {
1031  fg->graph->nb_threads = filter_complex_nbthreads;
1032  }
1033 
1034  if ((ret = avfilter_graph_parse2(fg->graph, graph_desc, &inputs, &outputs)) < 0)
1035  goto fail;
1036 
1037  ret = hw_device_setup_for_filter(fg);
1038  if (ret < 0)
1039  goto fail;
1040 
1041  if (simple && (!inputs || inputs->next || !outputs || outputs->next)) {
1042  const char *num_inputs;
1043  const char *num_outputs;
1044  if (!outputs) {
1045  num_outputs = "0";
1046  } else if (outputs->next) {
1047  num_outputs = ">1";
1048  } else {
1049  num_outputs = "1";
1050  }
1051  if (!inputs) {
1052  num_inputs = "0";
1053  } else if (inputs->next) {
1054  num_inputs = ">1";
1055  } else {
1056  num_inputs = "1";
1057  }
1058  av_log(NULL, AV_LOG_ERROR, "Simple filtergraph '%s' was expected "
1059  "to have exactly 1 input and 1 output."
1060  " However, it had %s input(s) and %s output(s)."
1061  " Please adjust, or use a complex filtergraph (-filter_complex) instead.\n",
1062  graph_desc, num_inputs, num_outputs);
1063  ret = AVERROR(EINVAL);
1064  goto fail;
1065  }
1066 
1067  for (cur = inputs, i = 0; cur; cur = cur->next, i++)
1068  if ((ret = configure_input_filter(fg, fg->inputs[i], cur)) < 0) {
1069  avfilter_inout_free(&inputs);
1070  avfilter_inout_free(&outputs);
1071  goto fail;
1072  }
1073  avfilter_inout_free(&inputs);
1074 
1075  for (cur = outputs, i = 0; cur; cur = cur->next, i++)
1076  configure_output_filter(fg, fg->outputs[i], cur);
1077  avfilter_inout_free(&outputs);
1078 
1079  if (!auto_conversion_filters)
1080  avfilter_graph_set_auto_convert(fg->graph, AVFILTER_AUTO_CONVERT_NONE);
1081  if ((ret = avfilter_graph_config(fg->graph, NULL)) < 0)
1082  goto fail;
1083 
1084  /* limit the lists of allowed formats to the ones selected, to
1085  * make sure they stay the same if the filtergraph is reconfigured later */
1086  for (i = 0; i < fg->nb_outputs; i++) {
1087  OutputFilter *ofilter = fg->outputs[i];
1088  AVFilterContext *sink = ofilter->filter;
1089 
1090  ofilter->format = av_buffersink_get_format(sink);
1091 
1092  ofilter->width = av_buffersink_get_w(sink);
1093  ofilter->height = av_buffersink_get_h(sink);
1094 
1095  ofilter->sample_rate = av_buffersink_get_sample_rate(sink);
1096  ofilter->channel_layout = av_buffersink_get_channel_layout(sink);
1097  }
1098 
1099  fg->reconfiguration = 1;
1100 
1101  for (i = 0; i < fg->nb_outputs; i++) {
1102  OutputStream *ost = fg->outputs[i]->ost;
1103  if (!ost->enc) {
1104  /* identical to the same check in ffmpeg.c, needed because
1105  complex filter graphs are initialized earlier */
1106  av_log(NULL, AV_LOG_ERROR, "Encoder (codec %s) not found for output stream #%d:%d\n",
1107  avcodec_get_name(ost->st->codecpar->codec_id), ost->file_index, ost->index);
1108  ret = AVERROR(EINVAL);
1109  goto fail;
1110  }
1111  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
1112  !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
1113  av_buffersink_set_frame_size(ost->filter->filter,
1114  ost->enc_ctx->frame_size);
1115  }
1116 
1117  for (i = 0; i < fg->nb_inputs; i++) {
1118  while (av_fifo_size(fg->inputs[i]->frame_queue)) {
1119  AVFrame *tmp;
1120  av_fifo_generic_read(fg->inputs[i]->frame_queue, &tmp, sizeof(tmp), NULL);
1121  ret = av_buffersrc_add_frame(fg->inputs[i]->filter, tmp);
1122  av_frame_free(&tmp);
1123  if (ret < 0)
1124  goto fail;
1125  }
1126  }
1127 
1128  /* send the EOFs for the finished inputs */
1129  for (i = 0; i < fg->nb_inputs; i++) {
1130  if (fg->inputs[i]->eof) {
1131  ret = av_buffersrc_add_frame(fg->inputs[i]->filter, NULL);
1132  if (ret < 0)
1133  goto fail;
1134  }
1135  }
1136 
1137  /* process queued up subtitle packets */
1138  for (i = 0; i < fg->nb_inputs; i++) {
1139  InputStream *ist = fg->inputs[i]->ist;
1140  if (ist->sub2video.sub_queue && ist->sub2video.frame) {
1141  while (av_fifo_size(ist->sub2video.sub_queue)) {
1142  AVSubtitle tmp;
1143  av_fifo_generic_read(ist->sub2video.sub_queue, &tmp, sizeof(tmp), NULL);
1144  sub2video_update(ist, INT64_MIN, &tmp);
1145  avsubtitle_free(&tmp);
1146  }
1147  }
1148  }
1149 
1150  return 0;
1151 
1152 fail:
1153  cleanup_filtergraph(fg);
1154  return ret;
1155 }
1156 
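/* Record the properties of a decoded frame (format, dimensions, sample rate,
 * channel layout, hardware frames context) in the InputFilter so the graph
 * can be configured, or reconfigured, to match. */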
1157 int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame)
1158 {
1159  av_buffer_unref(&ifilter->hw_frames_ctx);
1160 
1161  ifilter->format = frame->format;
1162 
1163  ifilter->width = frame->width;
1164  ifilter->height = frame->height;
1165  ifilter->sample_aspect_ratio = frame->sample_aspect_ratio;
1166 
1167  ifilter->sample_rate = frame->sample_rate;
1168  ifilter->channels = frame->channels;
1169  ifilter->channel_layout = frame->channel_layout;
1170 
1171  if (frame->hw_frames_ctx) {
1172  ifilter->hw_frames_ctx = av_buffer_ref(frame->hw_frames_ctx);
1173  if (!ifilter->hw_frames_ctx)
1174  return AVERROR(ENOMEM);
1175  }
1176 
1177  return 0;
1178 }
1179 
1180 int filtergraph_is_simple(FilterGraph *fg)
1181 {
1182  return !fg->graph_desc;
1183 }