FFmpeg
transcoding.c
1 /*
2  * Copyright (c) 2010 Nicolas George
3  * Copyright (c) 2011 Stefano Sabatini
4  * Copyright (c) 2014 Andrey Utkin
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to deal
8  * in the Software without restriction, including without limitation the rights
9  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10  * copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22  * THE SOFTWARE.
23  */
24 
25 /**
26  * @file
27  * API example for demuxing, decoding, filtering, encoding and muxing
28  * @example transcoding.c
29  */
30 
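/*
 * Build/run note (not part of the original example): a typical invocation,
 * assuming a pkg-config aware FFmpeg installation, would be something like
 *   gcc transcoding.c -o transcoding \
 *       $(pkg-config --cflags --libs libavformat libavcodec libavfilter libavutil)
 *   ./transcoding <input file> <output file>
 */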
31 #include <libavcodec/avcodec.h>
32 #include <libavformat/avformat.h>
33 #include <libavfilter/buffersink.h>
34 #include <libavfilter/buffersrc.h>
35 #include <libavutil/opt.h>
36 #include <libavutil/pixdesc.h>
37 
38 static AVFormatContext *ifmt_ctx;
39 static AVFormatContext *ofmt_ctx;
40 typedef struct FilteringContext {
41  AVFilterContext *buffersink_ctx;
42  AVFilterContext *buffersrc_ctx;
43  AVFilterGraph *filter_graph;
44 } FilteringContext;
45 static FilteringContext *filter_ctx;
46 
47 typedef struct StreamContext {
48  AVCodecContext *dec_ctx;
49  AVCodecContext *enc_ctx;
50 } StreamContext;
51 static StreamContext *stream_ctx;
52 
53 static int open_input_file(const char *filename)
54 {
55  int ret;
56  unsigned int i;
57 
58  ifmt_ctx = NULL;
59  if ((ret = avformat_open_input(&ifmt_ctx, filename, NULL, NULL)) < 0) {
60  av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
61  return ret;
62  }
63 
64  if ((ret = avformat_find_stream_info(ifmt_ctx, NULL)) < 0) {
65  av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
66  return ret;
67  }
68 
69  stream_ctx = av_mallocz_array(ifmt_ctx->nb_streams, sizeof(*stream_ctx));
70  if (!stream_ctx)
71  return AVERROR(ENOMEM);
72 
73  for (i = 0; i < ifmt_ctx->nb_streams; i++) {
74  AVStream *stream = ifmt_ctx->streams[i];
75  AVCodec *dec = avcodec_find_decoder(stream->codecpar->codec_id);
76  AVCodecContext *codec_ctx;
77  if (!dec) {
78  av_log(NULL, AV_LOG_ERROR, "Failed to find decoder for stream #%u\n", i);
79  return AVERROR_DECODER_NOT_FOUND;
80  }
81  codec_ctx = avcodec_alloc_context3(dec);
82  if (!codec_ctx) {
83  av_log(NULL, AV_LOG_ERROR, "Failed to allocate the decoder context for stream #%u\n", i);
84  return AVERROR(ENOMEM);
85  }
86  ret = avcodec_parameters_to_context(codec_ctx, stream->codecpar);
87  if (ret < 0) {
88  av_log(NULL, AV_LOG_ERROR, "Failed to copy decoder parameters to input decoder context "
89  "for stream #%u\n", i);
90  return ret;
91  }
92  /* Reencode video & audio and remux subtitles etc. */
93  if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
94  || codec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
95  if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
96  codec_ctx->framerate = av_guess_frame_rate(ifmt_ctx, stream, NULL);
97  /* Open decoder */
98  ret = avcodec_open2(codec_ctx, dec, NULL);
99  if (ret < 0) {
100  av_log(NULL, AV_LOG_ERROR, "Failed to open decoder for stream #%u\n", i);
101  return ret;
102  }
103  }
104  stream_ctx[i].dec_ctx = codec_ctx;
105  }
106 
107  av_dump_format(ifmt_ctx, 0, filename, 0);
108  return 0;
109 }
110 
111 static int open_output_file(const char *filename)
112 {
113  AVStream *out_stream;
114  AVStream *in_stream;
115  AVCodecContext *dec_ctx, *enc_ctx;
116  AVCodec *encoder;
117  int ret;
118  unsigned int i;
119 
120  ofmt_ctx = NULL;
121  avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, filename);
122  if (!ofmt_ctx) {
123  av_log(NULL, AV_LOG_ERROR, "Could not create output context\n");
124  return AVERROR_UNKNOWN;
125  }
126 
127 
128  for (i = 0; i < ifmt_ctx->nb_streams; i++) {
129  out_stream = avformat_new_stream(ofmt_ctx, NULL);
130  if (!out_stream) {
131  av_log(NULL, AV_LOG_ERROR, "Failed allocating output stream\n");
132  return AVERROR_UNKNOWN;
133  }
134 
135  in_stream = ifmt_ctx->streams[i];
136  dec_ctx = stream_ctx[i].dec_ctx;
137 
138  if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
139  || dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
140  /* in this example, we choose transcoding to same codec */
141  encoder = avcodec_find_encoder(dec_ctx->codec_id);
142  if (!encoder) {
143  av_log(NULL, AV_LOG_FATAL, "Necessary encoder not found\n");
144  return AVERROR_INVALIDDATA;
145  }
146  enc_ctx = avcodec_alloc_context3(encoder);
147  if (!enc_ctx) {
148  av_log(NULL, AV_LOG_FATAL, "Failed to allocate the encoder context\n");
149  return AVERROR(ENOMEM);
150  }
151 
152  /* In this example, we transcode to same properties (picture size,
153  * sample rate etc.). These properties can be changed for output
154  * streams easily using filters */
155  if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
156  enc_ctx->height = dec_ctx->height;
157  enc_ctx->width = dec_ctx->width;
158  enc_ctx->sample_aspect_ratio = dec_ctx->sample_aspect_ratio;
159  /* take first format from list of supported formats */
160  if (encoder->pix_fmts)
161  enc_ctx->pix_fmt = encoder->pix_fmts[0];
162  else
163  enc_ctx->pix_fmt = dec_ctx->pix_fmt;
164  /* video time_base can be set to whatever is handy and supported by encoder */
165  enc_ctx->time_base = av_inv_q(dec_ctx->framerate);
166  } else {
167  enc_ctx->sample_rate = dec_ctx->sample_rate;
168  enc_ctx->channel_layout = dec_ctx->channel_layout;
169  enc_ctx->channels = av_get_channel_layout_nb_channels(enc_ctx->channel_layout);
170  /* take first format from list of supported formats */
171  enc_ctx->sample_fmt = encoder->sample_fmts[0];
172  enc_ctx->time_base = (AVRational){1, enc_ctx->sample_rate};
173  }
174 
175  if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
176  enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
177 
178  /* Third parameter can be used to pass settings to encoder */
179  ret = avcodec_open2(enc_ctx, encoder, NULL);
180  if (ret < 0) {
181  av_log(NULL, AV_LOG_ERROR, "Cannot open video encoder for stream #%u\n", i);
182  return ret;
183  }
184  ret = avcodec_parameters_from_context(out_stream->codecpar, enc_ctx);
185  if (ret < 0) {
186  av_log(NULL, AV_LOG_ERROR, "Failed to copy encoder parameters to output stream #%u\n", i);
187  return ret;
188  }
189 
190  out_stream->time_base = enc_ctx->time_base;
191  stream_ctx[i].enc_ctx = enc_ctx;
192  } else if (dec_ctx->codec_type == AVMEDIA_TYPE_UNKNOWN) {
193  av_log(NULL, AV_LOG_FATAL, "Elementary stream #%d is of unknown type, cannot proceed\n", i);
194  return AVERROR_INVALIDDATA;
195  } else {
196  /* if this stream must be remuxed */
197  ret = avcodec_parameters_copy(out_stream->codecpar, in_stream->codecpar);
198  if (ret < 0) {
199  av_log(NULL, AV_LOG_ERROR, "Copying parameters for stream #%u failed\n", i);
200  return ret;
201  }
202  out_stream->time_base = in_stream->time_base;
203  }
204 
205  }
206  av_dump_format(ofmt_ctx, 0, filename, 1);
207 
208  if (!(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {
209  ret = avio_open(&ofmt_ctx->pb, filename, AVIO_FLAG_WRITE);
210  if (ret < 0) {
211  av_log(NULL, AV_LOG_ERROR, "Could not open output file '%s'", filename);
212  return ret;
213  }
214  }
215 
216  /* init muxer, write output file header */
217  ret = avformat_write_header(ofmt_ctx, NULL);
218  if (ret < 0) {
219  av_log(NULL, AV_LOG_ERROR, "Error occurred when opening output file\n");
220  return ret;
221  }
222 
223  return 0;
224 }
225 
226 static int init_filter(FilteringContext* fctx, AVCodecContext *dec_ctx,
227  AVCodecContext *enc_ctx, const char *filter_spec)
228 {
229  char args[512];
230  int ret = 0;
231  const AVFilter *buffersrc = NULL;
232  const AVFilter *buffersink = NULL;
233  AVFilterContext *buffersrc_ctx = NULL;
234  AVFilterContext *buffersink_ctx = NULL;
235  AVFilterInOut *outputs = avfilter_inout_alloc();
236  AVFilterInOut *inputs = avfilter_inout_alloc();
237  AVFilterGraph *filter_graph = avfilter_graph_alloc();
238 
239  if (!outputs || !inputs || !filter_graph) {
240  ret = AVERROR(ENOMEM);
241  goto end;
242  }
243 
244  if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
245  buffersrc = avfilter_get_by_name("buffer");
246  buffersink = avfilter_get_by_name("buffersink");
247  if (!buffersrc || !buffersink) {
248  av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
249  ret = AVERROR_UNKNOWN;
250  goto end;
251  }
252 
253  snprintf(args, sizeof(args),
254  "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
255  dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
256  dec_ctx->time_base.num, dec_ctx->time_base.den,
257  dec_ctx->sample_aspect_ratio.num,
258  dec_ctx->sample_aspect_ratio.den);
259 
260  ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
261  args, NULL, filter_graph);
262  if (ret < 0) {
263  av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
264  goto end;
265  }
266 
267  ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
268  NULL, NULL, filter_graph);
269  if (ret < 0) {
270  av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
271  goto end;
272  }
273 
274  ret = av_opt_set_bin(buffersink_ctx, "pix_fmts",
275  (uint8_t*)&enc_ctx->pix_fmt, sizeof(enc_ctx->pix_fmt),
276  AV_OPT_SEARCH_CHILDREN);
277  if (ret < 0) {
278  av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
279  goto end;
280  }
281  } else if (dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
282  buffersrc = avfilter_get_by_name("abuffer");
283  buffersink = avfilter_get_by_name("abuffersink");
284  if (!buffersrc || !buffersink) {
285  av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
286  ret = AVERROR_UNKNOWN;
287  goto end;
288  }
289 
290  if (!dec_ctx->channel_layout)
291  dec_ctx->channel_layout =
292  av_get_default_channel_layout(dec_ctx->channels);
293  snprintf(args, sizeof(args),
294  "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%"PRIx64,
295  dec_ctx->time_base.num, dec_ctx->time_base.den, dec_ctx->sample_rate,
296  av_get_sample_fmt_name(dec_ctx->sample_fmt),
297  dec_ctx->channel_layout);
298  ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
299  args, NULL, filter_graph);
300  if (ret < 0) {
301  av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer source\n");
302  goto end;
303  }
304 
305  ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
306  NULL, NULL, filter_graph);
307  if (ret < 0) {
308  av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n");
309  goto end;
310  }
311 
312  ret = av_opt_set_bin(buffersink_ctx, "sample_fmts",
313  (uint8_t*)&enc_ctx->sample_fmt, sizeof(enc_ctx->sample_fmt),
314  AV_OPT_SEARCH_CHILDREN);
315  if (ret < 0) {
316  av_log(NULL, AV_LOG_ERROR, "Cannot set output sample format\n");
317  goto end;
318  }
319 
320  ret = av_opt_set_bin(buffersink_ctx, "channel_layouts",
321  (uint8_t*)&enc_ctx->channel_layout,
322  sizeof(enc_ctx->channel_layout), AV_OPT_SEARCH_CHILDREN);
323  if (ret < 0) {
324  av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
325  goto end;
326  }
327 
328  ret = av_opt_set_bin(buffersink_ctx, "sample_rates",
329  (uint8_t*)&enc_ctx->sample_rate, sizeof(enc_ctx->sample_rate),
330  AV_OPT_SEARCH_CHILDREN);
331  if (ret < 0) {
332  av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n");
333  goto end;
334  }
335  } else {
336  ret = AVERROR_UNKNOWN;
337  goto end;
338  }
339 
340  /* Endpoints for the filter graph. */
341  outputs->name = av_strdup("in");
342  outputs->filter_ctx = buffersrc_ctx;
343  outputs->pad_idx = 0;
344  outputs->next = NULL;
345 
346  inputs->name = av_strdup("out");
347  inputs->filter_ctx = buffersink_ctx;
348  inputs->pad_idx = 0;
349  inputs->next = NULL;
350 
351  if (!outputs->name || !inputs->name) {
352  ret = AVERROR(ENOMEM);
353  goto end;
354  }
355 
356  if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_spec,
357  &inputs, &outputs, NULL)) < 0)
358  goto end;
359 
360  if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
361  goto end;
362 
363  /* Fill FilteringContext */
364  fctx->buffersrc_ctx = buffersrc_ctx;
365  fctx->buffersink_ctx = buffersink_ctx;
366  fctx->filter_graph = filter_graph;
367 
368 end:
369  avfilter_inout_free(&inputs);
370  avfilter_inout_free(&outputs);
371 
372  return ret;
373 }
374 
375 static int init_filters(void)
376 {
377  const char *filter_spec;
378  unsigned int i;
379  int ret;
380  filter_ctx = av_malloc_array(ifmt_ctx->nb_streams, sizeof(*filter_ctx));
381  if (!filter_ctx)
382  return AVERROR(ENOMEM);
383 
384  for (i = 0; i < ifmt_ctx->nb_streams; i++) {
385  filter_ctx[i].buffersrc_ctx = NULL;
386  filter_ctx[i].buffersink_ctx = NULL;
387  filter_ctx[i].filter_graph = NULL;
388  if (!(ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO
389  || ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO))
390  continue;
391 
392 
393  if (ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
394  filter_spec = "null"; /* passthrough (dummy) filter for video */
395  else
396  filter_spec = "anull"; /* passthrough (dummy) filter for audio */
397  ret = init_filter(&filter_ctx[i], stream_ctx[i].dec_ctx,
398  stream_ctx[i].enc_ctx, filter_spec);
399  if (ret)
400  return ret;
401  }
402  return 0;
403 }
404 
405 static int encode_write_frame(AVFrame *filt_frame, unsigned int stream_index, int *got_frame) {
406  int ret;
407  int got_frame_local;
408  AVPacket enc_pkt;
409  int (*enc_func)(AVCodecContext *, AVPacket *, const AVFrame *, int *) =
410  (ifmt_ctx->streams[stream_index]->codecpar->codec_type ==
411  AVMEDIA_TYPE_VIDEO) ? avcodec_encode_video2 : avcodec_encode_audio2;
412 
413  if (!got_frame)
414  got_frame = &got_frame_local;
415 
416  av_log(NULL, AV_LOG_INFO, "Encoding frame\n");
417  /* encode filtered frame */
418  enc_pkt.data = NULL;
419  enc_pkt.size = 0;
420  av_init_packet(&enc_pkt);
421  ret = enc_func(stream_ctx[stream_index].enc_ctx, &enc_pkt,
422  filt_frame, got_frame);
423  av_frame_free(&filt_frame);
424  if (ret < 0)
425  return ret;
426  if (!(*got_frame))
427  return 0;
428 
429  /* prepare packet for muxing */
430  enc_pkt.stream_index = stream_index;
431  av_packet_rescale_ts(&enc_pkt,
432  stream_ctx[stream_index].enc_ctx->time_base,
433  ofmt_ctx->streams[stream_index]->time_base);
434 
435  av_log(NULL, AV_LOG_DEBUG, "Muxing frame\n");
436  /* mux encoded frame */
437  ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
438  return ret;
439 }
440 
441 static int filter_encode_write_frame(AVFrame *frame, unsigned int stream_index)
442 {
443  int ret;
444  AVFrame *filt_frame;
445 
446  av_log(NULL, AV_LOG_INFO, "Pushing decoded frame to filters\n");
447  /* push the decoded frame into the filtergraph */
448  ret = av_buffersrc_add_frame_flags(filter_ctx[stream_index].buffersrc_ctx,
449  frame, 0);
450  if (ret < 0) {
451  av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
452  return ret;
453  }
454 
455  /* pull filtered frames from the filtergraph */
456  while (1) {
457  filt_frame = av_frame_alloc();
458  if (!filt_frame) {
459  ret = AVERROR(ENOMEM);
460  break;
461  }
462  av_log(NULL, AV_LOG_INFO, "Pulling filtered frame from filters\n");
463  ret = av_buffersink_get_frame(filter_ctx[stream_index].buffersink_ctx,
464  filt_frame);
465  if (ret < 0) {
466  /* if no more frames for output - returns AVERROR(EAGAIN)
467  * if flushed and no more frames for output - returns AVERROR_EOF
468  * rewrite retcode to 0 to show it as normal procedure completion
469  */
470  if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
471  ret = 0;
472  av_frame_free(&filt_frame);
473  break;
474  }
475 
476  filt_frame->pict_type = AV_PICTURE_TYPE_NONE;
477  ret = encode_write_frame(filt_frame, stream_index, NULL);
478  if (ret < 0)
479  break;
480  }
481 
482  return ret;
483 }
484 
485 static int flush_encoder(unsigned int stream_index)
486 {
487  int ret;
488  int got_frame;
489 
490  if (!(stream_ctx[stream_index].enc_ctx->codec->capabilities &
491  AV_CODEC_CAP_DELAY))
492  return 0;
493 
494  while (1) {
495  av_log(NULL, AV_LOG_INFO, "Flushing stream #%u encoder\n", stream_index);
496  ret = encode_write_frame(NULL, stream_index, &got_frame);
497  if (ret < 0)
498  break;
499  if (!got_frame)
500  return 0;
501  }
502  return ret;
503 }
504 
505 int main(int argc, char **argv)
506 {
507  int ret;
508  AVPacket packet = { .data = NULL, .size = 0 };
509  AVFrame *frame = NULL;
510  enum AVMediaType type;
511  unsigned int stream_index;
512  unsigned int i;
513  int got_frame;
514  int (*dec_func)(AVCodecContext *, AVFrame *, int *, const AVPacket *);
515 
516  if (argc != 3) {
517  av_log(NULL, AV_LOG_ERROR, "Usage: %s <input file> <output file>\n", argv[0]);
518  return 1;
519  }
520 
521  if ((ret = open_input_file(argv[1])) < 0)
522  goto end;
523  if ((ret = open_output_file(argv[2])) < 0)
524  goto end;
525  if ((ret = init_filters()) < 0)
526  goto end;
527 
528  /* read all packets */
529  while (1) {
530  if ((ret = av_read_frame(ifmt_ctx, &packet)) < 0)
531  break;
532  stream_index = packet.stream_index;
533  type = ifmt_ctx->streams[packet.stream_index]->codecpar->codec_type;
534  av_log(NULL, AV_LOG_DEBUG, "Demuxer gave frame of stream_index %u\n",
535  stream_index);
536 
537  if (filter_ctx[stream_index].filter_graph) {
538  av_log(NULL, AV_LOG_DEBUG, "Going to reencode&filter the frame\n");
539  frame = av_frame_alloc();
540  if (!frame) {
541  ret = AVERROR(ENOMEM);
542  break;
543  }
544  av_packet_rescale_ts(&packet,
545  ifmt_ctx->streams[stream_index]->time_base,
546  stream_ctx[stream_index].dec_ctx->time_base);
547  dec_func = (type == AVMEDIA_TYPE_VIDEO) ? avcodec_decode_video2 :
548  avcodec_decode_audio4;
549  ret = dec_func(stream_ctx[stream_index].dec_ctx, frame,
550  &got_frame, &packet);
551  if (ret < 0) {
552  av_frame_free(&frame);
553  av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
554  break;
555  }
556 
557  if (got_frame) {
558  frame->pts = frame->best_effort_timestamp;
559  ret = filter_encode_write_frame(frame, stream_index);
560  av_frame_free(&frame);
561  if (ret < 0)
562  goto end;
563  } else {
564  av_frame_free(&frame);
565  }
566  } else {
567  /* remux this frame without reencoding */
568  av_packet_rescale_ts(&packet,
569  ifmt_ctx->streams[stream_index]->time_base,
570  ofmt_ctx->streams[stream_index]->time_base);
571 
572  ret = av_interleaved_write_frame(ofmt_ctx, &packet);
573  if (ret < 0)
574  goto end;
575  }
576  av_packet_unref(&packet);
577  }
578 
579  /* flush filters and encoders */
580  for (i = 0; i < ifmt_ctx->nb_streams; i++) {
581  /* flush filter */
582  if (!filter_ctx[i].filter_graph)
583  continue;
584  ret = filter_encode_write_frame(NULL, i);
585  if (ret < 0) {
586  av_log(NULL, AV_LOG_ERROR, "Flushing filter failed\n");
587  goto end;
588  }
589 
590  /* flush encoder */
591  ret = flush_encoder(i);
592  if (ret < 0) {
593  av_log(NULL, AV_LOG_ERROR, "Flushing encoder failed\n");
594  goto end;
595  }
596  }
597 
598  av_write_trailer(ofmt_ctx);
599 end:
600  av_packet_unref(&packet);
601  av_frame_free(&frame);
602  for (i = 0; i < ifmt_ctx->nb_streams; i++) {
603  avcodec_free_context(&stream_ctx[i].dec_ctx);
604  if (ofmt_ctx && ofmt_ctx->nb_streams > i && ofmt_ctx->streams[i] && stream_ctx[i].enc_ctx)
605  avcodec_free_context(&stream_ctx[i].enc_ctx);
606  if (filter_ctx && filter_ctx[i].filter_graph)
607  avfilter_graph_free(&filter_ctx[i].filter_graph);
608  }
609  av_free(filter_ctx);
610  av_free(stream_ctx);
611  avformat_close_input(&ifmt_ctx);
612  if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE))
613  avio_closep(&ofmt_ctx->pb);
614  avformat_free_context(ofmt_ctx);
615 
616  if (ret < 0)
617  av_log(NULL, AV_LOG_ERROR, "Error occurred: %s\n", av_err2str(ret));
618 
619  return ret ? 1 : 0;
620 }
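Note: this example targets the FFmpeg 3.x/4.x API; avcodec_decode_video2(), avcodec_decode_audio4(), avcodec_encode_video2() and avcodec_encode_audio2() are deprecated there and removed in later releases. A minimal sketch of how the decode step could be rewritten with the send/receive API follows; the decode_packet() helper name is hypothetical and error handling is trimmed, so treat it as an illustration under that assumption rather than the upstream replacement.

/* Hypothetical send/receive variant of the decode step (assumes the same
 * globals and filter_encode_write_frame() from the listing above). */
static int decode_packet(AVCodecContext *dec_ctx, const AVPacket *pkt,
                         AVFrame *frame, unsigned int stream_index)
{
    /* a NULL pkt flushes the decoder at EOF */
    int ret = avcodec_send_packet(dec_ctx, pkt);
    if (ret < 0)
        return ret;

    /* drain every frame the decoder has ready */
    while (ret >= 0) {
        ret = avcodec_receive_frame(dec_ctx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;            /* needs more input, or fully flushed */
        if (ret < 0)
            return ret;          /* real decoding error */

        frame->pts = frame->best_effort_timestamp;
        ret = filter_encode_write_frame(frame, stream_index);
        av_frame_unref(frame);
        if (ret < 0)
            return ret;
    }
    return 0;
}

With such a helper, both decoder branches in main() would call it instead of selecting dec_func through got_frame, and flushing the decoders at EOF becomes one more call with a NULL packet.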
Definition: mem.c:191