mux.c
/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/**
 * @file libavformat muxing API usage example
 * @example mux.c
 *
 * Generate a synthetic audio and video signal and mux them to a media file in
 * any supported libavformat format. The default codecs are used.
 */
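
/*
 * Build note: one possible way to compile this example, assuming pkg-config
 * can locate the FFmpeg development packages, is
 *
 *   gcc mux.c -o mux $(pkg-config --cflags --libs \
 *       libavformat libavcodec libswresample libswscale libavutil) -lm
 *
 * Exact package names and flags depend on the local installation.
 */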

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>

#include <libavutil/avassert.h>
#include <libavutil/channel_layout.h>
#include <libavutil/opt.h>
#include <libavutil/mathematics.h>
#include <libavutil/timestamp.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libswresample/swresample.h>

#define STREAM_DURATION   10.0
#define STREAM_FRAME_RATE 25 /* 25 images/s */
#define STREAM_PIX_FMT    AV_PIX_FMT_YUV420P /* default pix_fmt */

#define SCALE_FLAGS SWS_BICUBIC

// a wrapper around a single output AVStream
typedef struct OutputStream {
    AVStream *st;
    AVCodecContext *enc;

    /* pts of the next frame that will be generated */
    int64_t next_pts;
    int samples_count;

    AVFrame *frame;
    AVFrame *tmp_frame;

    AVPacket *tmp_pkt;

    float t, tincr, tincr2;

    struct SwsContext *sws_ctx;
    struct SwrContext *swr_ctx;
} OutputStream;

static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt)
{
    AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base;

    printf("pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
           av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, time_base),
           av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, time_base),
           av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, time_base),
           pkt->stream_index);
}

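/*
 * Encode one frame (pass frame == NULL to flush the encoder) and write all
 * packets it produces to the output file.
 * Returns 1 once the encoder has been fully drained (AVERROR_EOF), 0 otherwise.
 */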
static int write_frame(AVFormatContext *fmt_ctx, AVCodecContext *c,
                       AVStream *st, AVFrame *frame, AVPacket *pkt)
{
    int ret;

    // send the frame to the encoder
    ret = avcodec_send_frame(c, frame);
    if (ret < 0) {
        fprintf(stderr, "Error sending a frame to the encoder: %s\n",
                av_err2str(ret));
        exit(1);
    }

    while (ret >= 0) {
        ret = avcodec_receive_packet(c, pkt);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            break;
        else if (ret < 0) {
            fprintf(stderr, "Error encoding a frame: %s\n", av_err2str(ret));
            exit(1);
        }

        /* rescale output packet timestamp values from codec to stream timebase */
        av_packet_rescale_ts(pkt, c->time_base, st->time_base);
        pkt->stream_index = st->index;

        /* Write the compressed frame to the media file. */
        log_packet(fmt_ctx, pkt);
        ret = av_interleaved_write_frame(fmt_ctx, pkt);
        /* pkt is now blank (av_interleaved_write_frame() takes ownership of
         * its contents and resets pkt), so that no unreferencing is necessary.
         * This would be different if one used av_write_frame(). */
        if (ret < 0) {
            fprintf(stderr, "Error while writing output packet: %s\n", av_err2str(ret));
            exit(1);
        }
    }

    return ret == AVERROR_EOF ? 1 : 0;
}

/* Add an output stream. */
static void add_stream(OutputStream *ost, AVFormatContext *oc,
                       const AVCodec **codec,
                       enum AVCodecID codec_id)
{
    AVCodecContext *c;
    int i;

    /* find the encoder */
    *codec = avcodec_find_encoder(codec_id);
    if (!(*codec)) {
        fprintf(stderr, "Could not find encoder for '%s'\n",
                avcodec_get_name(codec_id));
        exit(1);
    }

    ost->tmp_pkt = av_packet_alloc();
    if (!ost->tmp_pkt) {
        fprintf(stderr, "Could not allocate AVPacket\n");
        exit(1);
    }

    ost->st = avformat_new_stream(oc, NULL);
    if (!ost->st) {
        fprintf(stderr, "Could not allocate stream\n");
        exit(1);
    }
    ost->st->id = oc->nb_streams-1;
    c = avcodec_alloc_context3(*codec);
    if (!c) {
        fprintf(stderr, "Could not alloc an encoding context\n");
        exit(1);
    }
    ost->enc = c;

    switch ((*codec)->type) {
    case AVMEDIA_TYPE_AUDIO:
        c->sample_fmt = (*codec)->sample_fmts ?
            (*codec)->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
        c->bit_rate = 64000;
        c->sample_rate = 44100;
        if ((*codec)->supported_samplerates) {
            c->sample_rate = (*codec)->supported_samplerates[0];
            for (i = 0; (*codec)->supported_samplerates[i]; i++) {
                if ((*codec)->supported_samplerates[i] == 44100)
                    c->sample_rate = 44100;
            }
        }
        av_channel_layout_copy(&c->ch_layout, &(AVChannelLayout)AV_CHANNEL_LAYOUT_STEREO);
        ost->st->time_base = (AVRational){ 1, c->sample_rate };
        break;

    case AVMEDIA_TYPE_VIDEO:
        c->codec_id = codec_id;

        c->bit_rate = 400000;
        /* Resolution must be a multiple of two. */
        c->width  = 352;
        c->height = 288;
        /* timebase: This is the fundamental unit of time (in seconds) in terms
         * of which frame timestamps are represented. For fixed-fps content,
         * timebase should be 1/framerate and timestamp increments should be
         * identical to 1. */
        ost->st->time_base = (AVRational){ 1, STREAM_FRAME_RATE };
        c->time_base = ost->st->time_base;

        c->gop_size = 12; /* emit one intra frame every twelve frames at most */
        c->pix_fmt  = STREAM_PIX_FMT;
        if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
            /* just for testing, we also add B-frames */
            c->max_b_frames = 2;
        }
        if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
            /* Needed to avoid using macroblocks in which some coeffs overflow.
             * This does not happen with normal video, it just happens here as
             * the motion of the chroma plane does not match the luma plane. */
            c->mb_decision = 2;
        }
        break;

    default:
        break;
    }

    /* Some formats want stream headers to be separate. */
    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
        c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
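
/*
 * Note that add_stream() only selects the encoder and fills in the encoding
 * parameters; the encoder itself is opened later, in open_audio() and
 * open_video() below.
 */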

/**************************************************************/
/* audio output */

static AVFrame *alloc_audio_frame(enum AVSampleFormat sample_fmt,
                                  const AVChannelLayout *channel_layout,
                                  int sample_rate, int nb_samples)
{
    AVFrame *frame = av_frame_alloc();
    if (!frame) {
        fprintf(stderr, "Error allocating an audio frame\n");
        exit(1);
    }

    frame->format = sample_fmt;
    av_channel_layout_copy(&frame->ch_layout, channel_layout);
    frame->sample_rate = sample_rate;
    frame->nb_samples = nb_samples;

    if (nb_samples) {
        if (av_frame_get_buffer(frame, 0) < 0) {
            fprintf(stderr, "Error allocating an audio buffer\n");
            exit(1);
        }
    }

    return frame;
}

static void open_audio(AVFormatContext *oc, const AVCodec *codec,
                       OutputStream *ost, AVDictionary *opt_arg)
{
    AVCodecContext *c;
    int nb_samples;
    int ret;
    AVDictionary *opt = NULL;

    c = ost->enc;

    /* open it */
    av_dict_copy(&opt, opt_arg, 0);
    ret = avcodec_open2(c, codec, &opt);
    av_dict_free(&opt);
    if (ret < 0) {
        fprintf(stderr, "Could not open audio codec: %s\n", av_err2str(ret));
        exit(1);
    }

    /* init signal generator */
    ost->t     = 0;
    ost->tincr = 2 * M_PI * 110.0 / c->sample_rate;
    /* increment frequency by 110 Hz per second */
    ost->tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;

    if (c->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE)
        nb_samples = 10000;
    else
        nb_samples = c->frame_size;

    ost->frame     = alloc_audio_frame(c->sample_fmt, &c->ch_layout,
                                       c->sample_rate, nb_samples);
    ost->tmp_frame = alloc_audio_frame(AV_SAMPLE_FMT_S16, &c->ch_layout,
                                       c->sample_rate, nb_samples);

    /* copy the stream parameters to the muxer */
    ret = avcodec_parameters_from_context(ost->st->codecpar, c);
    if (ret < 0) {
        fprintf(stderr, "Could not copy the stream parameters\n");
        exit(1);
    }

    /* create resampler context */
    ost->swr_ctx = swr_alloc();
    if (!ost->swr_ctx) {
        fprintf(stderr, "Could not allocate resampler context\n");
        exit(1);
    }

    /* set options */
    av_opt_set_chlayout  (ost->swr_ctx, "in_chlayout",     &c->ch_layout,     0);
    av_opt_set_int       (ost->swr_ctx, "in_sample_rate",   c->sample_rate,   0);
    av_opt_set_sample_fmt(ost->swr_ctx, "in_sample_fmt",    AV_SAMPLE_FMT_S16, 0);
    av_opt_set_chlayout  (ost->swr_ctx, "out_chlayout",    &c->ch_layout,     0);
    av_opt_set_int       (ost->swr_ctx, "out_sample_rate",  c->sample_rate,   0);
    av_opt_set_sample_fmt(ost->swr_ctx, "out_sample_fmt",   c->sample_fmt,    0);

    /* initialize the resampling context */
    if ((ret = swr_init(ost->swr_ctx)) < 0) {
        fprintf(stderr, "Failed to initialize the resampling context\n");
        exit(1);
    }
}

/* Prepare a 16 bit dummy audio frame of 'frame_size' samples and
 * 'nb_channels' channels. */
static AVFrame *get_audio_frame(OutputStream *ost)
{
    AVFrame *frame = ost->tmp_frame;
    int j, i, v;
    int16_t *q = (int16_t*)frame->data[0];

    /* check if we want to generate more frames */
    if (av_compare_ts(ost->next_pts, ost->enc->time_base,
                      STREAM_DURATION, (AVRational){ 1, 1 }) > 0)
        return NULL;

    for (j = 0; j < frame->nb_samples; j++) {
        v = (int)(sin(ost->t) * 10000);
        for (i = 0; i < ost->enc->ch_layout.nb_channels; i++)
            *q++ = v;
        ost->t     += ost->tincr;
        ost->tincr += ost->tincr2;
    }

    frame->pts = ost->next_pts;
    ost->next_pts += frame->nb_samples;

    return frame;
}

/*
 * encode one audio frame and send it to the muxer
 * return 1 when encoding is finished, 0 otherwise
 */
static int write_audio_frame(AVFormatContext *oc, OutputStream *ost)
{
    AVCodecContext *c;
    AVFrame *frame;
    int ret;
    int dst_nb_samples;

    c = ost->enc;

    frame = get_audio_frame(ost);

    if (frame) {
        /* convert samples from native format to destination codec format, using the resampler */
        /* compute destination number of samples */
        dst_nb_samples = av_rescale_rnd(swr_get_delay(ost->swr_ctx, c->sample_rate) + frame->nb_samples,
                                        c->sample_rate, c->sample_rate, AV_ROUND_UP);
        av_assert0(dst_nb_samples == frame->nb_samples);

        /* when we pass a frame to the encoder, it may keep a reference to it
         * internally;
         * make sure we do not overwrite it here
         */
        ret = av_frame_make_writable(ost->frame);
        if (ret < 0)
            exit(1);

        /* convert to destination format */
        ret = swr_convert(ost->swr_ctx,
                          ost->frame->data, dst_nb_samples,
                          (const uint8_t **)frame->data, frame->nb_samples);
        if (ret < 0) {
            fprintf(stderr, "Error while converting\n");
            exit(1);
        }
        frame = ost->frame;

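        /* the running sample count is in 1/sample_rate units; rescale it to
         * the encoder time base with av_rescale_q() to get the frame pts */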
        frame->pts = av_rescale_q(ost->samples_count, (AVRational){1, c->sample_rate}, c->time_base);
        ost->samples_count += dst_nb_samples;
    }

    return write_frame(oc, c, ost->st, frame, ost->tmp_pkt);
}

/**************************************************************/
/* video output */

static AVFrame *alloc_frame(enum AVPixelFormat pix_fmt, int width, int height)
{
    AVFrame *frame;
    int ret;

    frame = av_frame_alloc();
    if (!frame)
        return NULL;

    frame->format = pix_fmt;
    frame->width  = width;
    frame->height = height;

    /* allocate the buffers for the frame data */
    ret = av_frame_get_buffer(frame, 0);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate frame data.\n");
        exit(1);
    }

    return frame;
}

static void open_video(AVFormatContext *oc, const AVCodec *codec,
                       OutputStream *ost, AVDictionary *opt_arg)
{
    int ret;
    AVCodecContext *c = ost->enc;
    AVDictionary *opt = NULL;

    av_dict_copy(&opt, opt_arg, 0);

    /* open the codec */
    ret = avcodec_open2(c, codec, &opt);
    av_dict_free(&opt);
    if (ret < 0) {
        fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));
        exit(1);
    }

    /* allocate and init a re-usable frame */
    ost->frame = alloc_frame(c->pix_fmt, c->width, c->height);
    if (!ost->frame) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }

    /* If the output format is not YUV420P, then a temporary YUV420P
     * picture is needed too. It is then converted to the required
     * output format. */
    ost->tmp_frame = NULL;
    if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
        ost->tmp_frame = alloc_frame(AV_PIX_FMT_YUV420P, c->width, c->height);
        if (!ost->tmp_frame) {
            fprintf(stderr, "Could not allocate temporary video frame\n");
            exit(1);
        }
    }

    /* copy the stream parameters to the muxer */
    ret = avcodec_parameters_from_context(ost->st->codecpar, c);
    if (ret < 0) {
        fprintf(stderr, "Could not copy the stream parameters\n");
        exit(1);
    }
}

/* Prepare a dummy image. */
static void fill_yuv_image(AVFrame *pict, int frame_index,
                           int width, int height)
{
    int x, y, i;

    i = frame_index;

    /* Y */
    for (y = 0; y < height; y++)
        for (x = 0; x < width; x++)
            pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;

    /* Cb and Cr */
    for (y = 0; y < height / 2; y++) {
        for (x = 0; x < width / 2; x++) {
            pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
            pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
        }
    }
}

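/*
 * Return the next synthetic video frame, converting it with libswscale if the
 * encoder does not accept YUV420P directly; returns NULL once STREAM_DURATION
 * seconds worth of frames have been produced.
 */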
static AVFrame *get_video_frame(OutputStream *ost)
{
    AVCodecContext *c = ost->enc;

    /* check if we want to generate more frames */
    if (av_compare_ts(ost->next_pts, c->time_base,
                      STREAM_DURATION, (AVRational){ 1, 1 }) > 0)
        return NULL;

    /* when we pass a frame to the encoder, it may keep a reference to it
     * internally; make sure we do not overwrite it here */
    if (av_frame_make_writable(ost->frame) < 0)
        exit(1);

    if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
        /* as we only generate a YUV420P picture, we must convert it
         * to the codec pixel format if needed */
        if (!ost->sws_ctx) {
            ost->sws_ctx = sws_getContext(c->width, c->height,
                                          AV_PIX_FMT_YUV420P,
                                          c->width, c->height,
                                          c->pix_fmt,
                                          SCALE_FLAGS, NULL, NULL, NULL);
            if (!ost->sws_ctx) {
                fprintf(stderr,
                        "Could not initialize the conversion context\n");
                exit(1);
            }
        }
        fill_yuv_image(ost->tmp_frame, ost->next_pts, c->width, c->height);
        sws_scale(ost->sws_ctx, (const uint8_t * const *) ost->tmp_frame->data,
                  ost->tmp_frame->linesize, 0, c->height, ost->frame->data,
                  ost->frame->linesize);
    } else {
        fill_yuv_image(ost->frame, ost->next_pts, c->width, c->height);
    }

    ost->frame->pts = ost->next_pts++;

    return ost->frame;
}

/*
 * encode one video frame and send it to the muxer
 * return 1 when encoding is finished, 0 otherwise
 */
static int write_video_frame(AVFormatContext *oc, OutputStream *ost)
{
    return write_frame(oc, ost->enc, ost->st, get_video_frame(ost), ost->tmp_pkt);
}

static void close_stream(AVFormatContext *oc, OutputStream *ost)
{
    avcodec_free_context(&ost->enc);
    av_frame_free(&ost->frame);
    av_frame_free(&ost->tmp_frame);
    av_packet_free(&ost->tmp_pkt);
    sws_freeContext(ost->sws_ctx);
    swr_free(&ost->swr_ctx);
}

/**************************************************************/
/* media file output */

int main(int argc, char **argv)
{
    OutputStream video_st = { 0 }, audio_st = { 0 };
    const AVOutputFormat *fmt;
    const char *filename;
    AVFormatContext *oc;
    const AVCodec *audio_codec, *video_codec;
    int ret;
    int have_video = 0, have_audio = 0;
    int encode_video = 0, encode_audio = 0;
    AVDictionary *opt = NULL;
    int i;

    if (argc < 2) {
        printf("usage: %s output_file\n"
               "API example program to output a media file with libavformat.\n"
               "This program generates a synthetic audio and video stream, encodes and\n"
               "muxes them into a file named output_file.\n"
               "The output format is automatically guessed according to the file extension.\n"
               "Raw images can also be output by using '%%d' in the filename.\n"
               "\n", argv[0]);
        return 1;
    }

    filename = argv[1];
    for (i = 2; i+1 < argc; i+=2) {
        if (!strcmp(argv[i], "-flags") || !strcmp(argv[i], "-fflags"))
            av_dict_set(&opt, argv[i]+1, argv[i+1], 0);
    }

    /* allocate the output media context */
    avformat_alloc_output_context2(&oc, NULL, NULL, filename);
    if (!oc) {
        printf("Could not deduce output format from file extension: using MPEG.\n");
        avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
    }
    if (!oc)
        return 1;

    fmt = oc->oformat;

    /* Add the audio and video streams using the default format codecs
     * and initialize the codecs. */
    if (fmt->video_codec != AV_CODEC_ID_NONE) {
        add_stream(&video_st, oc, &video_codec, fmt->video_codec);
        have_video = 1;
        encode_video = 1;
    }
    if (fmt->audio_codec != AV_CODEC_ID_NONE) {
        add_stream(&audio_st, oc, &audio_codec, fmt->audio_codec);
        have_audio = 1;
        encode_audio = 1;
    }

    /* Now that all the parameters are set, we can open the audio and
     * video codecs and allocate the necessary encode buffers. */
    if (have_video)
        open_video(oc, video_codec, &video_st, opt);

    if (have_audio)
        open_audio(oc, audio_codec, &audio_st, opt);

    av_dump_format(oc, 0, filename, 1);

    /* open the output file, if needed */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            fprintf(stderr, "Could not open '%s': %s\n", filename,
                    av_err2str(ret));
            return 1;
        }
    }

    /* Write the stream header, if any. */
    ret = avformat_write_header(oc, &opt);
    if (ret < 0) {
        fprintf(stderr, "Error occurred when opening output file: %s\n",
                av_err2str(ret));
        return 1;
    }

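    /* Interleave by always encoding the stream that is currently behind:
     * av_compare_ts() compares each stream's next pts in real time, so audio
     * and video advance together and the muxer receives well-interleaved
     * packets. */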
    while (encode_video || encode_audio) {
        /* select the stream to encode */
        if (encode_video &&
            (!encode_audio || av_compare_ts(video_st.next_pts, video_st.enc->time_base,
                                            audio_st.next_pts, audio_st.enc->time_base) <= 0)) {
            encode_video = !write_video_frame(oc, &video_st);
        } else {
            encode_audio = !write_audio_frame(oc, &audio_st);
        }
    }

    av_write_trailer(oc);

    /* Close each codec. */
    if (have_video)
        close_stream(oc, &video_st);
    if (have_audio)
        close_stream(oc, &audio_st);

    if (!(fmt->flags & AVFMT_NOFILE))
        /* Close the output file. */
        avio_closep(&oc->pb);

    /* free the stream */
    avformat_free_context(oc);

    return 0;
}