muxing.c
/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/**
 * @file
 * libavformat API example.
 *
 * Output a media file in any supported libavformat format. The default
 * codecs are used.
 * @example muxing.c
 */

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>

#include <libavutil/avassert.h>
#include <libavutil/channel_layout.h>
#include <libavutil/opt.h>
#include <libavutil/mathematics.h>
#include <libavutil/timestamp.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libswresample/swresample.h>
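
/*
 * A typical way to build this example on a system where pkg-config can find
 * the FFmpeg libraries (adjust the command to your own setup):
 *
 *   gcc muxing.c -o muxing \
 *       $(pkg-config --cflags --libs libavformat libavcodec libswresample \
 *                                    libswscale libavutil) -lm
 */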

#define STREAM_DURATION   10.0
#define STREAM_FRAME_RATE 25 /* 25 images/s */
#define STREAM_PIX_FMT    AV_PIX_FMT_YUV420P /* default pix_fmt */

#define SCALE_FLAGS SWS_BICUBIC

// a wrapper around a single output AVStream
typedef struct OutputStream {
    AVStream *st;
    AVCodecContext *enc;

    /* pts of the next frame that will be generated */
    int64_t next_pts;
    int samples_count;

    AVFrame *frame;
    AVFrame *tmp_frame;

    float t, tincr, tincr2;

    struct SwsContext *sws_ctx;
    struct SwrContext *swr_ctx;
} OutputStream;

static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt)
{
    AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base;

    printf("pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
           av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, time_base),
           av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, time_base),
           av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, time_base),
           pkt->stream_index);
}

static int write_frame(AVFormatContext *fmt_ctx, AVCodecContext *c,
                       AVStream *st, AVFrame *frame)
{
    int ret;

    // send the frame to the encoder
    ret = avcodec_send_frame(c, frame);
    if (ret < 0) {
        fprintf(stderr, "Error sending a frame to the encoder: %s\n",
                av_err2str(ret));
        exit(1);
    }

    while (ret >= 0) {
        AVPacket pkt = { 0 };

        ret = avcodec_receive_packet(c, &pkt);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            break;
        else if (ret < 0) {
            fprintf(stderr, "Error encoding a frame: %s\n", av_err2str(ret));
            exit(1);
        }

        /* rescale output packet timestamp values from codec to stream timebase */
        av_packet_rescale_ts(&pkt, c->time_base, st->time_base);
        pkt.stream_index = st->index;

        /* Write the compressed frame to the media file. */
        log_packet(fmt_ctx, &pkt);
        ret = av_interleaved_write_frame(fmt_ctx, &pkt);
        av_packet_unref(&pkt);
        if (ret < 0) {
            fprintf(stderr, "Error while writing output packet: %s\n", av_err2str(ret));
            exit(1);
        }
    }

    return ret == AVERROR_EOF ? 1 : 0;
}
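
/* Note: once the frame generators below run out of data (after STREAM_DURATION
 * seconds) they return NULL, and passing that NULL frame to write_frame() puts
 * the encoder into draining mode: avcodec_receive_packet() delivers the
 * remaining buffered packets and finally returns AVERROR_EOF, so write_frame()
 * returns 1 and the main loop stops that stream. */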

/* Add an output stream. */
static void add_stream(OutputStream *ost, AVFormatContext *oc,
                       AVCodec **codec,
                       enum AVCodecID codec_id)
{
    AVCodecContext *c;
    int i;

    /* find the encoder */
    *codec = avcodec_find_encoder(codec_id);
    if (!(*codec)) {
        fprintf(stderr, "Could not find encoder for '%s'\n",
                avcodec_get_name(codec_id));
        exit(1);
    }

    ost->st = avformat_new_stream(oc, NULL);
    if (!ost->st) {
        fprintf(stderr, "Could not allocate stream\n");
        exit(1);
    }
    ost->st->id = oc->nb_streams-1;
    c = avcodec_alloc_context3(*codec);
    if (!c) {
        fprintf(stderr, "Could not alloc an encoding context\n");
        exit(1);
    }
    ost->enc = c;

    switch ((*codec)->type) {
    case AVMEDIA_TYPE_AUDIO:
        c->sample_fmt  = (*codec)->sample_fmts ?
            (*codec)->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
        c->bit_rate    = 64000;
        c->sample_rate = 44100;
        if ((*codec)->supported_samplerates) {
            c->sample_rate = (*codec)->supported_samplerates[0];
            for (i = 0; (*codec)->supported_samplerates[i]; i++) {
                if ((*codec)->supported_samplerates[i] == 44100)
                    c->sample_rate = 44100;
            }
        }
        c->channel_layout = AV_CH_LAYOUT_STEREO;
        if ((*codec)->channel_layouts) {
            c->channel_layout = (*codec)->channel_layouts[0];
            for (i = 0; (*codec)->channel_layouts[i]; i++) {
                if ((*codec)->channel_layouts[i] == AV_CH_LAYOUT_STEREO)
                    c->channel_layout = AV_CH_LAYOUT_STEREO;
            }
        }
        c->channels        = av_get_channel_layout_nb_channels(c->channel_layout);
        ost->st->time_base = (AVRational){ 1, c->sample_rate };
        break;

    case AVMEDIA_TYPE_VIDEO:
        c->codec_id = codec_id;

        c->bit_rate = 400000;
        /* Resolution must be a multiple of two. */
        c->width    = 352;
        c->height   = 288;
        /* timebase: This is the fundamental unit of time (in seconds) in terms
         * of which frame timestamps are represented. For fixed-fps content,
         * timebase should be 1/framerate and timestamp increments should be
         * identical to 1. */
        ost->st->time_base = (AVRational){ 1, STREAM_FRAME_RATE };
        c->time_base       = ost->st->time_base;

        c->gop_size = 12; /* emit one intra frame every twelve frames at most */
        c->pix_fmt  = STREAM_PIX_FMT;
        if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
            /* just for testing, we also add B-frames */
            c->max_b_frames = 2;
        }
        if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
            /* Needed to avoid using macroblocks in which some coeffs overflow.
             * This does not happen with normal video, it just happens here as
             * the motion of the chroma plane does not match the luma plane. */
            c->mb_decision = 2;
        }
        break;

    default:
        break;
    }

    /* Some formats want stream headers to be separate. */
    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
        c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}

/**************************************************************/
/* audio output */

static AVFrame *alloc_audio_frame(enum AVSampleFormat sample_fmt,
                                  uint64_t channel_layout,
                                  int sample_rate, int nb_samples)
{
    AVFrame *frame = av_frame_alloc();
    int ret;

    if (!frame) {
        fprintf(stderr, "Error allocating an audio frame\n");
        exit(1);
    }

    frame->format         = sample_fmt;
    frame->channel_layout = channel_layout;
    frame->sample_rate    = sample_rate;
    frame->nb_samples     = nb_samples;

    if (nb_samples) {
        ret = av_frame_get_buffer(frame, 0);
        if (ret < 0) {
            fprintf(stderr, "Error allocating an audio buffer\n");
            exit(1);
        }
    }

    return frame;
}

static void open_audio(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
{
    AVCodecContext *c;
    int nb_samples;
    int ret;
    AVDictionary *opt = NULL;

    c = ost->enc;

    /* open it */
    av_dict_copy(&opt, opt_arg, 0);
    ret = avcodec_open2(c, codec, &opt);
    av_dict_free(&opt);
    if (ret < 0) {
        fprintf(stderr, "Could not open audio codec: %s\n", av_err2str(ret));
        exit(1);
    }

    /* init signal generator */
    ost->t     = 0;
    ost->tincr = 2 * M_PI * 110.0 / c->sample_rate;
    /* increment frequency by 110 Hz per second */
    ost->tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;

    if (c->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE)
        nb_samples = 10000;
    else
        nb_samples = c->frame_size;

    ost->frame     = alloc_audio_frame(c->sample_fmt, c->channel_layout,
                                       c->sample_rate, nb_samples);
    ost->tmp_frame = alloc_audio_frame(AV_SAMPLE_FMT_S16, c->channel_layout,
                                       c->sample_rate, nb_samples);

    /* copy the stream parameters to the muxer */
    ret = avcodec_parameters_from_context(ost->st->codecpar, c);
    if (ret < 0) {
        fprintf(stderr, "Could not copy the stream parameters\n");
        exit(1);
    }

    /* create resampler context */
    ost->swr_ctx = swr_alloc();
    if (!ost->swr_ctx) {
        fprintf(stderr, "Could not allocate resampler context\n");
        exit(1);
    }

    /* set options */
    av_opt_set_int       (ost->swr_ctx, "in_channel_count",  c->channels,       0);
    av_opt_set_int       (ost->swr_ctx, "in_sample_rate",    c->sample_rate,    0);
    av_opt_set_sample_fmt(ost->swr_ctx, "in_sample_fmt",     AV_SAMPLE_FMT_S16, 0);
    av_opt_set_int       (ost->swr_ctx, "out_channel_count", c->channels,       0);
    av_opt_set_int       (ost->swr_ctx, "out_sample_rate",   c->sample_rate,    0);
    av_opt_set_sample_fmt(ost->swr_ctx, "out_sample_fmt",    c->sample_fmt,     0);

    /* initialize the resampling context */
    if ((ret = swr_init(ost->swr_ctx)) < 0) {
        fprintf(stderr, "Failed to initialize the resampling context\n");
        exit(1);
    }
}
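
/* Note: the resampler above converts only the sample format (interleaved S16
 * samples from the signal generator into the encoder's native format); the
 * sample rate and channel count are identical on both sides, which is why
 * write_audio_frame() can assert that the number of samples is unchanged. */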

/* Prepare a 16 bit dummy audio frame of 'frame_size' samples and
 * 'nb_channels' channels. */
static AVFrame *get_audio_frame(OutputStream *ost)
{
    AVFrame *frame = ost->tmp_frame;
    int j, i, v;
    int16_t *q = (int16_t*)frame->data[0];

    /* check if we want to generate more frames */
    if (av_compare_ts(ost->next_pts, ost->enc->time_base,
                      STREAM_DURATION, (AVRational){ 1, 1 }) > 0)
        return NULL;

    for (j = 0; j < frame->nb_samples; j++) {
        v = (int)(sin(ost->t) * 10000);
        for (i = 0; i < ost->enc->channels; i++)
            *q++ = v;
        ost->t     += ost->tincr;
        ost->tincr += ost->tincr2;
    }

    frame->pts = ost->next_pts;
    ost->next_pts += frame->nb_samples;

    return frame;
}

/*
 * encode one audio frame and send it to the muxer
 * return 1 when encoding is finished, 0 otherwise
 */
static int write_audio_frame(AVFormatContext *oc, OutputStream *ost)
{
    AVCodecContext *c;
    AVFrame *frame;
    int ret;
    int dst_nb_samples;

    c = ost->enc;

    frame = get_audio_frame(ost);

    if (frame) {
        /* convert samples from native format to destination codec format, using the resampler */
        /* compute destination number of samples */
        dst_nb_samples = av_rescale_rnd(swr_get_delay(ost->swr_ctx, c->sample_rate) + frame->nb_samples,
                                        c->sample_rate, c->sample_rate, AV_ROUND_UP);
        av_assert0(dst_nb_samples == frame->nb_samples);

        /* when we pass a frame to the encoder, it may keep a reference to it
         * internally;
         * make sure we do not overwrite it here
         */
        ret = av_frame_make_writable(ost->frame);
        if (ret < 0)
            exit(1);

        /* convert to destination format */
        ret = swr_convert(ost->swr_ctx,
                          ost->frame->data, dst_nb_samples,
                          (const uint8_t **)frame->data, frame->nb_samples);
        if (ret < 0) {
            fprintf(stderr, "Error while converting\n");
            exit(1);
        }
        frame = ost->frame;

        frame->pts = av_rescale_q(ost->samples_count, (AVRational){1, c->sample_rate}, c->time_base);
        ost->samples_count += dst_nb_samples;
    }

    return write_frame(oc, c, ost->st, frame);
}

/**************************************************************/
/* video output */

static AVFrame *alloc_picture(enum AVPixelFormat pix_fmt, int width, int height)
{
    AVFrame *picture;
    int ret;

    picture = av_frame_alloc();
    if (!picture)
        return NULL;

    picture->format = pix_fmt;
    picture->width  = width;
    picture->height = height;

    /* allocate the buffers for the frame data */
    ret = av_frame_get_buffer(picture, 0);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate frame data.\n");
        exit(1);
    }

    return picture;
}

static void open_video(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
{
    int ret;
    AVCodecContext *c = ost->enc;
    AVDictionary *opt = NULL;

    av_dict_copy(&opt, opt_arg, 0);

    /* open the codec */
    ret = avcodec_open2(c, codec, &opt);
    av_dict_free(&opt);
    if (ret < 0) {
        fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));
        exit(1);
    }

    /* allocate and init a re-usable frame */
    ost->frame = alloc_picture(c->pix_fmt, c->width, c->height);
    if (!ost->frame) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }

    /* If the output format is not YUV420P, then a temporary YUV420P
     * picture is needed too. It is then converted to the required
     * output format. */
    ost->tmp_frame = NULL;
    if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
        ost->tmp_frame = alloc_picture(AV_PIX_FMT_YUV420P, c->width, c->height);
        if (!ost->tmp_frame) {
            fprintf(stderr, "Could not allocate temporary picture\n");
            exit(1);
        }
    }

    /* copy the stream parameters to the muxer */
    ret = avcodec_parameters_from_context(ost->st->codecpar, c);
    if (ret < 0) {
        fprintf(stderr, "Could not copy the stream parameters\n");
        exit(1);
    }
}

/* Prepare a dummy image. */
static void fill_yuv_image(AVFrame *pict, int frame_index,
                           int width, int height)
{
    int x, y, i;

    i = frame_index;

    /* Y */
    for (y = 0; y < height; y++)
        for (x = 0; x < width; x++)
            pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;

    /* Cb and Cr */
    for (y = 0; y < height / 2; y++) {
        for (x = 0; x < width / 2; x++) {
            pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
            pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
        }
    }
}

static AVFrame *get_video_frame(OutputStream *ost)
{
    AVCodecContext *c = ost->enc;

    /* check if we want to generate more frames */
    if (av_compare_ts(ost->next_pts, c->time_base,
                      STREAM_DURATION, (AVRational){ 1, 1 }) > 0)
        return NULL;

    /* when we pass a frame to the encoder, it may keep a reference to it
     * internally; make sure we do not overwrite it here */
    if (av_frame_make_writable(ost->frame) < 0)
        exit(1);

    if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
        /* as we only generate a YUV420P picture, we must convert it
         * to the codec pixel format if needed */
        if (!ost->sws_ctx) {
            ost->sws_ctx = sws_getContext(c->width, c->height,
                                          AV_PIX_FMT_YUV420P,
                                          c->width, c->height,
                                          c->pix_fmt,
                                          SCALE_FLAGS, NULL, NULL, NULL);
            if (!ost->sws_ctx) {
                fprintf(stderr,
                        "Could not initialize the conversion context\n");
                exit(1);
            }
        }
        fill_yuv_image(ost->tmp_frame, ost->next_pts, c->width, c->height);
        sws_scale(ost->sws_ctx, (const uint8_t * const *) ost->tmp_frame->data,
                  ost->tmp_frame->linesize, 0, c->height, ost->frame->data,
                  ost->frame->linesize);
    } else {
        fill_yuv_image(ost->frame, ost->next_pts, c->width, c->height);
    }

    ost->frame->pts = ost->next_pts++;

    return ost->frame;
}

/*
 * encode one video frame and send it to the muxer
 * return 1 when encoding is finished, 0 otherwise
 */
static int write_video_frame(AVFormatContext *oc, OutputStream *ost)
{
    return write_frame(oc, ost->enc, ost->st, get_video_frame(ost));
}

static void close_stream(AVFormatContext *oc, OutputStream *ost)
{
    avcodec_free_context(&ost->enc);
    av_frame_free(&ost->frame);
    av_frame_free(&ost->tmp_frame);
    sws_freeContext(ost->sws_ctx);
    swr_free(&ost->swr_ctx);
}

/**************************************************************/
/* media file output */

int main(int argc, char **argv)
{
    OutputStream video_st = { 0 }, audio_st = { 0 };
    const char *filename;
    AVOutputFormat *fmt;
    AVFormatContext *oc;
    AVCodec *audio_codec, *video_codec;
    int ret;
    int have_video = 0, have_audio = 0;
    int encode_video = 0, encode_audio = 0;
    AVDictionary *opt = NULL;
    int i;

    if (argc < 2) {
        printf("usage: %s output_file\n"
               "API example program to output a media file with libavformat.\n"
               "This program generates a synthetic audio and video stream, encodes and\n"
               "muxes them into a file named output_file.\n"
               "The output format is automatically guessed according to the file extension.\n"
               "Raw images can also be output by using '%%d' in the filename.\n"
               "\n", argv[0]);
        return 1;
    }

    filename = argv[1];
    for (i = 2; i+1 < argc; i+=2) {
        if (!strcmp(argv[i], "-flags") || !strcmp(argv[i], "-fflags"))
            av_dict_set(&opt, argv[i]+1, argv[i+1], 0);
    }

    /* allocate the output media context */
    avformat_alloc_output_context2(&oc, NULL, NULL, filename);
    if (!oc) {
        printf("Could not deduce output format from file extension: using MPEG.\n");
        avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
    }
    if (!oc)
        return 1;

    fmt = oc->oformat;

    /* Add the audio and video streams using the default format codecs
     * and initialize the codecs. */
    if (fmt->video_codec != AV_CODEC_ID_NONE) {
        add_stream(&video_st, oc, &video_codec, fmt->video_codec);
        have_video = 1;
        encode_video = 1;
    }
    if (fmt->audio_codec != AV_CODEC_ID_NONE) {
        add_stream(&audio_st, oc, &audio_codec, fmt->audio_codec);
        have_audio = 1;
        encode_audio = 1;
    }

    /* Now that all the parameters are set, we can open the audio and
     * video codecs and allocate the necessary encode buffers. */
    if (have_video)
        open_video(oc, video_codec, &video_st, opt);

    if (have_audio)
        open_audio(oc, audio_codec, &audio_st, opt);

    av_dump_format(oc, 0, filename, 1);

    /* open the output file, if needed */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            fprintf(stderr, "Could not open '%s': %s\n", filename,
                    av_err2str(ret));
            return 1;
        }
    }

    /* Write the stream header, if any. */
    ret = avformat_write_header(oc, &opt);
    if (ret < 0) {
        fprintf(stderr, "Error occurred when opening output file: %s\n",
                av_err2str(ret));
        return 1;
    }
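
    /* Encode and interleave: on each iteration pick the stream whose next
     * frame has the earlier presentation time (compared across the two
     * different time bases with av_compare_ts()), so audio and video advance
     * together in the output file. */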
    while (encode_video || encode_audio) {
        /* select the stream to encode */
        if (encode_video &&
            (!encode_audio || av_compare_ts(video_st.next_pts, video_st.enc->time_base,
                                            audio_st.next_pts, audio_st.enc->time_base) <= 0)) {
            encode_video = !write_video_frame(oc, &video_st);
        } else {
            encode_audio = !write_audio_frame(oc, &audio_st);
        }
    }

    /* Write the trailer, if any. The trailer must be written before you
     * close the CodecContexts open when you wrote the header; otherwise
     * av_write_trailer() may try to use memory that was freed on
     * av_codec_close(). */
    av_write_trailer(oc);

    /* Close each codec. */
    if (have_video)
        close_stream(oc, &video_st);
    if (have_audio)
        close_stream(oc, &audio_st);

    if (!(fmt->flags & AVFMT_NOFILE))
        /* Close the output file. */
        avio_closep(&oc->pb);

    /* free the stream */
    avformat_free_context(oc);

    return 0;
}
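
/*
 * Example invocations (the container format is guessed from the file
 * extension), for instance:
 *
 *   ./muxing out.mp4
 *   ./muxing out.mkv
 *   ./muxing out.ts
 */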