Show how to use the libavformat and libavcodec API to demux and decode audio and video data. Write the output as raw audio and raw video files to be played by ffplay.
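For context, the functions below share a set of file-scope variables and the usual libavformat/libavcodec headers. The following is a minimal sketch of those declarations, reconstructed to match the names referenced in the code (it follows FFmpeg's demux_decode.c example rather than being quoted from this page):

#include <stdio.h>
#include <libavutil/imgutils.h>
#include <libavutil/pixdesc.h>
#include <libavutil/samplefmt.h>
#include <libavutil/timestamp.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>

/* file-scope state shared by the helper functions and main() below
 * (reconstructed declarations; names follow FFmpeg's demux_decode.c) */
static AVFormatContext *fmt_ctx = NULL;
static AVCodecContext *video_dec_ctx = NULL, *audio_dec_ctx = NULL;
static int width, height;
static enum AVPixelFormat pix_fmt;
static AVStream *video_stream = NULL, *audio_stream = NULL;
static const char *src_filename = NULL;
static const char *video_dst_filename = NULL;
static const char *audio_dst_filename = NULL;
static FILE *video_dst_file = NULL;
static FILE *audio_dst_file = NULL;

static uint8_t *video_dst_data[4] = {NULL};
static int      video_dst_linesize[4];
static int      video_dst_bufsize;

static int video_stream_idx = -1, audio_stream_idx = -1;
static AVFrame *frame = NULL;
static AVPacket *pkt = NULL;
static int video_frame_count = 0;
static int audio_frame_count = 0;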
static int output_video_frame(AVFrame *frame)
{
    if (frame->width != width || frame->height != height ||
        frame->format != pix_fmt) {
        fprintf(stderr, "Error: Width, height and pixel format have to be "
                "constant in a rawvideo file, but the width, height or "
                "pixel format of the input video changed:\n"
                "old: width = %d, height = %d, format = %s\n"
                "new: width = %d, height = %d, format = %s\n",
                width, height, av_get_pix_fmt_name(pix_fmt),
                frame->width, frame->height, av_get_pix_fmt_name(frame->format));
        return -1;
    }

    printf("video_frame n:%d\n", video_frame_count++);

    /* copy the decoded frame to the destination buffer (rawvideo expects
     * non-aligned data), then write it to the rawvideo file */
    av_image_copy(video_dst_data, video_dst_linesize,
                  (const uint8_t **)(frame->data), frame->linesize,
                  pix_fmt, width, height);
    fwrite(video_dst_data[0], 1, video_dst_bufsize, video_dst_file);
    return 0;
}
static int output_audio_frame(AVFrame *frame)
{
    size_t unpadded_linesize = frame->nb_samples * av_get_bytes_per_sample(frame->format);
    printf("audio_frame n:%d nb_samples:%d pts:%s\n",
           audio_frame_count++, frame->nb_samples,
           av_ts2timestr(frame->pts, &audio_dec_ctx->time_base));

    /* Write the raw samples of the first plane only. This is correct for packed
     * formats (e.g. AV_SAMPLE_FMT_S16); for planar formats it writes just the
     * first channel. */
    fwrite(frame->extended_data[0], 1, unpadded_linesize, audio_dst_file);
    return 0;
}
static int decode_packet(AVCodecContext *dec, const AVPacket *pkt)
{
    int ret = 0;

    /* submit the packet to the decoder */
    ret = avcodec_send_packet(dec, pkt);
    if (ret < 0) {
        fprintf(stderr, "Error submitting a packet for decoding (%s)\n", av_err2str(ret));
        return ret;
    }

    /* get all the available frames from the decoder */
    while (ret >= 0) {
        ret = avcodec_receive_frame(dec, frame);
        if (ret < 0) {
            /* these two return values mean no output frame is available right
             * now, but no decoding error occurred */
            if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN))
                return 0;
            fprintf(stderr, "Error during decoding (%s)\n", av_err2str(ret));
            return ret;
        }

        /* write the frame data to the matching output file */
        if (dec->codec->type == AVMEDIA_TYPE_VIDEO)
            ret = output_video_frame(frame);
        else
            ret = output_audio_frame(frame);

        av_frame_unref(frame);
        if (ret < 0)
            return ret;
    }

    return 0;
}
static int open_codec_context(int *stream_idx, AVCodecContext **dec_ctx,
                              AVFormatContext *fmt_ctx, enum AVMediaType type)
{
    int ret, stream_index;
    AVStream *st;
    const AVCodec *dec = NULL;

    ret = av_find_best_stream(fmt_ctx, type, -1, -1, NULL, 0);
    if (ret < 0) {
        fprintf(stderr, "Could not find %s stream in input file '%s'\n",
                av_get_media_type_string(type), src_filename);
        return ret;
    } else {
        stream_index = ret;
        st = fmt_ctx->streams[stream_index];

        /* find a decoder for the stream and allocate a codec context for it */
        dec = avcodec_find_decoder(st->codecpar->codec_id);
        if (!dec) {
            fprintf(stderr, "Failed to find %s codec\n", av_get_media_type_string(type));
            return AVERROR(EINVAL);
        }
        *dec_ctx = avcodec_alloc_context3(dec);
        if (!*dec_ctx) {
            fprintf(stderr, "Failed to allocate the %s codec context\n", av_get_media_type_string(type));
            return AVERROR(ENOMEM);
        }

        /* copy codec parameters from the input stream to the decoder context, then open it */
        if ((ret = avcodec_parameters_to_context(*dec_ctx, st->codecpar)) < 0) {
            fprintf(stderr, "Failed to copy %s codec parameters to decoder context\n",
                    av_get_media_type_string(type));
            return ret;
        }
        if ((ret = avcodec_open2(*dec_ctx, dec, NULL)) < 0) {
            fprintf(stderr, "Failed to open %s codec\n", av_get_media_type_string(type));
            return ret;
        }
        *stream_idx = stream_index;
    }

    return 0;
}
static int get_format_from_sample_fmt(const char **fmt,
                                      enum AVSampleFormat sample_fmt)
{
    int i;
    struct sample_fmt_entry {
        enum AVSampleFormat sample_fmt; const char *fmt_be, *fmt_le;
    } sample_fmt_entries[] = {
        { AV_SAMPLE_FMT_U8,  "u8",    "u8"    },
        { AV_SAMPLE_FMT_S16, "s16be", "s16le" },
        { AV_SAMPLE_FMT_S32, "s32be", "s32le" },
        { AV_SAMPLE_FMT_FLT, "f32be", "f32le" },
        { AV_SAMPLE_FMT_DBL, "f64be", "f64le" },
    };
    *fmt = NULL;

    for (i = 0; i < FF_ARRAY_ELEMS(sample_fmt_entries); i++) {
        struct sample_fmt_entry *entry = &sample_fmt_entries[i];
        if (sample_fmt == entry->sample_fmt) {
            /* pick the big- or little-endian name depending on host byte order */
            *fmt = AV_NE(entry->fmt_be, entry->fmt_le);
            return 0;
        }
    }

    fprintf(stderr,
            "sample format %s is not supported as output format\n",
            av_get_sample_fmt_name(sample_fmt));
    return -1;
}
int main (int argc, char **argv)
{
    int ret = 0;

    if (argc != 4) {
        fprintf(stderr, "usage: %s input_file video_output_file audio_output_file\n"
                "API example program to show how to read frames from an input file.\n"
                "This program reads frames from a file, decodes them, and writes decoded\n"
                "video frames to a rawvideo file named video_output_file, and decoded\n"
                "audio frames to a rawaudio file named audio_output_file.\n",
                argv[0]);
        exit(1);
    }
    src_filename       = argv[1];
    video_dst_filename = argv[2];
    audio_dst_filename = argv[3];

    /* open the input file and allocate the format context */
    if (avformat_open_input(&fmt_ctx, src_filename, NULL, NULL) < 0) {
        fprintf(stderr, "Could not open source file %s\n", src_filename);
        exit(1);
    }

    /* retrieve stream information */
    if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
        fprintf(stderr, "Could not find stream information\n");
        exit(1);
    }

    if (open_codec_context(&video_stream_idx, &video_dec_ctx, fmt_ctx, AVMEDIA_TYPE_VIDEO) >= 0) {
        video_stream = fmt_ctx->streams[video_stream_idx];
        video_dst_file = fopen(video_dst_filename, "wb");
        if (!video_dst_file) {
            fprintf(stderr, "Could not open destination file %s\n", video_dst_filename);
            ret = 1;
            goto end;
        }
        /* allocate the image where the decoded frames will be put */
        width   = video_dec_ctx->width;
        height  = video_dec_ctx->height;
        pix_fmt = video_dec_ctx->pix_fmt;
        ret = av_image_alloc(video_dst_data, video_dst_linesize, width, height, pix_fmt, 1);
        if (ret < 0) {
            fprintf(stderr, "Could not allocate raw video buffer\n");
            goto end;
        }
        video_dst_bufsize = ret;
    }

    if (open_codec_context(&audio_stream_idx, &audio_dec_ctx, fmt_ctx, AVMEDIA_TYPE_AUDIO) >= 0) {
        audio_stream = fmt_ctx->streams[audio_stream_idx];
        audio_dst_file = fopen(audio_dst_filename, "wb");
        if (!audio_dst_file) {
            fprintf(stderr, "Could not open destination file %s\n", audio_dst_filename);
            ret = 1;
            goto end;
        }
    }

    /* dump input information to stderr */
    av_dump_format(fmt_ctx, 0, src_filename, 0);

    if (!audio_stream && !video_stream) {
        fprintf(stderr, "Could not find audio or video stream in the input, aborting\n");
        ret = 1;
        goto end;
    }

    frame = av_frame_alloc();
    if (!frame) {
        fprintf(stderr, "Could not allocate frame\n");
        ret = AVERROR(ENOMEM);
        goto end;
    }
    pkt = av_packet_alloc();
    if (!pkt) {
        fprintf(stderr, "Could not allocate packet\n");
        ret = AVERROR(ENOMEM);
        goto end;
    }

    /* read packets from the file and feed them to the matching decoder */
    while (av_read_frame(fmt_ctx, pkt) >= 0) {
        if (pkt->stream_index == video_stream_idx)
            ret = decode_packet(video_dec_ctx, pkt);
        else if (pkt->stream_index == audio_stream_idx)
            ret = decode_packet(audio_dec_ctx, pkt);
        av_packet_unref(pkt);
        if (ret < 0)
            break;
    }

    /* flush the decoders */
    if (video_dec_ctx)
        decode_packet(video_dec_ctx, NULL);
    if (audio_dec_ctx)
        decode_packet(audio_dec_ctx, NULL);

    printf("Demuxing succeeded.\n");

    if (video_stream) {
        printf("Play the output video file with the command:\n"
               "ffplay -f rawvideo -pix_fmt %s -video_size %dx%d %s\n",
               av_get_pix_fmt_name(pix_fmt), width, height, video_dst_filename);
    }

    if (audio_stream) {
        enum AVSampleFormat sfmt = audio_dec_ctx->sample_fmt;
        int n_channels = audio_dec_ctx->ch_layout.nb_channels;
        const char *fmt;

        if (av_sample_fmt_is_planar(sfmt)) {
            const char *packed = av_get_sample_fmt_name(sfmt);
            printf("Warning: the sample format the decoder produced is planar "
                   "(%s). This example will output the first channel only.\n",
                   packed ? packed : "?");
            sfmt = av_get_packed_sample_fmt(sfmt);
            n_channels = 1;
        }

        if ((ret = get_format_from_sample_fmt(&fmt, sfmt)) < 0)
            goto end;

        printf("Play the output audio file with the command:\n"
               "ffplay -f %s -ac %d -ar %d %s\n",
               fmt, n_channels, audio_dec_ctx->sample_rate, audio_dst_filename);
    }

end:
    avcodec_free_context(&video_dec_ctx);
    avcodec_free_context(&audio_dec_ctx);
    avformat_close_input(&fmt_ctx);
    if (video_dst_file)
        fclose(video_dst_file);
    if (audio_dst_file)
        fclose(audio_dst_file);
    av_packet_free(&pkt);
    av_frame_free(&frame);
    av_freep(&video_dst_data[0]);

    return ret < 0;
}
printf("static const uint8_t my_array[100] = {\n")