#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

#include <libavcodec/avcodec.h>

#include <libavutil/channel_layout.h>
#include <libavutil/common.h>
#include <libavutil/frame.h>
#include <libavutil/samplefmt.h>
/* check that a given sample format is supported by the encoder */
static int check_sample_fmt(const AVCodec *codec, enum AVSampleFormat sample_fmt)
{
    const enum AVSampleFormat *p = codec->sample_fmts;

    while (*p != AV_SAMPLE_FMT_NONE) {
        if (*p == sample_fmt)
            return 1;
        p++;
    }
    return 0;
}
/* just pick the highest supported samplerate */
static int select_sample_rate(const AVCodec *codec)
{
    const int *p;
    int best_samplerate = 0;

    if (!codec->supported_samplerates)
        return 44100;

    p = codec->supported_samplerates;
    while (*p) {
        if (!best_samplerate ||
            abs(44100 - *p) < abs(44100 - best_samplerate))
            best_samplerate = *p;
        p++;
    }
    return best_samplerate;
}
/* select the channel layout with the highest channel count */
static int select_channel_layout(const AVCodec *codec)
{
    const uint64_t *p;
    uint64_t best_ch_layout = 0;
    int best_nb_channels = 0;

    if (!codec->channel_layouts)
        return AV_CH_LAYOUT_STEREO;

    p = codec->channel_layouts;
    while (*p) {
        int nb_channels = av_get_channel_layout_nb_channels(*p);

        if (nb_channels > best_nb_channels) {
            best_ch_layout   = *p;
            best_nb_channels = nb_channels;
        }
        p++;
    }
    return best_ch_layout;
}
static void encode(AVCodecContext *ctx, AVFrame *frame, AVPacket *pkt,
                   FILE *output)
{
    int ret;

    /* send the frame for encoding */
    ret = avcodec_send_frame(ctx, frame);
    if (ret < 0) {
        fprintf(stderr, "Error sending the frame to the encoder\n");
        exit(1);
    }

    /* read all the available output packets (in general there may be
     * any number of them) */
    while (ret >= 0) {
        ret = avcodec_receive_packet(ctx, pkt);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return;
        else if (ret < 0) {
            fprintf(stderr, "Error encoding audio frame\n");
            exit(1);
        }

        fwrite(pkt->data, 1, pkt->size, output);
        av_packet_unref(pkt);
    }
}
int main(int argc, char **argv)
{
    const char *filename;
    const AVCodec *codec;
    AVCodecContext *c = NULL;
    AVFrame *frame;
    AVPacket *pkt;
    int i, j, k, ret;
    FILE *f;
    uint16_t *samples;
    float t, tincr;

    if (argc <= 1) {
        fprintf(stderr, "Usage: %s <output file>\n", argv[0]);
        return 0;
    }
    filename = argv[1];

    /* find the MP2 encoder */
    codec = avcodec_find_encoder(AV_CODEC_ID_MP2);
    if (!codec) {
        fprintf(stderr, "Codec not found\n");
        exit(1);
    }

    c = avcodec_alloc_context3(codec);
    if (!c) {
        fprintf(stderr, "Could not allocate audio codec context\n");
        exit(1);
    }

    /* put sample parameters */
    c->bit_rate = 64000;

    /* check that the encoder supports s16 pcm input */
    c->sample_fmt = AV_SAMPLE_FMT_S16;
    if (!check_sample_fmt(codec, c->sample_fmt)) {
        fprintf(stderr, "Encoder does not support sample format %s",
                av_get_sample_fmt_name(c->sample_fmt));
        exit(1);
    }

    /* select other audio parameters supported by the encoder */
    c->sample_rate    = select_sample_rate(codec);
    c->channel_layout = select_channel_layout(codec);
    c->channels       = av_get_channel_layout_nb_channels(c->channel_layout);

    /* open it */
    if (avcodec_open2(c, codec, NULL) < 0) {
        fprintf(stderr, "Could not open codec\n");
        exit(1);
    }

    f = fopen(filename, "wb");
    if (!f) {
        fprintf(stderr, "Could not open %s\n", filename);
        exit(1);
    }

    /* packet for holding encoded output */
    pkt = av_packet_alloc();
    if (!pkt) {
        fprintf(stderr, "could not allocate the packet\n");
        exit(1);
    }

    /* frame containing input raw audio */
    frame = av_frame_alloc();
    if (!frame) {
        fprintf(stderr, "Could not allocate audio frame\n");
        exit(1);
    }

    frame->nb_samples     = c->frame_size;
    frame->format         = c->sample_fmt;
    frame->channel_layout = c->channel_layout;

    /* allocate the data buffers */
    ret = av_frame_get_buffer(frame, 0);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate audio data buffers\n");
        exit(1);
    }

    /* encode a single tone sound */
    t = 0;
    tincr = 2 * M_PI * 440.0 / c->sample_rate;
    for (i = 0; i < 200; i++) {
        /* make sure the frame is writable -- makes a copy if the
         * encoder kept a reference internally */
        ret = av_frame_make_writable(frame);
        if (ret < 0)
            exit(1);
        samples = (uint16_t*)frame->data[0];

        for (j = 0; j < c->frame_size; j++) {
            samples[2*j] = (int)(sin(t) * 10000);

            for (k = 1; k < c->channels; k++)
                samples[2*j + k] = samples[2*j];
            t += tincr;
        }
        encode(c, frame, pkt, f);
    }

    /* flush the encoder */
    encode(c, NULL, pkt, f);

    fclose(f);

    av_frame_free(&frame);
    av_packet_free(&pkt);
    avcodec_free_context(&c);

    return 0;
}
int main(int argc, char **argv)
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
int avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
Read encoded data from the encoder.
Filter: the word "frame" indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format negotiation: for each input and each output, a filter declares the list of supported formats. For video that means the pixel format; for audio that means the channel layout, sample format and sample rate. The lists are not plain lists: they are references to shared objects. When the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection; and when a single format is eventually chosen for a link amongst the remaining ones, all references to the list are updated. That means that if a filter requires that its input and output have the same format amongst a supported list, all it has to do is use a reference to the same list of formats. query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.
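As a concrete illustration of the shared-list mechanism, here is a minimal query_formats callback in the style described above. It relies on the internal libavfilter helpers ff_make_format_list() and ff_set_common_formats(), which are private APIs only available when building inside the FFmpeg source tree and whose exact signatures may differ between versions; treat it as a sketch rather than canonical code.

#include "libavfilter/avfilter.h"
#include "libavfilter/formats.h"   /* private header, only in the FFmpeg tree */

/* Build one shared list of sample formats and attach the same reference to
 * every input and output of the filter. Because all links point at the same
 * AVFilterFormats object, constraining one link automatically constrains the
 * others, exactly as described above. */
static int query_formats(AVFilterContext *ctx)
{
    static const int sample_fmts[] = {
        AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_NONE
    };
    AVFilterFormats *fmts = ff_make_format_list(sample_fmts);

    if (!fmts)
        return AVERROR(ENOMEM);
    return ff_set_common_formats(ctx, fmts);
}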
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
#define AVERROR_EOF
End of file.
filter_frame: for filters that do not use the activate callback, this method is called when a frame is pushed to the filter's input. It can be called at any time except in a reentrant way. If the input frame is enough to produce output, then output is produced.
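To make that contract concrete, below is a minimal pass-through filter_frame callback sketched against libavfilter's internal conventions. ff_filter_frame() and the internal.h header are private FFmpeg APIs whose exact location and signatures vary between versions, so this is an illustration under those assumptions, not a drop-in implementation.

#include "libavfilter/avfilter.h"
#include "libavfilter/internal.h"   /* private header: ff_filter_frame() */

/* Pass-through filter_frame: the input frame is enough to produce output,
 * so it is forwarded downstream immediately. Note that no request_frame
 * call is made here; asking for more input is the job of request_frame or
 * of the application. */
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterLink *outlink = inlink->dst->outputs[0];

    /* a real filter would inspect or modify frame->data here */
    return ff_filter_frame(outlink, frame);
}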
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g. extended_data.
AVFrame
This structure describes decoded (raw) audio or video data.
int av_frame_make_writable(AVFrame *frame)
Ensure that the frame data is writable, avoiding data copy if possible.
void av_packet_free(AVPacket **pkt)
Free the packet; if the packet is reference counted, it will be unreferenced first.
enum AVSampleFormat * sample_fmts
array of supported sample formats, or NULL if unknown, array is terminated by -1
AVCodec * avcodec_find_encoder(enum AVCodecID id)
Find a registered encoder with a matching codec ID.
#define AV_CH_LAYOUT_STEREO
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
const int * supported_samplerates
array of supported audio samplerates, or NULL if unknown, array is terminated by 0
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
static int check_sample_fmt(const AVCodec *codec, enum AVSampleFormat sample_fmt)
const char * av_get_sample_fmt_name(enum AVSampleFormat sample_fmt)
Return the name of sample_fmt, or NULL if sample_fmt is not recognized.
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Undefined behavior: in the C language some operations are undefined, such as signed integer overflow, dereferencing freed pointers, or accessing outside allocated space. Undefined behavior must not occur in a C program; it is not safe even if the output of the undefined operations is unused. The unsafety may seem like nit-picking, but optimizing compilers have in fact optimized code on the assumption that no undefined behavior occurs, and optimizing code based on wrong assumptions can, and in some cases has, led to effects beyond the output of computations. The signed integer overflow problem in speed-critical code: code which is highly optimized and works with signed integers sometimes has the problem that the output of the computation does not …
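A common way around the signed-overflow issue in hot code is to do the arithmetic in unsigned types, where wraparound is well defined, and convert back at the end. The helper below is a minimal sketch of that pattern; the function name is ours for illustration, not an FFmpeg API.

#include <stdint.h>

/* a * b overflows 'int' for large inputs, which is undefined behavior.
 * Doing the multiplication on uint32_t is well defined (it wraps modulo
 * 2^32); converting the result back to int32_t is implementation-defined
 * rather than undefined, and is two's complement on the platforms FFmpeg
 * targets. */
static inline int32_t mul32_wrap(int32_t a, int32_t b)
{
    return (int32_t)((uint32_t)a * (uint32_t)b);
}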
int av_get_channel_layout_nb_channels(uint64_t channel_layout)
Return the number of channels in the channel layout.
static void encode(AVCodecContext *ctx, AVFrame *frame, AVPacket *pkt, FILE *output)
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
AVSampleFormat
Audio sample formats.
AV_SAMPLE_FMT_S16
signed 16 bits
static int select_sample_rate(const AVCodec *codec)
int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
Supply a raw video or audio frame to the encoder.
static int select_channel_layout(const AVCodec *codec)
If output frames have been buffered, these buffered frames must be flushed immediately if a new input produces new output. If the input frame is not enough to produce output, the filter must not call request_frame to get more; it must just process the frame or queue it. The task of requesting more frames is left to the filter's request_frame method or to the application. If a filter has several inputs, the filter must be ready for frames arriving randomly on any input; therefore any filter with several inputs will most likely require some kind of queuing mechanism. It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced. request_frame: for filters that do not use the activate callback, this method is called when a frame is wanted on an output. For a source, it should directly call filter_frame on the corresponding output. For a filter, if there are already queued frames, one of these frames should be pushed; if not, the filter should request a frame on one of its inputs, repeatedly until at least one frame has been pushed. It should return success if it produced a frame, or at least made progress towards producing one.
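A matching request_frame sketch for a simple one-input filter with no internal queue, again assuming the private libavfilter helpers (ff_request_frame(), internal.h) whose names and locations may differ between versions: the request is simply forwarded upstream, and the resulting frame arrives through filter_frame.

#include "libavfilter/avfilter.h"
#include "libavfilter/internal.h"   /* private header: ff_request_frame() */

/* request_frame for a filter with one input and no queued frames: forward
 * the request to the input link; the frame produced upstream will be
 * delivered to this filter through its filter_frame callback. */
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;

    return ff_request_frame(ctx->inputs[0]);
}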
AVCodecContext
main external API structure.
AVPacket
This structure stores compressed data.
const uint64_t * channel_layouts
array of supported channel layouts, or NULL if unknown; array is terminated by 0