    /* Piecewise lookup tables. table[0] (copied to table[1]) has
     * 8 + 11 + 27 + 11 + 7 = 64 increasing entries, with finer spacing
     * near +-1 (steps of 1/160) than around 0 (steps of 1/20). */
    for (int i = 0; i < 8; i++)
        s->table[0][i] = (i - 159.5f) / 160.f;
    for (int i = 0; i < 11; i++)
        s->table[0][i + 8] = (i - 37.5f) / 40.f;
    for (int i = 0; i < 27; i++)
        s->table[0][i + 8 + 11] = (i - 13.f) / 20.f;
    for (int i = 0; i < 11; i++)
        s->table[0][i + 8 + 11 + 27] = (i + 27.5f) / 40.f;
    for (int i = 0; i < 7; i++)
        s->table[0][i + 8 + 11 + 27 + 11] = (i + 152.5f) / 160.f;

    memcpy(s->table[1], s->table[0], sizeof(s->table[0]));

    /* table[2] has 7 + 25 = 32 entries; table[3] is its negated mirror. */
    for (int i = 0; i < 7; i++)
        s->table[2][i] = (i - 33.5f) / 40.f;
    for (int i = 0; i < 25; i++)
        s->table[2][i + 7] = (i - 13.f) / 20.f;

    for (int i = 0; i < 32; i++)
        s->table[3][i] = -s->table[2][31 - i];

    /* tables 4-7 are simple linear ramps of 16 and 8 entries. */
    for (int i = 0; i < 16; i++)
        s->table[4][i] = i * 0.22f / 3.f - 0.6f;

    for (int i = 0; i < 16; i++)
        s->table[5][i] = i * 0.20f / 3.f - 0.3f;

    for (int i = 0; i < 8; i++)
        s->table[6][i] = i * 0.36f / 3.f - 0.4f;

    for (int i = 0; i < 8; i++)
        s->table[7][i] = i * 0.34f / 3.f - 0.2f;
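The first five loops above fill table[0] piecewise. A small stand-alone sketch (hypothetical names, same arithmetic as the excerpt) shows what they produce: 64 monotonically increasing values between roughly -0.997 and 0.991, with the densest spacing at the extremes:

#include <stdio.h>

/* Illustration only: rebuild the first table exactly as above and print its
 * extent. 8 + 11 + 27 + 11 + 7 = 64 entries. */
int main(void)
{
    float table0[64];
    int n = 0;

    for (int i = 0; i < 8; i++)  table0[n++] = (i - 159.5f) / 160.f;
    for (int i = 0; i < 11; i++) table0[n++] = (i - 37.5f) / 40.f;
    for (int i = 0; i < 27; i++) table0[n++] = (i - 13.f) / 20.f;
    for (int i = 0; i < 11; i++) table0[n++] = (i + 27.5f) / 40.f;
    for (int i = 0; i < 7; i++)  table0[n++] = (i + 152.5f) / 160.f;

    printf("%d entries, from %f to %f\n", n, table0[0], table0[63]);
    return 0;
}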
    return r & ((1 << bits) - 1);
/* Field width, in bits, for each of the eight table indices; 0 bits for
 * table[5], whose index is presumably derived adaptively (see index5 below). */
static const uint8_t bits[8] = { 6, 6, 5, 5, 4, 0, 3, 3, };
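Only the final masking statement of read_bits() survives in this excerpt, together with its prototype further down. As a rough illustration of the idea, a generic bit-by-bit reader with the same interface could look like this, assuming values are packed MSB-first into consecutive 32-bit words (the packing order and the helper name are assumptions, not taken from this file):

#include <stdint.h>

/* Hypothetical helper: extract a 'bits'-wide value from an array of 32-bit
 * words, starting at bit position *ppos and advancing it. The real
 * read_bits() is presumably a faster, word-at-a-time equivalent. */
static unsigned get_field(int bits, int *ppos, const uint32_t *src)
{
    unsigned v = 0;

    for (int n = 0; n < bits; n++, (*ppos)++)
        v = (v << 1) | ((src[*ppos / 32] >> (31 - (*ppos & 31))) & 1u);

    return v & ((1u << bits) - 1);   /* same masking as the fragment above */
}

A width of 0, as used for the sixth table, consumes no bits and yields 0, since the mask (1 << 0) - 1 is 0.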
    frame->nb_samples = subframes * 256;   /* each subframe yields 256 samples */
    for (int subframe = 0; subframe < subframes; subframe++) {
        /* Per-channel work; the channel loop and the declarations of ch, src,
         * m, pos and x are not part of this excerpt. */
            float result[256] = { 0 };
            int inds[4], pads[4];

            for (int i = 0; i < 10; i++)
                src[i] = bytestream2_get_le32(&gb);   /* 10 x 32-bit words = 40 bytes */

            for (int i = 0; i < 8; i++)
                /* not in this excerpt: presumably reads one index per table
                 * (widths from bits[] above) and looks up the coefficients m[] */;

            for (int i = 0; i < 4; i++)
                /* not in this excerpt: presumably fills inds[] */;

            for (int i = 0; i < 4; i++)
                /* not in this excerpt: presumably fills pads[] */;

            for (int i = 0, index5 = 0; i < 4; i++) {
                for (int j = 0, tmp = 0; j < 21; j++) {
                    /* ... */
                    index5 = FFMIN(2 * index5 + tmp % 2, 63);
                }

                /* index5 is declared in the loop header above, so this
                 * statement must still be inside that loop. */
                m[2] = s->table[5][index5];
            }

            for (int i = 0; i < 256; i++) {
                for (int j = 0; j < 8; j++) {
                    x -= m[j] * ch->f[j];
                    ch->f[j] += m[j] * x;
                }

                memmove(&ch->f[0], &ch->f[1], sizeof(float) * 7);
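The two statements in the innermost loop above form an 8-coefficient lattice-style recursion over the per-channel state ch->f[], followed by a shift of that state. A condensed, hypothetical re-creation of one per-sample step (the state store after the memmove is an assumption; that line is not part of this excerpt):

#include <string.h>

/* Illustration only: 'coef' stands in for m[], 'state' for ch->f[]. */
static float synth_sample(float x, const float coef[8], float state[8])
{
    for (int j = 0; j < 8; j++) {
        x        -= coef[j] * state[j];   /* remove the predicted part */
        state[j] += coef[j] * x;          /* update the filter state   */
    }

    memmove(&state[0], &state[1], sizeof(float) * 7);   /* as in the excerpt */
    state[7] = x;   /* assumed: append the new sample to the state */

    return x;
}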
    .p.name = "fastaudio",
AV_SAMPLE_FMT_FLTP
float, planar
static av_cold int fastaudio_close(AVCodecContext *avctx)
AVFrame
This structure describes decoded (raw) audio or video data.
int nb_channels
Number of channels in this layout.
AVCodec p
The public AVCodec.
AVChannelLayout ch_layout
Audio channel layout.
static av_always_inline float av_int2float(uint32_t i)
Reinterpret a 32-bit integer as a float.
static av_cold int fastaudio_init(AVCodecContext *avctx)
static void set_sample(int i, int j, int v, float *result, int *pads, float value)
#define FF_CODEC_DECODE_CB(func)
#define CODEC_LONG_NAME(str)
const FFCodec ff_fastaudio_decoder
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
enum AVSampleFormat sample_fmt
audio sample format
static int read_bits(int bits, int *ppos, unsigned *src)
AVSampleFormat
Audio sample formats.
const char * name
Name of the codec implementation.
void * av_calloc(size_t nmemb, size_t size)
AVCodecContext
main external API structure.
AVPacket
This structure stores compressed data.
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
static int fastaudio_decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
static const uint8_t bits[8]