32 #define BITSTREAM_READER_LE
41 #define MAX_SUBFRAMES 8
42 #define MAX_PREDICTORS 256
75 int8_t coding_mode[128];
83 4, 8, 12, 16, 24, 32, 48, 64, 80, 96, 128, 160, 192, 224, 256, 0,
93 { 0x01, 0x0000001, 0x0000001, 0x0000003, 0x0000008 },
94 { 0x02, 0x0000003, 0x0000001, 0x0000007, 0x0000006 },
95 { 0x03, 0x0000005, 0x0000002, 0x000000E, 0x000000D },
96 { 0x03, 0x0000003, 0x0000003, 0x000000D, 0x0000018 },
97 { 0x04, 0x000000B, 0x0000004, 0x000001C, 0x0000019 },
98 { 0x04, 0x0000006, 0x0000006, 0x000001A, 0x0000030 },
99 { 0x05, 0x0000016, 0x0000008, 0x0000038, 0x0000032 },
100 { 0x05, 0x000000C, 0x000000C, 0x0000034, 0x0000060 },
101 { 0x06, 0x000002C, 0x0000010, 0x0000070, 0x0000064 },
102 { 0x06, 0x0000018, 0x0000018, 0x0000068, 0x00000C0 },
103 { 0x07, 0x0000058, 0x0000020, 0x00000E0, 0x00000C8 },
104 { 0x07, 0x0000030, 0x0000030, 0x00000D0, 0x0000180 },
105 { 0x08, 0x00000B0, 0x0000040, 0x00001C0, 0x0000190 },
106 { 0x08, 0x0000060, 0x0000060, 0x00001A0, 0x0000300 },
107 { 0x09, 0x0000160, 0x0000080, 0x0000380, 0x0000320 },
108 { 0x09, 0x00000C0, 0x00000C0, 0x0000340, 0x0000600 },
109 { 0x0A, 0x00002C0, 0x0000100, 0x0000700, 0x0000640 },
110 { 0x0A, 0x0000180, 0x0000180, 0x0000680, 0x0000C00 },
111 { 0x0B, 0x0000580, 0x0000200, 0x0000E00, 0x0000C80 },
112 { 0x0B, 0x0000300, 0x0000300, 0x0000D00, 0x0001800 },
113 { 0x0C, 0x0000B00, 0x0000400, 0x0001C00, 0x0001900 },
114 { 0x0C, 0x0000600, 0x0000600, 0x0001A00, 0x0003000 },
115 { 0x0D, 0x0001600, 0x0000800, 0x0003800, 0x0003200 },
116 { 0x0D, 0x0000C00, 0x0000C00, 0x0003400, 0x0006000 },
117 { 0x0E, 0x0002C00, 0x0001000, 0x0007000, 0x0006400 },
118 { 0x0E, 0x0001800, 0x0001800, 0x0006800, 0x000C000 },
119 { 0x0F, 0x0005800, 0x0002000, 0x000E000, 0x000C800 },
120 { 0x0F, 0x0003000, 0x0003000, 0x000D000, 0x0018000 },
121 { 0x10, 0x000B000, 0x0004000, 0x001C000, 0x0019000 },
122 { 0x10, 0x0006000, 0x0006000, 0x001A000, 0x0030000 },
123 { 0x11, 0x0016000, 0x0008000, 0x0038000, 0x0032000 },
124 { 0x11, 0x000C000, 0x000C000, 0x0034000, 0x0060000 },
125 { 0x12, 0x002C000, 0x0010000, 0x0070000, 0x0064000 },
126 { 0x12, 0x0018000, 0x0018000, 0x0068000, 0x00C0000 },
127 { 0x13, 0x0058000, 0x0020000, 0x00E0000, 0x00C8000 },
128 { 0x13, 0x0030000, 0x0030000, 0x00D0000, 0x0180000 },
129 { 0x14, 0x00B0000, 0x0040000, 0x01C0000, 0x0190000 },
130 { 0x14, 0x0060000, 0x0060000, 0x01A0000, 0x0300000 },
131 { 0x15, 0x0160000, 0x0080000, 0x0380000, 0x0320000 },
132 { 0x15, 0x00C0000, 0x00C0000, 0x0340000, 0x0600000 },
133 { 0x16, 0x02C0000, 0x0100000, 0x0700000, 0x0640000 },
134 { 0x16, 0x0180000, 0x0180000, 0x0680000, 0x0C00000 },
135 { 0x17, 0x0580000, 0x0200000, 0x0E00000, 0x0C80000 },
136 { 0x17, 0x0300000, 0x0300000, 0x0D00000, 0x1800000 },
137 { 0x18, 0x0B00000, 0x0400000, 0x1C00000, 0x1900000 },
138 { 0x18, 0x0600000, 0x0600000, 0x1A00000, 0x3000000 },
139 { 0x19, 0x1600000, 0x0800000, 0x3800000, 0x3200000 },
140 { 0x19, 0x0C00000, 0x0C00000, 0x3400000, 0x6000000 },
141 { 0x1A, 0x2C00000, 0x1000000, 0x7000000, 0x6400000 },
142 { 0x1A, 0x1800000, 0x1800000, 0x6800000, 0xC000000 },
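Each row of the xcodes[] table above packs five values. The struct CParam they initialize is not part of this excerpt, so the declaration below is only a sketch with assumed field names, one per column; what can be read directly off the table is that the first column (a bit count) increases by one every second row while the remaining columns double in the same rhythm.

    /* Sketch only: field names and meanings are assumptions, not the
     * declaration from takdec.c; one xcodes[] row initializes one CParam. */
    struct CParam {
        int init;     /* column 1 (code length in bits, judging by the doubling pattern) */
        int escape;   /* column 2 */
        int scale;    /* column 3 */
        int aescape;  /* column 4 */
        int bias;     /* column 5 */
    };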
207 unsigned a1 = *coeffs++;
208 for (i = 0; i < length - 1 >> 1; i++) {
210 coeffs[1] += (unsigned)*coeffs;
216 } else if (mode == 2) {
217 unsigned a1 = coeffs[1];
218 unsigned a2 = a1 + *coeffs;
222 for (i = 0; i < length - 2 >> 1; i++) {
223 unsigned a3 = *coeffs + a1;
224 unsigned a4 = a3 + a2;
234 } else if (mode == 3) {
235 unsigned a1 = coeffs[1];
236 unsigned a2 = a1 + *coeffs;
239 unsigned a3 = coeffs[2];
240 unsigned a4 = a3 + a1;
241 unsigned a5 = a4 + a2;
244 for (i = 0; i < length - 3; i++) {
262 memset(decoded, 0, len * sizeof(*decoded));
270 for (i = 0; i < len; i++) {
278 if (scale_bits > 0) {
279 if (scale_bits == 7) {
293 decoded[i] = (x >> 1) ^ -(x & 1);
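/* (x >> 1) ^ -(x & 1) is the usual zigzag mapping from an unsigned code back to a signed value: 0, 1, 2, 3, 4, ... -> 0, -1, 1, -2, 2, ... */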
310 wlength = length / s->uval;
312 rval = length - (wlength * s->uval);
314 if (rval < s->uval / 2)
319 if (wlength <= 1 || wlength > 128)
324 for (i = 1; i < wlength; i++) {
336 mode += (-sign ^ (c - 1)) + sign;
350 while (i < wlength) {
355 if (i >= wlength - 1)
387 int subframe_size, int prev_subframe_size)
390 int x, y, i, j, ret = 0;
391 int dshift, size, filter_quant, filter_order;
399 if (prev_subframe_size > 0 && get_bits1(gb)) {
400 if (filter_order > prev_subframe_size)
403 decoded -= filter_order;
404 subframe_size += filter_order;
406 if (filter_order > subframe_size)
411 if (filter_order > subframe_size)
430 filter_quant -= get_bits(gb, 3) + 1;
431 if (filter_quant < 3)
439 if (filter_order > 4) {
442 for (i = 4; i < filter_order; i++) {
450 for (i = 1; i < filter_order; i++) {
451 uint32_t *p1 = &tfilter[0];
452 uint32_t *p2 = &tfilter[i - 1];
454 for (j = 0; j < (i + 1) / 2; j++) {
464 x = 1 << (32 - (15 - filter_quant));
465 y = 1 << ((15 - filter_quant) - 1);
466 for (i = 0, j = filter_order - 1; i < filter_order / 2; i++, j--) {
467 s->filter[j] = x - ((tfilter[i] + y) >> (15 - filter_quant));
468 s->filter[i] = x - ((tfilter[j] + y) >> (15 - filter_quant));
472 subframe_size - filter_order)) < 0)
475 for (i = 0; i < filter_order; i++)
476 s->residues[i] = *decoded++ >> dshift;
479 x = subframe_size - filter_order;
483 for (i = 0; i < tmp; i++) {
484 int v = 1 << (filter_quant - 1);
486 if (filter_order & -16)
489 for (j = filter_order & -16; j < filter_order; j += 4) {
495 v = (av_clip_intp2(v >> filter_quant, 13) * (1 << dshift)) - (unsigned)*decoded;
497 s->residues[filter_order + i] = v >> dshift;
516 int i = 0, ret, prev = 0;
567 length += s->dmode < 6;
590 int length2, order_half, filter_order, dval1, dval2;
591 int tmp, x, code_size;
601 for (i = 0; i < filter_order; i++) {
607 order_half = filter_order / 2;
608 length2 = length - (filter_order - 1);
612 for (i = 0; i < order_half; i++) {
621 for (i = length2 + order_half; i < length; i++) {
629 for (i = 0; i < filter_order; i++)
634 for (; length2 > 0; length2 -= tmp) {
635 tmp = FFMIN(length2, x);
637 for (i = 0; i < tmp - (tmp == length2); i++)
638 s->residues[filter_order + i] = *p2++ >> dshift;
640 for (i = 0; i < tmp; i++) {
643 if (filter_order == 16) {
684 int chan, i, ret, hsize;
716 "invalid number of channels: %d\n", s->ti.channels);
721 "unsupported number of channels: %d\n", s->ti.channels);
764 for (chan = 0; chan < avctx->channels; chan++)
769 for (chan = 0; chan < avctx->channels; chan++) {
776 for (chan = 0; chan < avctx->channels; chan++)
798 for (i = 0; i < chan; i++) {
804 if (ch_mask & 1 << nbit)
813 "invalid channel 2 (%d) for %d channel(s)\n",
829 ch_mask |= 1 << nbit;
833 for (i = 0; i < chan; i++) {
839 for (i = 0; i < chan; i++) {
858 for (chan = 0; chan < avctx->channels; chan++) {
889 for (chan = 0; chan < avctx->channels; chan++) {
893 samples[i] = decoded[i] + 0x80U;
897 for (chan = 0; chan < avctx->channels; chan++) {
901 samples[i] = decoded[i];
905 for (chan = 0; chan < avctx->channels; chan++) {
908 samples[i] *= 1U << 8;
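The three per-channel output loops above (source lines 889, 897 and 905) show how the decoded 32-bit samples are written out for the three supported widths: 8-bit output is offset-binary (add 0x80), 16-bit output is copied as-is, and 24-bit output is shifted into the high bits of a 32-bit word. Below is a minimal standalone sketch of the same conversions; it is not the decoder's own helper, and the function and buffer names are illustrative only.

    #include <stdint.h>

    /* Illustrative only: mirrors the three conversions in the listing above. */
    static void write_samples(int bps, const int32_t *decoded, int n, void *out)
    {
        int i;
        switch (bps) {
        case 8:   /* signed sample -> offset-binary uint8_t */
            for (i = 0; i < n; i++)
                ((uint8_t *)out)[i] = (uint8_t)(decoded[i] + 0x80U);
            break;
        case 16:  /* already in range, straight copy */
            for (i = 0; i < n; i++)
                ((int16_t *)out)[i] = (int16_t)decoded[i];
            break;
        case 24:  /* 24-bit value placed in the top bits of an int32_t */
            for (i = 0; i < n; i++)
                ((int32_t *)out)[i] = (int32_t)((uint32_t)decoded[i] << 8);
            break;
        }
    }

As with the (unsigned) casts elsewhere in the listing, the 24-bit case does its arithmetic in unsigned to avoid signed overflow, which is undefined behaviour in C.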
static int set_bps_params(AVCodecContext *avctx)
static const uint16_t predictor_sizes[]
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
int ff_tak_decode_frame_header(AVCodecContext *avctx, GetBitContext *gb, TAKStreamInfo *ti, int log_level_offset)
Validate and decode a frame header.
static int shift(int a, int b)
int16_t subframe_len[MAX_SUBFRAMES]
subframe length in samples
This structure describes decoded (raw) audio or video data.
av_cold void ff_takdsp_init(TAKDSPContext *c)
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
static av_cold int init(AVCodecContext *avctx)
void(* decorrelate_ls)(int32_t *p1, int32_t *p2, int length)
MCDParam mcdparams[TAK_MAX_CHANNELS]
multichannel decorrelation parameters
int16_t filter[MAX_PREDICTORS]
#define AV_EF_COMPLIANT
consider all spec non compliances as errors
static int decode_subframe(TAKDecContext *s, int32_t *decoded, int subframe_size, int prev_subframe_size)
The reader does not expect b to be semantically signed here, and if the code is later changed, for example by adding a division or some other operation, the signedness will almost certainly be mistaken. To avoid this confusion a new type was introduced: SUINT is the C unsigned type, but it holds a signed int; to use the same example, a would be declared as SUINT a.
#define AV_CODEC_CAP_CHANNEL_CONF
Codec should fill in channel configuration and samplerate instead of container.
#define FF_ARRAY_ELEMS(a)
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
static int decode_segment(TAKDecContext *s, int8_t mode, int32_t *decoded, int len)
static int get_sbits(GetBitContext *s, int n)
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
int8_t lpc_mode[TAK_MAX_CHANNELS]
unsigned int decode_buffer_size
enum AVSampleFormat sample_fmt
audio sample format
av_cold void ff_audiodsp_init(AudioDSPContext *c)
static av_cold int tak_decode_close(AVCodecContext *avctx)
Undefined Behavior: in the C language some operations are undefined, such as signed integer overflow, dereferencing freed pointers, or accessing outside allocated space. Undefined Behavior must not occur in a C program; it is not safe even if the output of the undefined operation is unused. The unsafety may seem like nit-picking, but optimizing compilers have in fact optimized code on the assumption that no Undefined Behavior occurs, and optimizing based on wrong assumptions can, and in some cases has, led to effects beyond the output of the computation. The signed integer overflow problem shows up in speed-critical code: code which is highly optimized and works with signed integers sometimes has this problem.
Multithreading support functions.
int ff_tak_check_crc(const uint8_t *buf, unsigned int buf_size)
AVCodecContext * avctx
parent AVCodecContext
#define DECLARE_ALIGNED(n, t, v)
Declare a variable that is aligned in memory.
static int get_bits_count(const GetBitContext *s)
int bits_per_coded_sample
bits per sample/pixel from the demuxer (needed for huffyuv).
static int decode_channel(TAKDecContext *s, int chan)
#define MAX_SUBFRAMES
max number of subframes per channel
int8_t index
index into array of decorrelation types
static int get_bits_left(GetBitContext *gb)
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
const char * name
Name of the codec implementation.
int8_t present
decorrelation parameter availability for this channel
static int decode_residues(TAKDecContext *s, int32_t *decoded, int length)
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
uint64_t channel_layout
Audio channel layout.
#define ONLY_IF_THREADS_ENABLED(x)
Define a function with only the non-default version specified.
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
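av_fast_malloc() only reallocates when the requested size exceeds the current allocation, which is why the decoder keeps the current size in a separate field (decode_buffer_size above). A minimal usage sketch; the context struct and helper name here are illustrative, only av_fast_malloc() itself is the real API:

    #include <stddef.h>
    #include <stdint.h>
    #include "libavutil/error.h"
    #include "libavutil/mem.h"

    /* Hypothetical context for illustration. */
    typedef struct {
        int32_t      *decode_buffer;
        unsigned int  decode_buffer_size;
    } Ctx;

    static int ensure_buffer(Ctx *c, size_t nb_samples)
    {
        /* Grows the buffer only if it is currently smaller than requested;
         * on failure the old buffer is freed and the size reset, so no
         * extra cleanup is needed here. */
        av_fast_malloc(&c->decode_buffer, &c->decode_buffer_size,
                       nb_samples * sizeof(*c->decode_buffer));
        if (!c->decode_buffer)
            return AVERROR(ENOMEM);
        return 0;
    }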
common internal API header
static av_cold int tak_decode_init(AVCodecContext *avctx)
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
void(* decorrelate_sr)(int32_t *p1, int32_t *p2, int length)
static const struct CParam xcodes[50]
static int get_bits_esc4(GetBitContext *gb)
#define AV_EF_EXPLODE
abort decoding on minor error detection
static int decorrelate(TAKDecContext *s, int c1, int c2, int length)
static void decode_lpc(int32_t *coeffs, int mode, int length)
void(* decorrelate_sf)(int32_t *p1, int32_t *p2, int length, int dshift, int dfactor)
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Libavcodec external API header.
AVSampleFormat
Audio sample formats.
#define TAK_MIN_FRAME_HEADER_BYTES
int32_t(* scalarproduct_int16)(const int16_t *v1, const int16_t *v2, int len)
Calculate scalar product of two vectors.
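scalarproduct_int16 is a function pointer in AudioDSPContext so that SIMD builds can swap in optimized versions; the behaviour every implementation has to match is just a dot product accumulated in 32 bits. A plain C sketch of that reference behaviour:

    #include <stdint.h>

    /* Reference behaviour of the scalarproduct_int16 pointer: a dot product
     * of two int16_t vectors, accumulated in 32 bits. */
    static int32_t scalarproduct_int16_ref(const int16_t *v1, const int16_t *v2,
                                           int len)
    {
        int32_t sum = 0;
        int i;
        for (i = 0; i < len; i++)
            sum += v1[i] * v2[i];
        return sum;
    }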
int sample_rate
samples per second
int16_t predictors[MAX_PREDICTORS]
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
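The GetBitContext helpers listed here (init_get_bits8(), get_bits(), get_bits1(), get_bits_long(), get_bits_left()) are the whole bitstream-reading interface the decoder relies on. A minimal usage sketch, assuming it is compiled inside the FFmpeg tree; the field widths read below are made up for illustration:

    #include "get_bits.h"

    static int read_example(const uint8_t *buf, int size_bytes)
    {
        GetBitContext gb;
        int ret = init_get_bits8(&gb, buf, size_bytes);
        if (ret < 0)
            return ret;

        unsigned flag  = get_bits1(&gb);          /* 1 bit     */
        unsigned small = get_bits(&gb, 5);        /* 1-25 bits */
        unsigned wide  = get_bits_long(&gb, 30);  /* 0-32 bits */

        if (get_bits_left(&gb) < 0)               /* overread check */
            return AVERROR_INVALIDDATA;
        return (int)(flag + small + wide);
    }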
main external API structure.
int32_t * decoded[TAK_MAX_CHANNELS]
decoded samples for each channel
static unsigned int get_bits1(GetBitContext *s)
static void skip_bits(GetBitContext *s, int n)
int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples, enum AVSampleFormat sample_fmt, int align)
Get the required buffer size for the given audio parameters.
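av_samples_get_buffer_size() turns a channel count, sample count and sample format into a byte size (and per-plane linesize), which is what a caller needs before allocating an output buffer. A small usage sketch with illustrative numbers:

    #include "libavutil/samplefmt.h"

    static int example_buffer_size(void)
    {
        int linesize;
        /* 2 channels, 4096 samples, interleaved signed 16-bit, default alignment. */
        int size = av_samples_get_buffer_size(&linesize, 2, 4096,
                                              AV_SAMPLE_FMT_S16, 0);
        /* size < 0 is an AVERROR code; otherwise it is the byte count
         * (2 channels * 4096 samples * 2 bytes = 16384 here). */
        return size;
    }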
TAK (Tom's lossless Audio Kompressor) decoder/demuxer common functions.
GetBitContext gb
bitstream reader initialized to start at the current frame
int8_t dmode
channel decorrelation type in the current frame
#define AV_EF_CRCCHECK
Verify checksums embedded in the bitstream (could be of either encoded or decoded data...
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
void avpriv_report_missing_feature(void *avc, const char *msg, ...) av_printf_format(2, 3)
Log a generic warning message about a missing feature.
int nb_samples
number of samples in the current frame
void(* decorrelate_sm)(int32_t *p1, int32_t *p2, int length)
common internal api header.
static int get_unary(GetBitContext *gb, int stop, int len)
Get unary code of limited length.
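get_unary(gb, stop, len) reads a unary code but gives up after len bits, which is how the decoder bounds otherwise unbounded codes. A sketch of the equivalent loop (the real helper lives in libavcodec/unary.h):

    #include "get_bits.h"

    /* Count bits until the stop bit is seen or len bits have been read,
     * whichever comes first. */
    static inline int get_unary_sketch(GetBitContext *gb, int stop, int len)
    {
        int i;
        for (i = 0; i < len && get_bits1(gb) != stop; i++)
            ;
        return i;
    }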
static const int8_t mc_dmodes[]
int nb_subframes
number of subframes in the current frame
int av_samples_fill_arrays(uint8_t **audio_data, int *linesize, const uint8_t *buf, int nb_channels, int nb_samples, enum AVSampleFormat sample_fmt, int align)
Fill plane data pointers and linesize for samples with sample format sample_fmt.
static int tak_decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr, AVPacket *pkt)
int channels
number of audio channels
static const uint8_t * align_get_bits(GetBitContext *s)
static void set_sample_rate_params(AVCodecContext *avctx)
int8_t sample_shift[TAK_MAX_CHANNELS]
shift applied to every sample in the channel
#define FFSWAP(type, a, b)
uint8_t ** extended_data
pointers to the data planes/channels.
This structure stores compressed data.
int nb_samples
number of audio samples (per channel) described by this frame
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.