Go to the documentation of this file.
   22 #include "config_components.h" 
   50 #define MAX_CHANNELS 2 
   81 #define LATTICE_SHIFT   10 
   82 #define SAMPLE_SHIFT    4 
   83 #define LATTICE_FACTOR  (1 << LATTICE_SHIFT) 
   84 #define SAMPLE_FACTOR   (1 << SAMPLE_SHIFT) 
   86 #define BASE_QUANT      0.6 
   87 #define RATE_VARIATION  3.0 
   91     return (a+(1<<(b-1))) >> b;
 
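The shift() helper at line 91 is a rounding right shift: adding half of 2^b before shifting rounds to the nearest integer instead of flooring. A minimal standalone sketch (the test values are mine, not from the file):

    #include <stdio.h>

    /* Rounding right shift as on line 91: add half the divisor, then shift. */
    static int shift_round(int a, int b)
    {
        return (a + (1 << (b - 1))) >> b;
    }

    int main(void)
    {
        printf("%d %d\n", 5 >> 1, shift_round(5, 1)); /* 2 3: 5/2 rounds up */
        printf("%d %d\n", 7 >> 2, shift_round(7, 2)); /* 1 2: 7/4 rounds up */
        return 0;
    }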
  102 #define put_rac(C,S,B) \ 
  106         rc_stat2[(S)-state][B]++;\ 
  121             for(i=e-1; i>=0; i--){
 
  133             for(i=e-1; i>=0; i--){
 
  160         for(i=e-1; i>=0; i--){
 
  174     for (i = 0; i < entries; i++)
 
  184     for (i = 0; i < entries; i++)
 
  194     for (i = 0; i < entries; i++)
 
  204     for (i = 0; i < entries; i++)
 
  212 #define ADAPT_LEVEL 8 
  214 static int bits_to_store(uint64_t x)
 
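bits_to_store() at line 214 presumably returns how many bits are needed to hold x. A hedged sketch under that assumption, not the file's exact body:

    #include <stdint.h>
    #include <stdio.h>

    /* Number of bits needed to represent x: 0 for 0, 1 for 1, 8 for 255, ... */
    static int bits_needed(uint64_t x)
    {
        int res = 0;
        while (x) {
            res++;
            x >>= 1;
        }
        return res;
    }

    int main(void)
    {
        printf("%d %d %d\n", bits_needed(0), bits_needed(1), bits_needed(255)); /* 0 1 8 */
        return 0;
    }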
  264     int i, j, x = 0, low_bits = 0, max = 0;
 
  265     int step = 256, pos = 0, dominant = 0, any = 0;
 
  276         for (i = 0; i < entries; i++)
 
  277             energy += abs(buf[i]);
 
  279         low_bits = bits_to_store(energy / (entries * 2));
 
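Line 279 sizes low_bits from the mean absolute value of the buffer; the idea of the base_2_part mode is that the low_bits least-significant bits of each entry are stored verbatim while only the remaining high part goes through the adaptive coder. A hedged sketch of such a split (split_value and the test values are mine):

    #include <stdio.h>

    /* Split a non-negative value into a verbatim low part and a coded high part,
     * assuming the base_2_part scheme suggested by line 279. */
    static void split_value(int v, int low_bits, int *high, int *low)
    {
        *low  = v & ((1 << low_bits) - 1); /* written as plain bits   */
        *high = v >> low_bits;             /* adaptively coded part   */
    }

    int main(void)
    {
        int high, low;
        split_value(45, 3, &high, &low);
        printf("high=%d low=%d back=%d\n", high, low, (high << 3) | low); /* 5 5 45 */
        return 0;
    }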
  286     for (i = 0; i < entries; i++)
 
  301     for (i = 0; i <= max; i++)
 
  303         for (j = 0; j < entries; j++)
 
  311         int steplet = step >> 8;
 
  313         if (pos + steplet > x)
 
  316         for (i = 0; i < steplet; i++)
 
  331             while (((pos + interloper) < x) && (bits[pos + interloper] == dominant))
 
  335             write_uint_max(pb, interloper, (step >> 8) - 1);
 
  337             pos += interloper + 1;
 
  344             dominant = !dominant;
 
  349     for (i = 0; i < entries; i++)
 
  361     int i, low_bits = 0, x = 0;
 
  362     int n_zeros = 0, step = 256, dominant = 0;
 
  374             for (i = 0; i < entries; i++)
 
  380     while (n_zeros < entries)
 
  382         int steplet = step >> 8;
 
  386             for (i = 0; i < steplet; i++)
 
  387                 bits[x++] = dominant;
 
  400             for (i = 0; i < actual_run; i++)
 
  401                 bits[x++] = dominant;
 
  403             bits[x++] = !dominant;
 
  406                 n_zeros += actual_run;
 
  416             dominant = !dominant;
 
  422     for (i = 0; n_zeros < entries; i++)
 
  429                 level += 1 << low_bits;
 
  439             buf[pos] += 1 << low_bits;
 
  448     for (i = 0; i < entries; i++)
 
  462     for (i = order-2; i >= 0; i--)
 
  466         for (j = 0, p = i+1; p < order; j++,p++)
 
  480     int *k_ptr = &(k[order-2]),
 
  481         *state_ptr = &(state[order-2]);
 
  482     for (i = order-2; i >= 0; i--, k_ptr--, state_ptr--)
 
  484         int k_value = *k_ptr, state_value = *state_ptr;
 
  489     for (i = order-2; i >= 0; i--)
 
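The fragments around lines 480-489 (presumably predictor_calc_error, listed in the footer below) run a fixed-point lattice filter: each stage removes k*state from the running error and refreshes its state with the reflected error, with products scaled down by LATTICE_SHIFT. A single-stage sketch of that idea, not the file's exact update order:

    #include <stdio.h>

    #define LATTICE_SHIFT 10

    /* Truncating fixed-point shift (sketch of a shift_down-style helper). */
    static int shift_dn(int a, int b)
    {
        return (a >> b) + (a < 0 ? 1 : 0);
    }

    /* One lattice stage: k is a Q10 reflection coefficient. */
    static void lattice_stage(int k, int *state, int *x)
    {
        int s  = *state;
        *x    -= shift_dn(k * s,  LATTICE_SHIFT);     /* remove predicted part */
        *state = s + shift_dn(k * *x, LATTICE_SHIFT); /* propagate the error   */
    }

    int main(void)
    {
        int state = 200, x = 1000;
        lattice_stage(512 /* 0.5 in Q10 */, &state, &x);
        printf("x=%d state=%d\n", x, state); /* x=900 state=650 */
        return 0;
    }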
  505 #if CONFIG_SONIC_ENCODER || CONFIG_SONIC_LS_ENCODER 
  510 static void modified_levinson_durbin(int *window, int window_entries,
 
  511         int *out, int out_entries, int channels, int *tap_quant)
 
  518     for (i = 0; i < out_entries; i++)
 
  521         double xx = 0.0, xy = 0.0;
 
  524         int *state_ptr = &(state[0]);
 
  525         j = window_entries - step;
 
  526         for (;j>0;j--,x_ptr++,state_ptr++)
 
  528             double x_value = *x_ptr;
 
  529             double state_value = *state_ptr;
 
  530             xx += state_value*state_value;
 
  531             xy += x_value*state_value;
 
  534         for (j = 0; j <= (window_entries - step); j++);
 
  537             double stateval = window[j];
 
  540             xx += stateval*stateval;
 
  541             xy += stepval*stateval;
 
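The loops around lines 524-541 accumulate xx (energy of the delayed window) and xy (its correlation with the current window); the reflection coefficient for the current tap is then essentially the ratio of the two, held in LATTICE_FACTOR (Q10) fixed point. A hedged sketch of that estimate, ignoring the per-tap tap_quant rounding of the real encoder:

    #include <stdio.h>

    #define LATTICE_SHIFT  10
    #define LATTICE_FACTOR (1 << LATTICE_SHIFT)

    /* Reflection coefficient from the accumulated correlations, in Q10. */
    static int reflection_q10(double xx, double xy)
    {
        double k = (xx == 0.0) ? 0.0 : -xy / xx; /* least-squares ratio */
        return (int)(k * LATTICE_FACTOR);        /* scale to Q10        */
    }

    int main(void)
    {
        printf("%d\n", reflection_q10(4.0, -2.0)); /* k = 0.5 -> 512 */
        return 0;
    }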
  559         state_ptr = &(state[0]);
 
  560         j = window_entries - step;
 
  561         for (;j>0;j--,x_ptr++,state_ptr++)
 
  563             int x_value = *x_ptr;
 
  564             int state_value = *state_ptr;
 
  569         for (j=0; j <= (window_entries - step); j++)
 
  572             int stateval=state[j];
 
  580 static inline int code_samplerate(int samplerate)
 
  584         case 44100: return 0;
 
  585         case 22050: return 1;
 
  586         case 11025: return 2;
 
  587         case 96000: return 3;
 
  588         case 48000: return 4;
 
  589         case 32000: return 5;
 
  590         case 24000: return 6;
 
  591         case 16000: return 7;
 
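code_samplerate() maps each supported rate to a 4-bit header code (lines 584-591); the decoder reverses the mapping by indexing samplerate_table (line 851 below). A table-driven round-trip sketch equivalent to the switch:

    #include <stdio.h>

    /* Same ordering as the switch above and the table at line 851. */
    static const int samplerate_table[] =
        { 44100, 22050, 11025, 96000, 48000, 32000, 24000, 16000, 8000 };

    static int code_rate(int samplerate)
    {
        for (int i = 0; i < (int)(sizeof(samplerate_table)/sizeof(*samplerate_table)); i++)
            if (samplerate_table[i] == samplerate)
                return i;
        return -1; /* unsupported rate */
    }

    int main(void)
    {
        int code = code_rate(48000);
        printf("code=%d rate=%d\n", code, samplerate_table[code]); /* code=4 rate=48000 */
        return 0;
    }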
  615         s->decorrelation = 3;
 
  622         s->quantization = 0.0;
 
  628         s->quantization = 1.0;
 
  632     if (s->num_taps < 32 || s->num_taps > 1024 || s->num_taps % 32) {
 
  638     s->tap_quant = av_calloc(s->num_taps, sizeof(*s->tap_quant));
 
  642     for (i = 0; i < s->num_taps; i++)
 
  648     s->block_align = 2048LL*s->samplerate/(44100*s->downsampling);
 
  649     s->frame_size = s->channels*s->block_align*s->downsampling;
 
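Worked example of lines 648-649: at 44100 Hz with downsampling 1, block_align = 2048*44100/(44100*1) = 2048 coded samples per channel, and a stereo frame holds frame_size = 2*2048*1 = 4096 samples; at 22050 Hz, block_align drops to 1024.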
  651     s->tail_size = s->num_taps*s->channels;
 
  656     s->predictor_k = av_calloc(s->num_taps, sizeof(*s->predictor_k) );
 
  660     coded_samples = av_calloc(s->block_align, s->channels * sizeof(**s->coded_samples));
 
  663     for (i = 0; i < s->channels; i++, coded_samples += s->block_align)
 
  664         s->coded_samples[i] = coded_samples;
 
  666     s->int_samples = av_calloc(s->frame_size, sizeof(*s->int_samples));
 
  668     s->window_size = ((2*s->tail_size)+s->frame_size);
 
  669     s->window = av_calloc(s->window_size, 2 * sizeof(*s->window));
 
  670     if (!s->window || !s->int_samples)
 
  681         if (s->version >= 2) {
 
  686         put_bits(&pb, 4, code_samplerate(s->samplerate));
 
  699     av_log(avctx, AV_LOG_INFO, "Sonic: ver: %d.%d ls: %d dr: %d taps: %d block: %d frame: %d downsamp: %d\n",
 
  700         s->version, s->minor_version, s->lossless, s->decorrelation, s->num_taps, s->block_align, s->frame_size, s->downsampling);
 
  726     int i, j, ch, quant = 0, x = 0;
 
  739     for (i = 0; i < s->frame_size; i++)
 
  743         for (i = 0; i < s->frame_size; i++)
 
  746     switch(s->decorrelation)
 
  749             for (i = 0; i < s->frame_size; i += s->channels)
 
  751                 s->int_samples[i] += s->int_samples[i+1];
 
  752                 s->int_samples[i+1] -= shift(s->int_samples[i], 1);
 
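Lines 751-752 are the mid/side decorrelation of the first mode: channel 0 becomes the sum and channel 1 the side relative to half that sum; the decoder undoes it at lines 1056-1057 in reverse order, so the pair is exactly invertible in integer arithmetic. A standalone round-trip check (the sample values are mine):

    #include <stdio.h>

    static int shift(int a, int b)
    {
        return (a + (1 << (b - 1))) >> b;
    }

    /* Forward transform, as at lines 751-752. */
    static void mid_side_fwd(int *l, int *r)
    {
        *l += *r;           /* l' = l + r           */
        *r -= shift(*l, 1); /* r' = r - round(l'/2) */
    }

    /* Inverse transform, as at lines 1056-1057. */
    static void mid_side_inv(int *l, int *r)
    {
        *r += shift(*l, 1);
        *l -= *r;
    }

    int main(void)
    {
        int l = 1001, r = -37;
        mid_side_fwd(&l, &r);
        mid_side_inv(&l, &r);
        printf("%d %d\n", l, r); /* prints 1001 -37: lossless round trip */
        return 0;
    }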
  756             for (i = 0; i < s->frame_size; i += s->channels)
 
  757                 s->int_samples[i+1] -= s->int_samples[i];
 
  760             for (i = 0; i < s->frame_size; i += s->channels)
 
  761                 s->int_samples[i] -= s->int_samples[i+1];
 
  765     memset(s->window, 0, s->window_size * sizeof(*s->window));
 
  767     for (i = 0; i < s->tail_size; i++)
 
  768         s->window[x++] = s->tail[i];
 
  770     for (i = 0; i < s->frame_size; i++)
 
  771         s->window[x++] = s->int_samples[i];
 
  773     for (i = 0; i < s->tail_size; i++)
 
  776     for (i = 0; i < s->tail_size; i++)
 
  777         s->tail[i] = s->int_samples[s->frame_size - s->tail_size + i];
 
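Lines 765-777 build the analysis window as previous tail, then the current frame, then zeros (window_size = 2*tail_size + frame_size per line 668), and afterwards save the end of the frame as the next tail. A hedged sketch of that buffering with symbolic sizes:

    #include <stdio.h>
    #include <string.h>

    /* Sketch of the window layout from lines 765-777:
     * [ previous tail | current frame | zeros ], then refresh the tail. */
    static void build_window(int *window, int *tail, const int *frame,
                             int tail_size, int frame_size)
    {
        int x = 0;
        memset(window, 0, (2 * tail_size + frame_size) * sizeof(*window));
        for (int i = 0; i < tail_size; i++)
            window[x++] = tail[i];                       /* history              */
        for (int i = 0; i < frame_size; i++)
            window[x++] = frame[i];                      /* frame being encoded  */
        for (int i = 0; i < tail_size; i++)
            tail[i] = frame[frame_size - tail_size + i]; /* next frame's history */
    }

    int main(void)
    {
        int tail[2] = { 7, 8 }, frame[4] = { 1, 2, 3, 4 }, window[8];
        build_window(window, tail, frame, 2, 4);
        for (int i = 0; i < 8; i++)
            printf("%d ", window[i]);                  /* 7 8 1 2 3 4 0 0 */
        printf("| tail %d %d\n", tail[0], tail[1]);    /* 3 4 */
        return 0;
    }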
  780     modified_levinson_durbin(s->window, s->window_size,
 
  781                 s->predictor_k, s->num_taps, s->channels, s->tap_quant);
 
  786     for (ch = 0; ch < s->channels; ch++)
 
  789         for (i = 0; i < s->block_align; i++)
 
  792             for (j = 0; j < s->downsampling; j++, x += s->channels)
 
  794             s->coded_samples[ch][i] = sum;
 
  801         double energy1 = 0.0, energy2 = 0.0;
 
  802         for (ch = 0; ch < s->channels; ch++)
 
  804             for (i = 0; i < s->block_align; i++)
 
  806                 double sample = s->coded_samples[ch][i];
 
  812         energy2 = sqrt(energy2/(s->channels*s->block_align));
 
  813         energy1 = M_SQRT2*energy1/(s->channels*s->block_align);
 
  818         if (energy2 > energy1)
 
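Lines 804-813 reduce the coded residuals to two spread estimates: energy1 is sqrt(2) times the mean absolute value and energy2 is the RMS. For a Laplacian (two-sided exponential) residual the two agree; energy2 exceeding energy1 signals heavier tails, which is what the comparison at line 818 tests. A sketch of the two statistics (the toy data is mine):

    #include <math.h>
    #include <stdio.h>

    /* energy1 ~ sqrt(2) * mean(|x|), energy2 ~ RMS, as accumulated around lines 804-813. */
    static void spread_estimates(const double *x, int n, double *energy1, double *energy2)
    {
        double sum_abs = 0.0, sum_sq = 0.0;
        for (int i = 0; i < n; i++) {
            sum_abs += fabs(x[i]);
            sum_sq  += x[i] * x[i];
        }
        *energy2 = sqrt(sum_sq / n);
        *energy1 = sqrt(2.0) * sum_abs / n; /* M_SQRT2 in the listing */
    }

    int main(void)
    {
        const double spiky[] = { 0, 0, 0, 0, 0, 10 }; /* mostly silence, one outlier */
        double e1, e2;
        spread_estimates(spiky, 6, &e1, &e2);
        printf("energy1=%.2f energy2=%.2f\n", e1, e2); /* ~2.36 vs ~4.08 */
        return 0;
    }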
  832     for (ch = 0; ch < s->channels; ch++)
 
  835             for (i = 0; i < s->block_align; i++)
 
  849 #if CONFIG_SONIC_DECODER 
  850 static const int samplerate_table[] =
 
  851     { 44100, 22050, 11025, 96000, 48000, 32000, 24000, 16000, 8000 };
 
  875     if (s->version >= 2) {
 
  887         int sample_rate_index;
 
  889         sample_rate_index = get_bits(&gb, 4);
 
  894         s->samplerate = samplerate_table[sample_rate_index];
 
  896             s->channels, s->samplerate);
 
  912     if (s->decorrelation != 3 && s->channels != 2) {
 
  918     if (!s->downsampling) {
 
  927     if (s->num_taps > 128)
 
  930     s->block_align = 2048LL*s->samplerate/(44100*s->downsampling);
 
  931     s->frame_size = s->channels*s->block_align*s->downsampling;
 
  934     if (s->num_taps * s->channels > s->frame_size) {
 
  936                "number of taps times channels (%d * %d) larger than frame size %d\n",
 
  937                s->num_taps, s->channels, s->frame_size);
 
  941     av_log(avctx, AV_LOG_INFO, "Sonic: ver: %d.%d ls: %d dr: %d taps: %d block: %d frame: %d downsamp: %d\n",
 
  942         s->version, s->minor_version, s->lossless, s->decorrelation, s->num_taps, s->block_align, s->frame_size, s->downsampling);
 
  945     s->tap_quant = av_calloc(s->num_taps, sizeof(*s->tap_quant));
 
  949     for (i = 0; i < s->num_taps; i++)
 
  952     s->predictor_k = av_calloc(s->num_taps, sizeof(*s->predictor_k));
 
  954     tmp = av_calloc(s->num_taps, s->channels * sizeof(**s->predictor_state));
 
  957     for (i = 0; i < s->channels; i++, tmp += s->num_taps)
 
  958         s->predictor_state[i] = tmp;
 
  960     tmp = av_calloc(s->block_align, s->channels * sizeof(**s->coded_samples));
 
  963     for (i = 0; i < s->channels; i++, tmp += s->block_align)
 
  964         s->coded_samples[i]   = tmp;
 
  966     s->int_samples = av_calloc(s->frame_size, sizeof(*s->int_samples));
 
  988                               int *got_frame_ptr, AVPacket *avpkt)
 
  990     const uint8_t *buf = avpkt->data;
 
  991     int buf_size = avpkt->size;
 
  998     if (buf_size == 0) return 0;
 
 1014     for (i = 0; i < s->num_taps; i++)
 
 1015         s->predictor_k[i] *= (unsigned) s->tap_quant[i];
 
 1024     for (ch = 0; ch < s->channels; ch++)
 
 1035         for (i = 0; i < s->block_align; i++)
 
 1037             for (j = 0; j < s->downsampling - 1; j++)
 
 1047         for (i = 0; i < s->num_taps; i++)
 
 1048             s->predictor_state[ch][i] = s->int_samples[s->frame_size - s->channels + ch - i*s->channels];
 
 1051     switch(s->decorrelation)
 
 1054             for (i = 0; i < s->frame_size; i += s->channels)
 
 1056                 s->int_samples[i+1] += shift(s->int_samples[i], 1);
 
 1057                 s->int_samples[i] -= s->int_samples[i+1];
 
 1061             for (i = 0; i < s->frame_size; i += s->channels)
 
 1062                 s->int_samples[i+1] += s->int_samples[i];
 
 1065             for (i = 0; i < s->frame_size; i += s->channels)
 
 1066                 s->int_samples[i] += s->int_samples[i+1];
 
 1071         for (i = 0; i < s->frame_size; i++)
 
 1075     for (i = 0; i < s->frame_size; i++)
 
 1089     .init           = sonic_decode_init,
 
 1090     .close          = sonic_decode_close,
 
 1097 #if CONFIG_SONIC_ENCODER 
 1106     .init           = sonic_encode_init,
 
 1110     .close          = sonic_encode_close,
 
 1114 #if CONFIG_SONIC_LS_ENCODER 
 1116     .p.name         = "sonicls",
 
 1123     .init           = sonic_encode_init,
 
 1127     .close          = sonic_encode_close,
 
  
static void error(const char *err)
static int intlist_write(RangeCoder *c, uint8_t *state, int *buf, int entries, int base_2_part)
int frame_size
Number of samples per channel in an audio frame.
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
static int put_bytes_output(const PutBitContext *s)
int sample_rate
samples per second
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
This structure describes decoded (raw) audio or video data.
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
Write the bit value val, n times.
static unsigned read_uint_max(BonkContext *s, uint32_t max)
enum AVChannelOrder order
Channel order used in this layout.
int nb_channels
Number of channels in this layout.
static void skip_bits(GetBitContext *s, int n)
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
static SDL_Window * window
AVCodec p
The public AVCodec.
const struct AVCodec * codec
AVChannelLayout ch_layout
Audio channel layout.
av_cold void ff_init_range_encoder(RangeCoder *c, uint8_t *buf, int buf_size)
#define FF_CODEC_ENCODE_CB(func)
static const uint8_t quant[64]
exp golomb vlc writing stuff
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
#define AV_CODEC_CAP_EXPERIMENTAL
Codec is experimental and is thus avoided in favor of non experimental encoders.
#define FF_ARRAY_ELEMS(a)
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
static av_flatten int get_symbol(RangeCoder *c, uint8_t *state, int is_signed)
#define FF_CODEC_DECODE_CB(func)
static __device__ float floor(float a)
@ AV_CHANNEL_ORDER_UNSPEC
Only the channel count is specified, without any further information about the channel order.
static int predictor_calc_error(int *k, int *state, int order, int error)
#define AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE
This encoder can reorder user opaque values from input AVFrames and return them with corresponding ou...
int * coded_samples[MAX_CHANNELS]
static int get_se_golomb(GetBitContext *gb)
read signed exp golomb code.
#define CODEC_LONG_NAME(str)
const FFCodec ff_sonic_encoder
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
static int intlist_read(RangeCoder *c, uint8_t *state, int *buf, int entries, int base_2_part)
static __device__ float fabs(float a)
static void predictor_init_state(int *k, int *state, int order)
int ff_rac_terminate(RangeCoder *c, int version)
Terminates the range coder.
#define ROUNDED_DIV(a, b)
static unsigned int get_bits1(GetBitContext *s)
static av_always_inline av_flatten void put_symbol(RangeCoder *c, uint8_t *state, int v, int is_signed, uint64_t rc_stat[256][2], uint64_t rc_stat2[32][2])
#define AV_CODEC_CAP_CHANNEL_CONF
Codec should fill in channel configuration and samplerate instead of container.
const FFCodec ff_sonic_decoder
av_cold void ff_init_range_decoder(RangeCoder *c, const uint8_t *buf, int buf_size)
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
int(* init)(AVBSFContext *ctx)
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
static void copy(const float *p1, float *p2, const int length)
enum AVSampleFormat sample_fmt
audio sample format
void ff_build_rac_states(RangeCoder *c, int factor, int max_p)
static void set_se_golomb(PutBitContext *pb, int i)
write signed exp golomb code.
#define AV_LOG_INFO
Standard information.
#define i(width, name, range_min, range_max)
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
AVSampleFormat
Audio sample formats.
const FFCodec ff_sonic_ls_encoder
@ AV_SAMPLE_FMT_S16
signed 16 bits
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
const char * name
Name of the codec implementation.
static int get_rac(RangeCoder *c, uint8_t *const state)
void * av_calloc(size_t nmemb, size_t size)
int * predictor_state[MAX_CHANNELS]
main external API structure.
void av_channel_layout_uninit(AVChannelLayout *channel_layout)
Free any allocated data in the channel layout and reset the channel count to 0.
static int shift(int a, int b)
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
This structure stores compressed data.
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
static int shift_down(int a, int b)
int ff_alloc_packet(AVCodecContext *avctx, AVPacket *avpkt, int64_t size)
Check AVPacket size and allocate data.