 59 #define FREEZE_INTERVAL 128
 81         int frontier  = 1 << avctx->trellis;
120         bytestream_put_le16(&extradata, avctx->frame_size);
121         bytestream_put_le16(&extradata, 7);
122         for (i = 0; i < 7; i++) {
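Lines 120-122 write the Microsoft ADPCM extradata: the samples-per-block count, the number of predictor coefficient pairs (always 7), and then the pairs themselves as little-endian 16-bit values. A minimal sketch of that layout, assuming the elided loop body stores the two adaptation-coefficient tables scaled back up by 4 (the cross-references at the end of this listing note they are stored divided by 4):

/* Hypothetical helper mirroring lines 120-122; needs libavcodec/bytestream.h.
 * The *4 scaling of the coefficient pairs is an assumption based on the
 * "Divided by 4 to fit in 8-bit integers" note on the ff_adpcm_AdaptCoeff tables. */
static void write_ms_extradata(uint8_t *p, int samples_per_block,
                               const uint8_t *coeff1, const int8_t *coeff2)
{
    int i;
    bytestream_put_le16(&p, samples_per_block); /* avctx->frame_size */
    bytestream_put_le16(&p, 7);                 /* number of coefficient pairs */
    for (i = 0; i < 7; i++) {
        bytestream_put_le16(&p, coeff1[i] * 4); /* ff_adpcm_AdaptCoeff1[i] */
        bytestream_put_le16(&p, coeff2[i] * 4); /* ff_adpcm_AdaptCoeff2[i] */
    }
}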
 
169     int nibble = FFMIN(7, abs(delta) * 4 /
183     int nibble = 8*(delta < 0);
186     diff = delta + (step >> 3);
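Line 169 is the heart of adpcm_ima_compress_sample: the prediction error is quantized to a 3-bit magnitude scaled by 4/step, with bit 3 carrying the sign. A simplified, self-contained sketch of that quantization (the real function also reconstructs the predictor and updates the step index from the chosen nibble):

/* Simplified sketch of the quantization on lines 169 and 256; 'step' would come
 * from ff_adpcm_step_table[c->step_index] (IMA) or c->step (Yamaha). */
static uint8_t ima_quantize(int sample, int predicted, int step)
{
    int delta  = sample - predicted;
    int nibble = FFMIN(7, abs(delta) * 4 / step) + (delta < 0) * 8;
    return nibble;
}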
 
229     nibble = (nibble + bias) / c->idelta;
230     nibble = av_clip_intp2(nibble, 3) & 0x0F;
232     predictor += ((nibble & 0x08) ? (nibble - 0x10) : nibble) * c->idelta;
235     c->sample1 = av_clip_int16(predictor);
256     nibble = FFMIN(7, abs(delta) * 4 / c->step) + (delta < 0) * 8;
261     c->step = av_clip(c->step, 127, 24576);
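Lines 229-235 quantize the prediction error for Microsoft ADPCM: the error (plus a rounding bias) is divided by the current step idelta, clipped to a signed 4-bit value, and the encoder then mirrors the decoder by adding the sign-extended nibble times idelta back into the predictor. Line 261 shows the Yamaha variant clamping its adaptive step to [127, 24576]. A sketch of the MS step with a worked example; the bias and the idelta adaptation that happen in the elided lines are assumptions based on the surrounding code:

/* Sketch of the quantize/reconstruct step on lines 229-235 (simplified). */
static int ms_quantize(int sample, int predictor, int idelta, uint8_t *out_nibble)
{
    int delta  = sample - predictor;
    int bias   = (delta < 0) ? -(idelta / 2) : idelta / 2;   /* assumed rounding bias */
    int nibble = av_clip_intp2((delta + bias) / idelta, 3) & 0x0F;

    /* mirror the decoder: sign-extend the nibble and scale it by idelta */
    predictor += ((nibble & 0x08) ? nibble - 0x10 : nibble) * idelta;

    *out_nibble = nibble;
    return av_clip_int16(predictor);                         /* becomes c->sample1 */
}

/* Example: sample = 1300, predictor = 1000, idelta = 64
 *   nibble = (300 + 32) / 64 = 5, new predictor = 1000 + 5 * 64 = 1320.
 * With delta = -300, nibble = -5 & 0x0F = 0x0B, which line 232 sign-extends back to -5. */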
 
267                                    const int16_t *samples, uint8_t *dst,
272     const int frontier = 1 << avctx->trellis;
279     int pathn = 0, froze = -1, i, j, k, generation = 0;
281     memset(hash, 0xff, 65536 * sizeof(*hash));
283     memset(nodep_buf, 0, 2 * frontier * sizeof(*nodep_buf));
284     nodes[0]          = node_buf + frontier;
298             nodes[0]->step    = 127;
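The trellis search keeps a frontier of 1 << avctx->trellis candidate decoder states per input sample. The node and path records are not part of this excerpt; from the fields accessed below, they presumably look roughly like this (a reconstruction from usage, not the verbatim definitions):

/* Inferred from the accesses nodes[j]->ssd/->path/->sample1/->sample2/->step and
 * paths[x].nibble/.prev in this listing; the real definitions may differ. */
typedef struct TrellisPath {
    int nibble;           /* nibble emitted on this transition */
    int prev;             /* index of the previous path entry */
} TrellisPath;

typedef struct TrellisNode {
    uint32_t ssd;         /* accumulated sum of squared decoding errors */
    int path;             /* index into the paths[] array */
    int sample1, sample2; /* last two reconstructed samples (decoder state) */
    int step;             /* current step / delta; meaning depends on the codec */
} TrellisNode;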
 
306     for (i = 0; i < n; i++) {
 
311         memset(nodes_next, 0, frontier * sizeof(TrellisNode*));
312         for (j = 0; j < frontier && nodes[j]; j++) {
315             const int range = (j < frontier / 2) ? 1 : 0;
316             const int step  = nodes[j]->step;
320                                        (nodes[j]->sample2 * c->coeff2)) / 64;
321                 const int div  = (sample - predictor) / step;
322                 const int nmin = av_clip(div-range, -8, 6);
323                 const int nmax = av_clip(div+range, -7, 7);
324                 for (nidx = nmin; nidx <= nmax; nidx++) {
325                     const int nibble = nidx & 0xf;
326                     int dec_sample   = predictor + nidx * step;
327 #define STORE_NODE(NAME, STEP_INDEX)\
333                     dec_sample = av_clip_int16(dec_sample);\
334                     d = sample - dec_sample;\
335                     ssd = nodes[j]->ssd + d*(unsigned)d;\
340                     if (ssd < nodes[j]->ssd)\
353                     h = &hash[(uint16_t) dec_sample];\
354                     if (*h == generation)\
356                     if (heap_pos < frontier) {\
361                         pos = (frontier >> 1) +\
362                               (heap_pos & ((frontier >> 1) - 1));\
363                         if (ssd > nodes_next[pos]->ssd)\
368                     u  = nodes_next[pos];\
370                         av_assert1(pathn < FREEZE_INTERVAL << avctx->trellis);\
372                         nodes_next[pos] = u;\
376                     u->step = STEP_INDEX;\
377                     u->sample2 = nodes[j]->sample1;\
378                     u->sample1 = dec_sample;\
379                     paths[u->path].nibble = nibble;\
380                     paths[u->path].prev   = nodes[j]->path;\
384                         int parent = (pos - 1) >> 1;\
385                         if (nodes_next[parent]->ssd <= ssd)\
387                         FFSWAP(TrellisNode*, nodes_next[parent], nodes_next[pos]);\
397 #define LOOP_NODES(NAME, STEP_TABLE, STEP_INDEX)\
398                 const int predictor = nodes[j]->sample1;\
399                 const int div = (sample - predictor) * 4 / STEP_TABLE;\
400                 int nmin = av_clip(div - range, -7, 6);\
401                 int nmax = av_clip(div + range, -6, 7);\
406                 for (nidx = nmin; nidx <= nmax; nidx++) {\
407                     const int nibble = nidx < 0 ? 7 - nidx : nidx;\
408                     int dec_sample = predictor +\
410                                      ff_adpcm_yamaha_difflookup[nibble]) / 8;\
411                     STORE_NODE(NAME, STEP_INDEX);\
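STORE_NODE keeps only the best frontier candidates for the next sample: a hash keyed by the reconstructed sample value (combined with the generation counter that is reset below when it wraps at 255) drops duplicates of decoder states already seen, and the survivors live in a binary min-heap ordered by ssd. Lines 384-387 are the sift-up after a candidate is placed; a minimal sketch of that step, using the node type sketched earlier:

/* Sketch of the sift-up on lines 384-387: bubble a newly placed candidate toward
 * the root while its ssd is smaller than its parent's, so heap[0] stays the best. */
static void heap_sift_up(TrellisNode **heap, int pos)
{
    while (pos > 0) {
        int parent = (pos - 1) >> 1;
        if (heap[parent]->ssd <= heap[pos]->ssd)
            break;
        FFSWAP(TrellisNode *, heap[parent], heap[pos]);
        pos = parent;
    }
}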
429         if (generation == 255) {
430             memset(hash, 0xff, 65536 * sizeof(*hash));
435         if (nodes[0]->ssd > (1 << 28)) {
436             for (j = 1; j < frontier && nodes[j]; j++)
437                 nodes[j]->ssd -= nodes[0]->ssd;
443             p = &paths[nodes[0]->path];
444             for (k = i; k > froze; k--) {
453             memset(nodes + 1, 0, (frontier - 1) * sizeof(TrellisNode*));
 
457     p = &paths[nodes[0]->path];
458     for (i = n - 1; i > froze; i--) {
464     c->sample1    = nodes[0]->sample1;
465     c->sample2    = nodes[0]->sample2;
467     c->step       = nodes[0]->step;
468     c->idelta     = nodes[0]->step;
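After the last sample, the best surviving node (nodes[0]) is traced backwards through the paths[] chain: prev links point toward the start of the block, so line 458 walks i down to froze and the nibbles are written back to front. A sketch of that backtrack, assuming dst holds one nibble per byte at this stage:

/* Hypothetical sketch of the backtrack on lines 457-458. */
static void backtrack(uint8_t *dst, const TrellisPath *paths,
                      int start_path, int n, int froze)
{
    const TrellisPath *p = &paths[start_path];
    int i;
    for (i = n - 1; i > froze; i--) {
        dst[i] = p->nibble;      /* one nibble per output slot, filled in reverse */
        p = &paths[p->prev];
    }
}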
 
474     int n, i, ch, st, pkt_size, ret;
475     const int16_t *samples;
481     samples = (const int16_t *)frame->data[0];
 
500         for (ch = 0; ch < avctx->channels; ch++) {
513             for (ch = 0; ch < avctx->channels; ch++) {
515                                        buf + ch * blocks * 8, &c->status[ch],
518             for (i = 0; i < blocks; i++) {
519                 for (ch = 0; ch < avctx->channels; ch++) {
520                     uint8_t *buf1 = buf + ch * blocks * 8 + i * 8;
521                     for (j = 0; j < 8; j += 2)
522                         *dst++ = buf1[j] | (buf1[j + 1] << 4);
527             for (i = 0; i < blocks; i++) {
528                 for (ch = 0; ch < avctx->channels; ch++) {
530                     const int16_t *smp = &samples_p[ch][1 + i * 8];
531                     for (j = 0; j < 8; j += 2) {
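In the trellis path (lines 513-522) each channel is first encoded into its own scratch buffer holding one nibble per byte; lines 518-522 then interleave those buffers into the IMA WAV block layout: for every group of eight samples, four bytes per channel, with the earlier sample of each pair in the low nibble. A decoder-side sketch that undoes exactly that packing makes the ordering explicit (hypothetical helper, not from the source):

/* Mirror of lines 518-522: expand one IMA WAV data block back into per-channel
 * nibble buffers (low nibble = earlier sample of each pair). */
static void unpack_ima_wav_block(const uint8_t *src, uint8_t *nibbles,
                                 int blocks, int channels)
{
    int i, ch, j;
    for (i = 0; i < blocks; i++)
        for (ch = 0; ch < channels; ch++) {
            uint8_t *out = nibbles + ch * blocks * 8 + i * 8;
            for (j = 0; j < 8; j += 2) {
                out[j]     = *src & 0x0F;
                out[j + 1] = *src >> 4;
                src++;
            }
        }
}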
 
546         for (ch = 0; ch < avctx->channels; ch++) {
554                 for (i = 0; i < 64; i++)
558                 for (i = 0; i < 64; i += 2) {
582         for (i = 0; i < avctx->channels; i++) {
596                                        buf + n, &c->status[1], n,
598             for (i = 0; i < n; i++) {
610                              samples[2 * i + 1]));
617         for (i = 0; i < avctx->channels; i++) {
623         for (i = 0; i < avctx->channels; i++) {
628         for (i = 0; i < avctx->channels; i++)
634         for (i = 0; i < avctx->channels; i++)
643                 for (i = 0; i < n; i += 2)
644                     *dst++ = (buf[i] << 4) | buf[i + 1];
650                 for (i = 0; i < n; i++)
651                     *dst++ = (buf[i] << 4) | buf[n + i];
655             for (i = 7 * avctx->channels; i < avctx->block_align; i++) {
671                 for (i = 0; i < n; i += 2)
672                     *dst++ = buf[i] | (buf[i + 1] << 4);
678                 for (i = 0; i < n; i++)
679                     *dst++ = buf[i] | (buf[n + i] << 4);
683             for (n *= avctx->channels; n > 0; n--) {
694     avpkt->size = pkt_size;
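Note the two packing orders visible above: the Microsoft layout (lines 643-651) puts the first nibble of each pair in the high half of the byte, while the Yamaha layout (lines 671-683) puts it in the low half. Illustrative helpers, with 'first' being the earlier sample's nibble:

/* Packing orders implied by lines 644 and 672 (names are illustrative only). */
static uint8_t pack_ms_pair(uint8_t first, uint8_t second)
{
    return (first << 4) | second;   /* MS ADPCM: earlier sample in the high nibble */
}

static uint8_t pack_yamaha_pair(uint8_t first, uint8_t second)
{
    return first | (second << 4);   /* Yamaha ADPCM: earlier sample in the low nibble */
}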
 
709 #define ADPCM_ENCODER(id_, name_, sample_fmts_, long_name_) \
710 AVCodec ff_ ## name_ ## _encoder = {                        \
712     .long_name      = NULL_IF_CONFIG_SMALL(long_name_),     \
713     .type           = AVMEDIA_TYPE_AUDIO,                   \
715     .priv_data_size = sizeof(ADPCMEncodeContext),           \
716     .init           = adpcm_encode_init,                    \
717     .encode2        = adpcm_encode_frame,                   \
718     .close          = adpcm_encode_close,                   \
719     .sample_fmts    = sample_fmts_,                         \
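The ADPCM_ENCODER macro stamps out one AVCodec definition per supported variant. The instantiation list itself is not part of this excerpt; it presumably looks something like the following (illustrative, using the sample_fmts/sample_fmts_p arrays listed in the cross-references below):

/* Illustrative instantiations; the actual list and order in the file may differ. */
ADPCM_ENCODER(AV_CODEC_ID_ADPCM_IMA_WAV, adpcm_ima_wav, sample_fmts_p, "ADPCM IMA WAV");
ADPCM_ENCODER(AV_CODEC_ID_ADPCM_MS,      adpcm_ms,      sample_fmts,   "ADPCM Microsoft");
ADPCM_ENCODER(AV_CODEC_ID_ADPCM_YAMAHA,  adpcm_yamaha,  sample_fmts,   "ADPCM Yamaha");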
Cross-referenced identifiers:

const struct AVCodec *codec
static av_cold int adpcm_encode_init(AVCodecContext *avctx)
AVFrame: This structure describes decoded (raw) audio or video data.
static void put_sbits(PutBitContext *pb, int n, int32_t value)
static void put_bits(PutBitContext *s, int n, unsigned int value): Write up to 31 bits into a bitstream.
static uint8_t adpcm_ms_compress_sample(ADPCMChannelStatus *c, int16_t sample)
static uint8_t adpcm_ima_qt_compress_sample(ADPCMChannelStatus *c, int16_t sample)
static av_cold int adpcm_encode_close(AVCodecContext *avctx)
int block_align: number of bytes per packet if constant and known, or 0. Used by some WAV-based audio codecs...
static uint8_t adpcm_yamaha_compress_sample(ADPCMChannelStatus *c, int16_t sample)
const uint8_t ff_adpcm_AdaptCoeff1[]: Divided by 4 to fit in 8-bit integers.
uint8_t *extradata: some codecs need / can use extradata like Huffman tables.
int bits_per_coded_sample: bits per sample/pixel from the demuxer (needed for huffyuv).
#define AV_LOG_ERROR: Something went wrong and cannot losslessly be recovered.
int av_get_bits_per_sample(enum AVCodecID codec_id): Return codec bits per sample.
adpcm.h: ADPCM encoder/decoder common header.
#define STORE_NODE(NAME, STEP_INDEX)
const int16_t ff_adpcm_step_table[89]: This is the step table.
const int8_t ff_adpcm_index_table[16]
static uint8_t adpcm_ima_compress_sample(ADPCMChannelStatus *c, int16_t sample)
const int8_t ff_adpcm_AdaptCoeff2[]: Divided by 4 to fit in 8-bit integers.
static void adpcm_compress_trellis(AVCodecContext *avctx, const int16_t *samples, uint8_t *dst, ADPCMChannelStatus *c, int n, int stride)
static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr)
int frame_size: Number of samples per channel in an audio frame.
const int16_t ff_adpcm_AdaptationTable[]
avcodec.h: Libavcodec external API header.
AVSampleFormat: Audio sample formats.
int sample_rate: samples per second
AVCodecContext: main external API structure.
int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int64_t min_size): Check AVPacket size and/or allocate data.
#define ADPCM_ENCODER(id_, name_, sample_fmts_, long_name_)
uint8_t *data[AV_NUM_DATA_POINTERS]: pointer to the picture/channel planes.
#define FF_ALLOC_ARRAY_OR_GOTO(ctx, p, nelem, elsize, label)
const int8_t ff_adpcm_yamaha_difflookup[]
internal.h: common internal API header.
static void flush_put_bits(PutBitContext *s): Pad the end of the output stream with zeros.
const int16_t ff_adpcm_yamaha_indexscale[]
#define FF_ALLOC_OR_GOTO(ctx, p, size, label)
#define LOOP_NODES(NAME, STEP_TABLE, STEP_INDEX)
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size): Initialize the PutBitContext s.
int trellis: trellis RD quantization (see the usage sketch after this list).
#define AV_INPUT_BUFFER_PADDING_SIZE: Required number of additionally allocated bytes at the end of the input bitstream for decoding...
int channels: number of audio channels
static enum AVSampleFormat sample_fmts[]
uint8_t **extended_data: pointers to the data planes/channels.
ADPCMChannelStatus status[6]
AVPacket: This structure stores compressed data.
int nb_samples: number of audio samples (per channel) described by this frame
static enum AVSampleFormat sample_fmts_p[]
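The trellis field above is the user-visible knob behind the 1 << avctx->trellis frontier used throughout the search. A minimal, hypothetical sketch of enabling it through the libavcodec API of this era (error handling mostly omitted; a value of 3 gives a frontier of 8 candidate paths):

#include <libavcodec/avcodec.h>

/* Hypothetical setup: request trellis (rate-distortion optimal) quantization
 * from an ADPCM encoder. */
static AVCodecContext *open_adpcm_encoder(void)
{
    AVCodec *codec      = avcodec_find_encoder(AV_CODEC_ID_ADPCM_MS);
    AVCodecContext *enc = avcodec_alloc_context3(codec);
    if (!enc)
        return NULL;
    enc->sample_rate = 44100;
    enc->channels    = 2;
    enc->sample_fmt  = AV_SAMPLE_FMT_S16;
    enc->trellis     = 3;
    if (avcodec_open2(enc, codec, NULL) < 0) {
        avcodec_free_context(&enc);
        return NULL;
    }
    return enc;
}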