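/**
 * @file adpcm.c
 * ADPCM encoders and decoders: lookup tables shared by the variants,
 * per-channel state, nibble compress/expand helpers, and the frame-level
 * encode/decode entry points registered at the bottom of the file.
 */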
#include "avcodec.h"
#include "bitstream.h"
#include "bytestream.h"

#define BLKSIZE 1024

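/* IMA ADPCM step index adjustment, indexed by the 4-bit nibble; the sign bit
 * (bit 3) does not affect the adjustment, so the table simply repeats. */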
static const int index_table[16] = {
    -1, -1, -1, -1, 2, 4, 6, 8,
    -1, -1, -1, -1, 2, 4, 6, 8,
};

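/* IMA ADPCM quantizer step sizes, indexed by step_index (0..88). */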
static const int step_table[89] = {
    7, 8, 9, 10, 11, 12, 13, 14, 16, 17,
    19, 21, 23, 25, 28, 31, 34, 37, 41, 45,
    50, 55, 60, 66, 73, 80, 88, 97, 107, 118,
    130, 143, 157, 173, 190, 209, 230, 253, 279, 307,
    337, 371, 408, 449, 494, 544, 598, 658, 724, 796,
    876, 963, 1060, 1166, 1282, 1411, 1552, 1707, 1878, 2066,
    2272, 2499, 2749, 3024, 3327, 3660, 4026, 4428, 4871, 5358,
    5894, 6484, 7132, 7845, 8630, 9493, 10442, 11487, 12635, 13899,
    15289, 16818, 18500, 20350, 22385, 24623, 27086, 29794, 32767
};

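/* Microsoft ADPCM tables. AdaptationTable rescales idelta after each nibble
 * (>> 8, i.e. /256). AdaptCoeff1/AdaptCoeff2 are the seven predictor
 * coefficient pairs, stored divided by 4 so they fit in 8 bits; the
 * predictor therefore divides by 64 instead of 256. */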
static const int AdaptationTable[] = {
    230, 230, 230, 230, 307, 409, 512, 614,
    768, 614, 512, 409, 307, 230, 230, 230
};

static const uint8_t AdaptCoeff1[] = {
    64, 128, 0, 48, 60, 115, 98
};

static const int8_t AdaptCoeff2[] = {
    0, -64, 0, 16, 0, -52, -58
};

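/* CD-ROM XA ADPCM prediction filter coefficient pairs (f0, f1). */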
static const int xa_adpcm_table[5][2] = {
    { 0, 0 },
    { 60, 0 },
    { 115, -52 },
    { 98, -55 },
    { 122, -60 }
};

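/* Electronic Arts ADPCM coefficients: the filter index f taken from the
 * bitstream selects coeff1 = ea_adpcm_table[f] and coeff2 = ea_adpcm_table[f + 4]. */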
static const int ea_adpcm_table[] = {
    0, 240, 460, 392, 0, 0, -208, -220, 0, 1,
    3, 4, 7, 8, 10, 11, 0, -1, -3, -4
};

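/* Shockwave Flash ADPCM step index tables for 2-, 3-, 4- and 5-bit codes. */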
static const int swf_index_tables[4][16] = {
    { -1, 2 },
    { -1, -1, 2, 4 },
    { -1, -1, -1, -1, 2, 4, 6, 8 },
    { -1, -1, -1, -1, -1, -1, -1, -1, 1, 2, 4, 6, 8, 10, 13, 16 }
};

static const int yamaha_indexscale[] = {
    230, 230, 230, 230, 307, 409, 512, 614,
    230, 230, 230, 230, 307, 409, 512, 614
};

static const int yamaha_difflookup[] = {
    1, 3, 5, 7, 9, 11, 13, 15,
    -1, -3, -5, -7, -9, -11, -13, -15
};

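/* Per-channel codec state. Which fields are used depends on the variant:
 * IMA keeps predictor/step_index, MS keeps the last two samples plus the
 * coefficient pair and idelta, Yamaha and CT keep predictor and step. */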
typedef struct ADPCMChannelStatus {
    int predictor;
    short int step_index;
    int step;

    int prev_sample;

    short sample1;
    short sample2;
    int coeff1;
    int coeff2;
    int idelta;
} ADPCMChannelStatus;

typedef struct ADPCMContext {
    ADPCMChannelStatus status[6];
} ADPCMContext;

#if CONFIG_ENCODERS
static av_cold int adpcm_encode_init(AVCodecContext *avctx)
{
    if (avctx->channels > 2)
        return -1;

    if(avctx->trellis && (unsigned)avctx->trellis > 16U){
        av_log(avctx, AV_LOG_ERROR, "invalid trellis size\n");
        return -1;
    }

    switch(avctx->codec->id) {
    case CODEC_ID_ADPCM_IMA_WAV:
        avctx->frame_size = (BLKSIZE - 4 * avctx->channels) * 8 / (4 * avctx->channels) + 1;
        avctx->block_align = BLKSIZE;
        break;
    case CODEC_ID_ADPCM_IMA_QT:
        avctx->frame_size = 64;
        avctx->block_align = 34 * avctx->channels;
        break;
    case CODEC_ID_ADPCM_MS:
        avctx->frame_size = (BLKSIZE - 7 * avctx->channels) * 2 / avctx->channels + 2;
        avctx->block_align = BLKSIZE;
        break;
    case CODEC_ID_ADPCM_YAMAHA:
        avctx->frame_size = BLKSIZE * avctx->channels;
        avctx->block_align = BLKSIZE;
        break;
    case CODEC_ID_ADPCM_SWF:
        if (avctx->sample_rate != 11025 &&
            avctx->sample_rate != 22050 &&
            avctx->sample_rate != 44100) {
            av_log(avctx, AV_LOG_ERROR, "Sample rate must be 11025, 22050 or 44100\n");
            return -1;
        }
        avctx->frame_size = 512 * (avctx->sample_rate / 11025);
        break;
    default:
        return -1;
        break;
    }

    avctx->coded_frame= avcodec_alloc_frame();
    avctx->coded_frame->key_frame= 1;

    return 0;
}

static av_cold int adpcm_encode_close(AVCodecContext *avctx)
{
    av_freep(&avctx->coded_frame);

    return 0;
}

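/* Quantize one sample to a 4-bit IMA nibble and update the channel state.
 * The reconstruction reuses yamaha_difflookup, which is equivalent to the
 * decoder's (2*delta + 1) * step / 8 formula. */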
static inline unsigned char adpcm_ima_compress_sample(ADPCMChannelStatus *c, short sample)
{
    int delta = sample - c->prev_sample;
    int nibble = FFMIN(7, abs(delta)*4/step_table[c->step_index]) + (delta<0)*8;
    c->prev_sample += ((step_table[c->step_index] * yamaha_difflookup[nibble]) / 8);
    c->prev_sample = av_clip_int16(c->prev_sample);
    c->step_index = av_clip(c->step_index + index_table[nibble], 0, 88);
    return nibble;
}

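/* Quantize one sample for MS ADPCM: predict from the last two samples,
 * code the prediction error in idelta-sized steps, then adapt idelta. */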
static inline unsigned char adpcm_ms_compress_sample(ADPCMChannelStatus *c, short sample)
{
    int predictor, nibble, bias;

    predictor = (((c->sample1) * (c->coeff1)) + ((c->sample2) * (c->coeff2))) / 64;

    nibble= sample - predictor;
    if(nibble>=0) bias= c->idelta/2;
    else          bias=-c->idelta/2;

    nibble= (nibble + bias) / c->idelta;
    nibble= av_clip(nibble, -8, 7)&0x0F;

    predictor += (signed)((nibble & 0x08)?(nibble - 0x10):(nibble)) * c->idelta;

    c->sample2 = c->sample1;
    c->sample1 = av_clip_int16(predictor);

    c->idelta = (AdaptationTable[(int)nibble] * c->idelta) >> 8;
    if (c->idelta < 16) c->idelta = 16;

    return nibble;
}

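/* Quantize one sample for Yamaha ADPCM; the step adapts multiplicatively
 * and is clamped to [127, 24567]. */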
static inline unsigned char adpcm_yamaha_compress_sample(ADPCMChannelStatus *c, short sample)
{
    int nibble, delta;

    if(!c->step) {
        c->predictor = 0;
        c->step = 127;
    }

    delta = sample - c->predictor;

    nibble = FFMIN(7, abs(delta)*4/c->step) + (delta<0)*8;

    c->predictor += ((c->step * yamaha_difflookup[nibble]) / 8);
    c->predictor = av_clip_int16(c->predictor);
    c->step = (c->step * yamaha_indexscale[nibble]) >> 8;
    c->step = av_clip(c->step, 127, 24567);

    return nibble;
}

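/* Trellis quantization: instead of choosing each nibble greedily, keep the
 * best 2^avctx->trellis partial encodings (the "frontier"), extend each of
 * them with the candidate nibbles around the greedy choice, and periodically
 * freeze the common prefix of the best path.  The same search works for IMA,
 * MS, SWF and Yamaha because they only differ in the predictor/step update,
 * parameterized through the STORE_NODE/LOOP_NODES macros below. */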
typedef struct TrellisPath {
    int nibble;
    int prev;
} TrellisPath;

typedef struct TrellisNode {
    uint32_t ssd;
    int path;
    int sample1;
    int sample2;
    int step;
} TrellisNode;

static void adpcm_compress_trellis(AVCodecContext *avctx, const short *samples,
                                   uint8_t *dst, ADPCMChannelStatus *c, int n)
{
#define FREEZE_INTERVAL 128
    const int frontier = 1 << avctx->trellis;
    const int stride = avctx->channels;
    const int version = avctx->codec->id;
    const int max_paths = frontier*FREEZE_INTERVAL;
    TrellisPath paths[max_paths], *p;
    TrellisNode node_buf[2][frontier];
    TrellisNode *nodep_buf[2][frontier];
    TrellisNode **nodes = nodep_buf[0];
    TrellisNode **nodes_next = nodep_buf[1];
    int pathn = 0, froze = -1, i, j, k;

    assert(!(max_paths&(max_paths-1)));

    memset(nodep_buf, 0, sizeof(nodep_buf));
    nodes[0] = &node_buf[1][0];
    nodes[0]->ssd = 0;
    nodes[0]->path = 0;
    nodes[0]->step = c->step_index;
    nodes[0]->sample1 = c->sample1;
    nodes[0]->sample2 = c->sample2;
    if((version == CODEC_ID_ADPCM_IMA_WAV) || (version == CODEC_ID_ADPCM_IMA_QT) || (version == CODEC_ID_ADPCM_SWF))
        nodes[0]->sample1 = c->prev_sample;
    if(version == CODEC_ID_ADPCM_MS)
        nodes[0]->step = c->idelta;
    if(version == CODEC_ID_ADPCM_YAMAHA) {
        if(c->step == 0) {
            nodes[0]->step = 127;
            nodes[0]->sample1 = 0;
        } else {
            nodes[0]->step = c->step;
            nodes[0]->sample1 = c->predictor;
        }
    }

    for(i=0; i<n; i++) {
        TrellisNode *t = node_buf[i&1];
        TrellisNode **u;
        int sample = samples[i*stride];
        memset(nodes_next, 0, frontier*sizeof(TrellisNode*));
        for(j=0; j<frontier && nodes[j]; j++) {
            const int range = (j < frontier/2) ? 1 : 0;
            const int step = nodes[j]->step;
            int nidx;
            if(version == CODEC_ID_ADPCM_MS) {
                const int predictor = ((nodes[j]->sample1 * c->coeff1) + (nodes[j]->sample2 * c->coeff2)) / 64;
                const int div = (sample - predictor) / step;
                const int nmin = av_clip(div-range, -8, 6);
                const int nmax = av_clip(div+range, -7, 7);
                for(nidx=nmin; nidx<=nmax; nidx++) {
                    const int nibble = nidx & 0xf;
                    int dec_sample = predictor + nidx * step;
#define STORE_NODE(NAME, STEP_INDEX)\
                    int d;\
                    uint32_t ssd;\
                    dec_sample = av_clip_int16(dec_sample);\
                    d = sample - dec_sample;\
                    ssd = nodes[j]->ssd + d*d;\
                    if(nodes_next[frontier-1] && ssd >= nodes_next[frontier-1]->ssd)\
                        continue;\
                    for(k=0; k<frontier && nodes_next[k]; k++) {\
                        if(dec_sample == nodes_next[k]->sample1) {\
                            assert(ssd >= nodes_next[k]->ssd);\
                            goto next_##NAME;\
                        }\
                    }\
                    for(k=0; k<frontier; k++) {\
                        if(!nodes_next[k] || ssd < nodes_next[k]->ssd) {\
                            TrellisNode *u = nodes_next[frontier-1];\
                            if(!u) {\
                                assert(pathn < max_paths);\
                                u = t++;\
                                u->path = pathn++;\
                            }\
                            u->ssd = ssd;\
                            u->step = STEP_INDEX;\
                            u->sample2 = nodes[j]->sample1;\
                            u->sample1 = dec_sample;\
                            paths[u->path].nibble = nibble;\
                            paths[u->path].prev = nodes[j]->path;\
                            memmove(&nodes_next[k+1], &nodes_next[k], (frontier-k-1)*sizeof(TrellisNode*));\
                            nodes_next[k] = u;\
                            break;\
                        }\
                    }\
                    next_##NAME:;
                    STORE_NODE(ms, FFMAX(16, (AdaptationTable[nibble] * step) >> 8));
                }
            } else if((version == CODEC_ID_ADPCM_IMA_WAV)|| (version == CODEC_ID_ADPCM_IMA_QT)|| (version == CODEC_ID_ADPCM_SWF)) {
#define LOOP_NODES(NAME, STEP_TABLE, STEP_INDEX)\
                const int predictor = nodes[j]->sample1;\
                const int div = (sample - predictor) * 4 / STEP_TABLE;\
                int nmin = av_clip(div-range, -7, 6);\
                int nmax = av_clip(div+range, -6, 7);\
                if(nmin<=0) nmin--;\
                if(nmax<0) nmax--;\
                for(nidx=nmin; nidx<=nmax; nidx++) {\
                    const int nibble = nidx<0 ? 7-nidx : nidx;\
                    int dec_sample = predictor + (STEP_TABLE * yamaha_difflookup[nibble]) / 8;\
                    STORE_NODE(NAME, STEP_INDEX);\
                }
                LOOP_NODES(ima, step_table[step], av_clip(step + index_table[nibble], 0, 88));
            } else { /* CODEC_ID_ADPCM_YAMAHA */
                LOOP_NODES(yamaha, step, av_clip((step * yamaha_indexscale[nibble]) >> 8, 127, 24567));
#undef LOOP_NODES
#undef STORE_NODE
            }
        }

        u = nodes;
        nodes = nodes_next;
        nodes_next = u;

        /* prevent the accumulated ssd from overflowing */
        if(nodes[0]->ssd > (1<<28)) {
            for(j=1; j<frontier && nodes[j]; j++)
                nodes[j]->ssd -= nodes[0]->ssd;
            nodes[0]->ssd = 0;
        }

        /* periodically freeze the best path so far and recycle path storage */
        if(i == froze + FREEZE_INTERVAL) {
            p = &paths[nodes[0]->path];
            for(k=i; k>froze; k--) {
                dst[k] = p->nibble;
                p = &paths[p->prev];
            }
            froze = i;
            pathn = 0;

            /* path indices were recycled above, so drop every node except the best one */
            memset(nodes+1, 0, (frontier-1)*sizeof(TrellisNode*));
        }
    }

    p = &paths[nodes[0]->path];
    for(i=n-1; i>froze; i--) {
        dst[i] = p->nibble;
        p = &paths[p->prev];
    }

    c->predictor = nodes[0]->sample1;
    c->sample1 = nodes[0]->sample1;
    c->sample2 = nodes[0]->sample2;
    c->step_index = nodes[0]->step;
    c->step = nodes[0]->step;
    c->idelta = nodes[0]->step;
}

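/* Encode one frame: write the per-block header required by the chosen
 * variant (initial predictor, step index or coefficients), then pack two
 * 4-bit nibbles per byte, optionally using the trellis search above. */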
static int adpcm_encode_frame(AVCodecContext *avctx,
                              unsigned char *frame, int buf_size, void *data)
{
    int n, i, st;
    short *samples;
    unsigned char *dst;
    ADPCMContext *c = avctx->priv_data;

    dst = frame;
    samples = (short *)data;
    st= avctx->channels == 2;

    switch(avctx->codec->id) {
    case CODEC_ID_ADPCM_IMA_WAV:
        n = avctx->frame_size / 8;
        c->status[0].prev_sample = (signed short)samples[0];
        bytestream_put_le16(&dst, c->status[0].prev_sample);
        *dst++ = (unsigned char)c->status[0].step_index;
        *dst++ = 0;
        samples++;
        if (avctx->channels == 2) {
            c->status[1].prev_sample = (signed short)samples[0];
            bytestream_put_le16(&dst, c->status[1].prev_sample);
            *dst++ = (unsigned char)c->status[1].step_index;
            *dst++ = 0;
            samples++;
        }

        if(avctx->trellis > 0) {
            uint8_t buf[2][n*8];
            adpcm_compress_trellis(avctx, samples, buf[0], &c->status[0], n*8);
            if(avctx->channels == 2)
                adpcm_compress_trellis(avctx, samples+1, buf[1], &c->status[1], n*8);
            for(i=0; i<n; i++) {
                *dst++ = buf[0][8*i+0] | (buf[0][8*i+1] << 4);
                *dst++ = buf[0][8*i+2] | (buf[0][8*i+3] << 4);
                *dst++ = buf[0][8*i+4] | (buf[0][8*i+5] << 4);
                *dst++ = buf[0][8*i+6] | (buf[0][8*i+7] << 4);
                if (avctx->channels == 2) {
                    *dst++ = buf[1][8*i+0] | (buf[1][8*i+1] << 4);
                    *dst++ = buf[1][8*i+2] | (buf[1][8*i+3] << 4);
                    *dst++ = buf[1][8*i+4] | (buf[1][8*i+5] << 4);
                    *dst++ = buf[1][8*i+6] | (buf[1][8*i+7] << 4);
                }
            }
        } else
        for (; n>0; n--) {
            *dst = adpcm_ima_compress_sample(&c->status[0], samples[0]);
            *dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels]) << 4;
            dst++;
            *dst = adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 2]);
            *dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 3]) << 4;
            dst++;
            *dst = adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 4]);
            *dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 5]) << 4;
            dst++;
            *dst = adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 6]);
            *dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 7]) << 4;
            dst++;
            /* second channel, if present */
            if (avctx->channels == 2) {
                *dst = adpcm_ima_compress_sample(&c->status[1], samples[1]);
                *dst |= adpcm_ima_compress_sample(&c->status[1], samples[3]) << 4;
                dst++;
                *dst = adpcm_ima_compress_sample(&c->status[1], samples[5]);
                *dst |= adpcm_ima_compress_sample(&c->status[1], samples[7]) << 4;
                dst++;
                *dst = adpcm_ima_compress_sample(&c->status[1], samples[9]);
                *dst |= adpcm_ima_compress_sample(&c->status[1], samples[11]) << 4;
                dst++;
                *dst = adpcm_ima_compress_sample(&c->status[1], samples[13]);
                *dst |= adpcm_ima_compress_sample(&c->status[1], samples[15]) << 4;
                dst++;
            }
            samples += 8 * avctx->channels;
        }
        break;
    case CODEC_ID_ADPCM_IMA_QT:
    {
        int ch, i;
        PutBitContext pb;
        init_put_bits(&pb, dst, buf_size*8);

        for(ch=0; ch<avctx->channels; ch++){
            put_bits(&pb, 9, (c->status[ch].prev_sample + 0x10000) >> 7);
            put_bits(&pb, 7, c->status[ch].step_index);
            if(avctx->trellis > 0) {
                uint8_t buf[64];
                adpcm_compress_trellis(avctx, samples+ch, buf, &c->status[ch], 64);
                for(i=0; i<64; i++)
                    put_bits(&pb, 4, buf[i^1]);
                c->status[ch].prev_sample = c->status[ch].predictor & ~0x7F;
            } else {
                for (i=0; i<64; i+=2){
                    int t1, t2;
                    t1 = adpcm_ima_compress_sample(&c->status[ch], samples[avctx->channels*(i+0)+ch]);
                    t2 = adpcm_ima_compress_sample(&c->status[ch], samples[avctx->channels*(i+1)+ch]);
                    put_bits(&pb, 4, t2);
                    put_bits(&pb, 4, t1);
                }
                c->status[ch].prev_sample &= ~0x7F;
            }
        }

        dst += put_bits_count(&pb)>>3;
        break;
    }
    case CODEC_ID_ADPCM_SWF:
    {
        int i;
        PutBitContext pb;
        init_put_bits(&pb, dst, buf_size*8);

        n = avctx->frame_size-1;

        /* code size 2 selects 4-bit codes (the decoder reads get_bits(2)+2) */
        put_bits(&pb, 2, 2);

        for(i=0; i<avctx->channels; i++){
            c->status[i].step_index = av_clip(c->status[i].step_index, 0, 63);
            put_sbits(&pb, 16, samples[i]);
            put_bits(&pb, 6, c->status[i].step_index);
            c->status[i].prev_sample = (signed short)samples[i];
        }

        if(avctx->trellis > 0) {
            uint8_t buf[2][n];
            adpcm_compress_trellis(avctx, samples+2, buf[0], &c->status[0], n);
            if (avctx->channels == 2)
                adpcm_compress_trellis(avctx, samples+3, buf[1], &c->status[1], n);
            for(i=0; i<n; i++) {
                put_bits(&pb, 4, buf[0][i]);
                if (avctx->channels == 2)
                    put_bits(&pb, 4, buf[1][i]);
            }
        } else {
            for (i=1; i<avctx->frame_size; i++) {
                put_bits(&pb, 4, adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels*i]));
                if (avctx->channels == 2)
                    put_bits(&pb, 4, adpcm_ima_compress_sample(&c->status[1], samples[2*i+1]));
            }
        }
        flush_put_bits(&pb);
        dst += put_bits_count(&pb)>>3;
        break;
    }
    case CODEC_ID_ADPCM_MS:
        for(i=0; i<avctx->channels; i++){
            int predictor=0;
            *dst++ = predictor;
            c->status[i].coeff1 = AdaptCoeff1[predictor];
            c->status[i].coeff2 = AdaptCoeff2[predictor];
        }
        for(i=0; i<avctx->channels; i++){
            if (c->status[i].idelta < 16)
                c->status[i].idelta = 16;
            bytestream_put_le16(&dst, c->status[i].idelta);
        }
        for(i=0; i<avctx->channels; i++){
            c->status[i].sample2= *samples++;
        }
        for(i=0; i<avctx->channels; i++){
            c->status[i].sample1= *samples++;
            bytestream_put_le16(&dst, c->status[i].sample1);
        }
        for(i=0; i<avctx->channels; i++)
            bytestream_put_le16(&dst, c->status[i].sample2);

        if(avctx->trellis > 0) {
            int n = avctx->block_align - 7*avctx->channels;
            uint8_t buf[2][n];
            if(avctx->channels == 1) {
                n *= 2;
                adpcm_compress_trellis(avctx, samples, buf[0], &c->status[0], n);
                for(i=0; i<n; i+=2)
                    *dst++ = (buf[0][i] << 4) | buf[0][i+1];
            } else {
                adpcm_compress_trellis(avctx, samples, buf[0], &c->status[0], n);
                adpcm_compress_trellis(avctx, samples+1, buf[1], &c->status[1], n);
                for(i=0; i<n; i++)
                    *dst++ = (buf[0][i] << 4) | buf[1][i];
            }
        } else
        for(i=7*avctx->channels; i<avctx->block_align; i++) {
            int nibble;
            nibble = adpcm_ms_compress_sample(&c->status[ 0], *samples++)<<4;
            nibble|= adpcm_ms_compress_sample(&c->status[st], *samples++);
            *dst++ = nibble;
        }
        break;
    case CODEC_ID_ADPCM_YAMAHA:
        n = avctx->frame_size / 2;
        if(avctx->trellis > 0) {
            uint8_t buf[2][n*2];
            n *= 2;
            if(avctx->channels == 1) {
                adpcm_compress_trellis(avctx, samples, buf[0], &c->status[0], n);
                for(i=0; i<n; i+=2)
                    *dst++ = buf[0][i] | (buf[0][i+1] << 4);
            } else {
                adpcm_compress_trellis(avctx, samples, buf[0], &c->status[0], n);
                adpcm_compress_trellis(avctx, samples+1, buf[1], &c->status[1], n);
                for(i=0; i<n; i++)
                    *dst++ = buf[0][i] | (buf[1][i] << 4);
            }
        } else
        for (; n>0; n--) {
            for(i = 0; i < avctx->channels; i++) {
                int nibble;
                nibble  = adpcm_yamaha_compress_sample(&c->status[i], samples[i]);
                nibble |= adpcm_yamaha_compress_sample(&c->status[i], samples[i+avctx->channels]) << 4;
                *dst++ = nibble;
            }
            samples += 2 * avctx->channels;
        }
        break;
    default:
        return -1;
    }
    return dst - frame;
}
#endif //CONFIG_ENCODERS

static av_cold int adpcm_decode_init(AVCodecContext * avctx)
{
    ADPCMContext *c = avctx->priv_data;
    unsigned int min_channels = 1;
    unsigned int max_channels = 2;

    switch(avctx->codec->id) {
    case CODEC_ID_ADPCM_EA:
        min_channels = 2;
        break;
    case CODEC_ID_ADPCM_EA_R1:
    case CODEC_ID_ADPCM_EA_R2:
    case CODEC_ID_ADPCM_EA_R3:
        max_channels = 6;
        break;
    }

    if (avctx->channels < min_channels || avctx->channels > max_channels) {
        av_log(avctx, AV_LOG_ERROR, "Invalid number of channels\n");
        return AVERROR(EINVAL);
    }

    switch(avctx->codec->id) {
    case CODEC_ID_ADPCM_CT:
        c->status[0].step = c->status[1].step = 511;
        break;
    case CODEC_ID_ADPCM_IMA_WS:
        if (avctx->extradata && avctx->extradata_size == 2 * 4) {
            c->status[0].predictor = AV_RL32(avctx->extradata);
            c->status[1].predictor = AV_RL32(avctx->extradata + 4);
        }
        break;
    default:
        break;
    }
    avctx->sample_fmt = SAMPLE_FMT_S16;
    return 0;
}

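/* Decode one IMA nibble: diff = (2*delta + 1) * step >> shift, applied with
 * the nibble's sign bit; step_index is then adjusted through index_table. */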
static inline short adpcm_ima_expand_nibble(ADPCMChannelStatus *c, char nibble, int shift)
{
    int step_index;
    int predictor;
    int sign, delta, diff, step;

    step = step_table[c->step_index];
    step_index = c->step_index + index_table[(unsigned)nibble];
    if (step_index < 0) step_index = 0;
    else if (step_index > 88) step_index = 88;

    sign = nibble & 8;
    delta = nibble & 7;

    diff = ((2 * delta + 1) * step) >> shift;
    predictor = c->predictor;
    if (sign) predictor -= diff;
    else predictor += diff;

    c->predictor = av_clip_int16(predictor);
    c->step_index = step_index;

    return (short)c->predictor;
}

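/* Decode one MS ADPCM nibble with the two-tap predictor and adaptive idelta. */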
static inline short adpcm_ms_expand_nibble(ADPCMChannelStatus *c, char nibble)
{
    int predictor;

    predictor = (((c->sample1) * (c->coeff1)) + ((c->sample2) * (c->coeff2))) / 64;
    predictor += (signed)((nibble & 0x08)?(nibble - 0x10):(nibble)) * c->idelta;

    c->sample2 = c->sample1;
    c->sample1 = av_clip_int16(predictor);
    c->idelta = (AdaptationTable[(int)nibble] * c->idelta) >> 8;
    if (c->idelta < 16) c->idelta = 16;

    return c->sample1;
}

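/* Decode one Creative Technology ADPCM nibble; the predictor leaks slightly
 * (factor 254/256) and the step is clamped to [511, 32767]. */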
static inline short adpcm_ct_expand_nibble(ADPCMChannelStatus *c, char nibble)
{
    int sign, delta, diff;
    int new_step;

    sign = nibble & 8;
    delta = nibble & 7;

    diff = ((2 * delta + 1) * c->step) >> 3;

    c->predictor = ((c->predictor * 254) >> 8) + (sign ? -diff : diff);
    c->predictor = av_clip_int16(c->predictor);

    new_step = (AdaptationTable[nibble & 7] * c->step) >> 8;
    c->step = av_clip(new_step, 511, 32767);

    return (short)c->predictor;
}

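/* Decode one Sound Blaster Pro code of 'size' bits (2, 3 or 4); c->step is a
 * small extra shift count, nudged up or down depending on the code magnitude. */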
static inline short adpcm_sbpro_expand_nibble(ADPCMChannelStatus *c, char nibble, int size, int shift)
{
    int sign, delta, diff;

    sign = nibble & (1<<(size-1));
    delta = nibble & ((1<<(size-1))-1);
    diff = delta << (7 + c->step + shift);

    c->predictor = av_clip(c->predictor + (sign ? -diff : diff), -16384,16256);

    if (delta >= (2*size - 3) && c->step < 3)
        c->step++;
    else if (delta == 0 && c->step > 0)
        c->step--;

    return (short) c->predictor;
}

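/* Decode one Yamaha ADPCM nibble. */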
static inline short adpcm_yamaha_expand_nibble(ADPCMChannelStatus *c, unsigned char nibble)
{
    if(!c->step) {
        c->predictor = 0;
        c->step = 127;
    }

    c->predictor += (c->step * yamaha_difflookup[nibble]) / 8;
    c->predictor = av_clip_int16(c->predictor);
    c->step = (c->step * yamaha_indexscale[nibble]) >> 8;
    c->step = av_clip(c->step, 127, 24567);
    return c->predictor;
}

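/* Decode one 128-byte CD-ROM XA sound group: bytes 4..11 hold per-unit
 * filter/shift parameters, bytes 16..127 hold eight interleaved sound units
 * of 28 samples each.  With inc == 2 (stereo) the low-nibble units go to the
 * left channel and the high-nibble units to the right; with inc == 1 all
 * units are written sequentially to the same channel. */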
static void xa_decode(short *out, const unsigned char *in,
                      ADPCMChannelStatus *left, ADPCMChannelStatus *right, int inc)
{
    int i, j;
    int shift,filter,f0,f1;
    int s_1,s_2;
    int d,s,t;

    for(i=0;i<4;i++) {
        shift  = 12 - (in[4+i*2] & 15);
        filter = in[4+i*2] >> 4;
        f0 = xa_adpcm_table[filter][0];
        f1 = xa_adpcm_table[filter][1];

        s_1 = left->sample1;
        s_2 = left->sample2;

        for(j=0;j<28;j++) {
            d = in[16+i+j*4];

            t = (signed char)(d<<4)>>4;
            s = ( t<<shift ) + ((s_1*f0 + s_2*f1+32)>>6);
            s_2 = s_1;
            s_1 = av_clip_int16(s);
            *out = s_1;
            out += inc;
        }

        if (inc==2) {
            left->sample1 = s_1;
            left->sample2 = s_2;
            s_1 = right->sample1;
            s_2 = right->sample2;
            out = out + 1 - 28*2;
        }

        shift  = 12 - (in[5+i*2] & 15);
        filter = in[5+i*2] >> 4;

        f0 = xa_adpcm_table[filter][0];
        f1 = xa_adpcm_table[filter][1];

        for(j=0;j<28;j++) {
            d = in[16+i+j*4];

            t = (signed char)d >> 4;
            s = ( t<<shift ) + ((s_1*f0 + s_2*f1+32)>>6);
            s_2 = s_1;
            s_1 = av_clip_int16(s);
            *out = s_1;
            out += inc;
        }

        if (inc==2) {
            right->sample1 = s_1;
            right->sample2 = s_2;
            out -= 1;
        } else {
            left->sample1 = s_1;
            left->sample2 = s_2;
        }
    }
}

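/* Fetch the next 4-bit code for the DK3 decoder, reading a new byte from
 * 'src' when needed and breaking out of the enclosing decode loop once the
 * input buffer is exhausted. */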
#define DK3_GET_NEXT_NIBBLE() \
    if (decode_top_nibble_next) \
    { \
        nibble = last_byte >> 4; \
        decode_top_nibble_next = 0; \
    } \
    else \
    { \
        last_byte = *src++; \
        if (src >= buf + buf_size) break; \
        nibble = last_byte & 0x0F; \
        decode_top_nibble_next = 1; \
    }

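/* Decode one packet: parse the block header of the selected codec, expand
 * the nibbles into 16-bit samples and report the number of bytes consumed. */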
static int adpcm_decode_frame(AVCodecContext *avctx,
                              void *data, int *data_size,
                              const uint8_t *buf, int buf_size)
{
    ADPCMContext *c = avctx->priv_data;
    ADPCMChannelStatus *cs;
    int n, m, channel, i;
    int block_predictor[2];
    short *samples;
    short *samples_end;
    const uint8_t *src;
    int st; /* stereo */

    /* DK3 ADPCM accounting variables */
    unsigned char last_byte = 0;
    unsigned char nibble;
    int decode_top_nibble_next = 0;
    int diff_channel;

    /* EA ADPCM state variables */
    uint32_t samples_in_chunk;
    int32_t previous_left_sample, previous_right_sample;
    int32_t current_left_sample, current_right_sample;
    int32_t next_left_sample, next_right_sample;
    int32_t coeff1l, coeff2l, coeff1r, coeff2r;
    uint8_t shift_left, shift_right;
    int count1, count2;
    int coeff[2][2], shift[2];

    if (!buf_size)
        return 0;

    /* rough output size check: 4-bit ADPCM yields at most two samples per
     * input byte; the extra 8 covers samples taken directly from block
     * headers (e.g. stereo IMA WAV) */
    if(*data_size/4 < buf_size + 8)
        return -1;

    samples = data;
    samples_end= samples + *data_size/2;
    *data_size= 0;
    src = buf;

    st = avctx->channels == 2 ? 1 : 0;

    switch(avctx->codec->id) {
    case CODEC_ID_ADPCM_IMA_QT:
        n = buf_size - 2*avctx->channels;
        for (channel = 0; channel < avctx->channels; channel++) {
            cs = &(c->status[channel]);

            /* the top 9 bits of the 16-bit predictor, then sign extension */
            cs->predictor = (*src++) << 8;
            cs->predictor |= (*src & 0x80);
            cs->predictor &= 0xFF80;

            if(cs->predictor & 0x8000)
                cs->predictor -= 0x10000;

            cs->predictor = av_clip_int16(cs->predictor);

            cs->step_index = (*src++) & 0x7F;

            if (cs->step_index > 88){
                av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n", cs->step_index);
                cs->step_index = 88;
            }

            cs->step = step_table[cs->step_index];

            samples = (short*)data + channel;

            for(m=32; n>0 && m>0; n--, m--) {
                *samples = adpcm_ima_expand_nibble(cs, src[0] & 0x0F, 3);
                samples += avctx->channels;
                *samples = adpcm_ima_expand_nibble(cs, src[0] >> 4  , 3);
                samples += avctx->channels;
                src ++;
            }
        }
        if (st)
            samples--;
        break;
    case CODEC_ID_ADPCM_IMA_WAV:
        if (avctx->block_align != 0 && buf_size > avctx->block_align)
            buf_size = avctx->block_align;

        for(i=0; i<avctx->channels; i++){
            cs = &(c->status[i]);
            cs->predictor = *samples++ = (int16_t)bytestream_get_le16(&src);

            cs->step_index = *src++;
            if (cs->step_index > 88){
                av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n", cs->step_index);
                cs->step_index = 88;
            }
            if (*src++) av_log(avctx, AV_LOG_ERROR, "unused byte should be null but is %d!!\n", src[-1]);
        }

        while(src < buf + buf_size){
            for(m=0; m<4; m++){
                for(i=0; i<=st; i++)
                    *samples++ = adpcm_ima_expand_nibble(&c->status[i], src[4*i] & 0x0F, 3);
                for(i=0; i<=st; i++)
                    *samples++ = adpcm_ima_expand_nibble(&c->status[i], src[4*i] >> 4  , 3);
                src++;
            }
            src += 4*st;
        }
        break;
    case CODEC_ID_ADPCM_4XM:
        cs = &(c->status[0]);
        c->status[0].predictor= (int16_t)bytestream_get_le16(&src);
        if(st){
            c->status[1].predictor= (int16_t)bytestream_get_le16(&src);
        }
        c->status[0].step_index= (int16_t)bytestream_get_le16(&src);
        if(st){
            c->status[1].step_index= (int16_t)bytestream_get_le16(&src);
        }
        if (cs->step_index < 0) cs->step_index = 0;
        if (cs->step_index > 88) cs->step_index = 88;

        m= (buf_size - (src - buf))>>st;
        for(i=0; i<m; i++) {
            *samples++ = adpcm_ima_expand_nibble(&c->status[0], src[i] & 0x0F, 4);
            if (st)
                *samples++ = adpcm_ima_expand_nibble(&c->status[1], src[i+m] & 0x0F, 4);
            *samples++ = adpcm_ima_expand_nibble(&c->status[0], src[i] >> 4, 4);
            if (st)
                *samples++ = adpcm_ima_expand_nibble(&c->status[1], src[i+m] >> 4, 4);
        }

        src += m<<st;

        break;
    case CODEC_ID_ADPCM_MS:
        if (avctx->block_align != 0 && buf_size > avctx->block_align)
            buf_size = avctx->block_align;
        n = buf_size - 7 * avctx->channels;
        if (n < 0)
            return -1;
        block_predictor[0] = av_clip(*src++, 0, 6);
        block_predictor[1] = 0;
        if (st)
            block_predictor[1] = av_clip(*src++, 0, 6);
        c->status[0].idelta = (int16_t)bytestream_get_le16(&src);
        if (st){
            c->status[1].idelta = (int16_t)bytestream_get_le16(&src);
        }
        c->status[0].coeff1 = AdaptCoeff1[block_predictor[0]];
        c->status[0].coeff2 = AdaptCoeff2[block_predictor[0]];
        c->status[1].coeff1 = AdaptCoeff1[block_predictor[1]];
        c->status[1].coeff2 = AdaptCoeff2[block_predictor[1]];

        c->status[0].sample1 = bytestream_get_le16(&src);
        if (st) c->status[1].sample1 = bytestream_get_le16(&src);
        c->status[0].sample2 = bytestream_get_le16(&src);
        if (st) c->status[1].sample2 = bytestream_get_le16(&src);

        *samples++ = c->status[0].sample2;
        if (st) *samples++ = c->status[1].sample2;
        *samples++ = c->status[0].sample1;
        if (st) *samples++ = c->status[1].sample1;
        for(;n>0;n--) {
            *samples++ = adpcm_ms_expand_nibble(&c->status[0 ], src[0] >> 4  );
            *samples++ = adpcm_ms_expand_nibble(&c->status[st], src[0] & 0x0F);
            src ++;
        }
        break;
    case CODEC_ID_ADPCM_IMA_DK4:
        if (avctx->block_align != 0 && buf_size > avctx->block_align)
            buf_size = avctx->block_align;

        c->status[0].predictor  = (int16_t)bytestream_get_le16(&src);
        c->status[0].step_index = *src++;
        src++;
        *samples++ = c->status[0].predictor;
        if (st) {
            c->status[1].predictor  = (int16_t)bytestream_get_le16(&src);
            c->status[1].step_index = *src++;
            src++;
            *samples++ = c->status[1].predictor;
        }
        while (src < buf + buf_size) {
            *samples++ = adpcm_ima_expand_nibble(&c->status[0],
                src[0] >> 4, 3);
            if (st)
                *samples++ = adpcm_ima_expand_nibble(&c->status[1],
                    src[0] & 0x0F, 3);
            else
                *samples++ = adpcm_ima_expand_nibble(&c->status[0],
                    src[0] & 0x0F, 3);

            src++;
        }
        break;
    case CODEC_ID_ADPCM_IMA_DK3:
        if (avctx->block_align != 0 && buf_size > avctx->block_align)
            buf_size = avctx->block_align;

        if(buf_size + 16 > (samples_end - samples)*3/8)
            return -1;

        c->status[0].predictor  = (int16_t)AV_RL16(src + 10);
        c->status[1].predictor  = (int16_t)AV_RL16(src + 12);
        c->status[0].step_index = src[14];
        c->status[1].step_index = src[15];

        src += 16;
        diff_channel = c->status[1].predictor;

        /* DK3_GET_NEXT_NIBBLE() issues the break once the buffer is consumed */
        while (1) {

            /* first nibble of the sum channel */
            DK3_GET_NEXT_NIBBLE();
            adpcm_ima_expand_nibble(&c->status[0], nibble, 3);

            /* nibble of the difference channel */
            DK3_GET_NEXT_NIBBLE();
            adpcm_ima_expand_nibble(&c->status[1], nibble, 3);

            /* first pair of stereo output samples */
            diff_channel = (diff_channel + c->status[1].predictor) / 2;
            *samples++ = c->status[0].predictor + c->status[1].predictor;
            *samples++ = c->status[0].predictor - c->status[1].predictor;

            /* second nibble of the sum channel */
            DK3_GET_NEXT_NIBBLE();
            adpcm_ima_expand_nibble(&c->status[0], nibble, 3);

            /* second pair of stereo output samples */
            diff_channel = (diff_channel + c->status[1].predictor) / 2;
            *samples++ = c->status[0].predictor + c->status[1].predictor;
            *samples++ = c->status[0].predictor - c->status[1].predictor;
        }
        break;
    case CODEC_ID_ADPCM_IMA_ISS:
        c->status[0].predictor  = (int16_t)AV_RL16(src + 0);
        c->status[0].step_index = src[2];
        src += 4;
        if(st) {
            c->status[1].predictor  = (int16_t)AV_RL16(src + 0);
            c->status[1].step_index = src[2];
            src += 4;
        }

        while (src < buf + buf_size) {

            if (st) {
                *samples++ = adpcm_ima_expand_nibble(&c->status[0],
                    src[0] >> 4  , 3);
                *samples++ = adpcm_ima_expand_nibble(&c->status[1],
                    src[0] & 0x0F, 3);
            } else {
                *samples++ = adpcm_ima_expand_nibble(&c->status[0],
                    src[0] & 0x0F, 3);
                *samples++ = adpcm_ima_expand_nibble(&c->status[0],
                    src[0] >> 4  , 3);
            }

            src++;
        }
        break;
    case CODEC_ID_ADPCM_IMA_WS:
        while (src < buf + buf_size) {

            if (st) {
                *samples++ = adpcm_ima_expand_nibble(&c->status[0],
                    src[0] >> 4  , 3);
                *samples++ = adpcm_ima_expand_nibble(&c->status[1],
                    src[0] & 0x0F, 3);
            } else {
                *samples++ = adpcm_ima_expand_nibble(&c->status[0],
                    src[0] >> 4  , 3);
                *samples++ = adpcm_ima_expand_nibble(&c->status[0],
                    src[0] & 0x0F, 3);
            }

            src++;
        }
        break;
    case CODEC_ID_ADPCM_XA:
        while (buf_size >= 128) {
            xa_decode(samples, src, &c->status[0], &c->status[1],
                avctx->channels);
            src += 128;
            samples += 28 * 8;
            buf_size -= 128;
        }
        break;
    case CODEC_ID_ADPCM_IMA_EA_EACS:
        samples_in_chunk = bytestream_get_le32(&src) >> (1-st);

        if (samples_in_chunk > buf_size-4-(8<<st)) {
            src += buf_size - 4;
            break;
        }

        for (i=0; i<=st; i++)
            c->status[i].step_index = bytestream_get_le32(&src);
        for (i=0; i<=st; i++)
            c->status[i].predictor  = bytestream_get_le32(&src);

        for (; samples_in_chunk; samples_in_chunk--, src++) {
            *samples++ = adpcm_ima_expand_nibble(&c->status[0],  *src>>4,   3);
            *samples++ = adpcm_ima_expand_nibble(&c->status[st], *src&0x0F, 3);
        }
        break;
    case CODEC_ID_ADPCM_IMA_EA_SEAD:
        for (; src < buf+buf_size; src++) {
            *samples++ = adpcm_ima_expand_nibble(&c->status[0], src[0] >> 4, 6);
            *samples++ = adpcm_ima_expand_nibble(&c->status[st],src[0]&0x0F, 6);
        }
        break;
    case CODEC_ID_ADPCM_EA:
        samples_in_chunk = AV_RL32(src);
        if (samples_in_chunk >= ((buf_size - 12) * 2)) {
            src += buf_size;
            break;
        }
        src += 4;
        current_left_sample   = (int16_t)bytestream_get_le16(&src);
        previous_left_sample  = (int16_t)bytestream_get_le16(&src);
        current_right_sample  = (int16_t)bytestream_get_le16(&src);
        previous_right_sample = (int16_t)bytestream_get_le16(&src);

        for (count1 = 0; count1 < samples_in_chunk/28;count1++) {
            coeff1l = ea_adpcm_table[ *src >> 4       ];
            coeff2l = ea_adpcm_table[(*src >> 4  ) + 4];
            coeff1r = ea_adpcm_table[*src & 0x0F];
            coeff2r = ea_adpcm_table[(*src & 0x0F) + 4];
            src++;

            shift_left  = (*src >> 4  ) + 8;
            shift_right = (*src & 0x0F) + 8;
            src++;

            for (count2 = 0; count2 < 28; count2++) {
                next_left_sample  = (int32_t)((*src & 0xF0) << 24) >> shift_left;
                next_right_sample = (int32_t)((*src & 0x0F) << 28) >> shift_right;
                src++;

                next_left_sample = (next_left_sample +
                    (current_left_sample * coeff1l) +
                    (previous_left_sample * coeff2l) + 0x80) >> 8;
                next_right_sample = (next_right_sample +
                    (current_right_sample * coeff1r) +
                    (previous_right_sample * coeff2r) + 0x80) >> 8;

                previous_left_sample = current_left_sample;
                current_left_sample = av_clip_int16(next_left_sample);
                previous_right_sample = current_right_sample;
                current_right_sample = av_clip_int16(next_right_sample);
                *samples++ = (unsigned short)current_left_sample;
                *samples++ = (unsigned short)current_right_sample;
            }
        }
        break;
    case CODEC_ID_ADPCM_EA_MAXIS_XA:
        for(channel = 0; channel < avctx->channels; channel++) {
            for (i=0; i<2; i++)
                coeff[channel][i] = ea_adpcm_table[(*src >> 4) + 4*i];
            shift[channel] = (*src & 0x0F) + 8;
            src++;
        }
        for (count1 = 0; count1 < (buf_size - avctx->channels) / avctx->channels; count1++) {
            for(i = 4; i >= 0; i-=4) {
                for(channel = 0; channel < avctx->channels; channel++) {
                    int32_t sample = (int32_t)(((*(src+channel) >> i) & 0x0F) << 0x1C) >> shift[channel];
                    sample = (sample +
                              c->status[channel].sample1 * coeff[channel][0] +
                              c->status[channel].sample2 * coeff[channel][1] + 0x80) >> 8;
                    c->status[channel].sample2 = c->status[channel].sample1;
                    c->status[channel].sample1 = av_clip_int16(sample);
                    *samples++ = c->status[channel].sample1;
                }
            }
            src+=avctx->channels;
        }
        break;
    case CODEC_ID_ADPCM_EA_R1:
    case CODEC_ID_ADPCM_EA_R2:
    case CODEC_ID_ADPCM_EA_R3: {
        const int big_endian = avctx->codec->id == CODEC_ID_ADPCM_EA_R3;
        int32_t previous_sample, current_sample, next_sample;
        int32_t coeff1, coeff2;
        uint8_t shift;
        unsigned int channel;
        uint16_t *samplesC;
        const uint8_t *srcC;
        const uint8_t *src_end = buf + buf_size;

        samples_in_chunk = (big_endian ? bytestream_get_be32(&src)
                                       : bytestream_get_le32(&src)) / 28;
        if (samples_in_chunk > UINT32_MAX/(28*avctx->channels) ||
            28*samples_in_chunk*avctx->channels > samples_end-samples) {
            src += buf_size - 4;
            break;
        }

        for (channel=0; channel<avctx->channels; channel++) {
            int32_t offset = (big_endian ? bytestream_get_be32(&src)
                                         : bytestream_get_le32(&src))
                           + (avctx->channels-channel-1) * 4;

            if ((offset < 0) || (offset >= src_end - src - 4)) break;
            srcC = src + offset;
            samplesC = samples + channel;

            if (avctx->codec->id == CODEC_ID_ADPCM_EA_R1) {
                current_sample  = (int16_t)bytestream_get_le16(&srcC);
                previous_sample = (int16_t)bytestream_get_le16(&srcC);
            } else {
                current_sample  = c->status[channel].predictor;
                previous_sample = c->status[channel].prev_sample;
            }

            for (count1=0; count1<samples_in_chunk; count1++) {
                if (*srcC == 0xEE) {
                    srcC++;
                    if (srcC > src_end - 30*2) break;
                    current_sample  = (int16_t)bytestream_get_be16(&srcC);
                    previous_sample = (int16_t)bytestream_get_be16(&srcC);

                    for (count2=0; count2<28; count2++) {
                        *samplesC = (int16_t)bytestream_get_be16(&srcC);
                        samplesC += avctx->channels;
                    }
                } else {
                    coeff1 = ea_adpcm_table[ *srcC>>4     ];
                    coeff2 = ea_adpcm_table[(*srcC>>4) + 4];
                    shift = (*srcC++ & 0x0F) + 8;

                    if (srcC > src_end - 14) break;
                    for (count2=0; count2<28; count2++) {
                        if (count2 & 1)
                            next_sample = (int32_t)((*srcC++ & 0x0F) << 28) >> shift;
                        else
                            next_sample = (int32_t)((*srcC   & 0xF0) << 24) >> shift;

                        next_sample += (current_sample  * coeff1) +
                                       (previous_sample * coeff2);
                        next_sample = av_clip_int16(next_sample >> 8);

                        previous_sample = current_sample;
                        current_sample  = next_sample;
                        *samplesC = current_sample;
                        samplesC += avctx->channels;
                    }
                }
            }

            if (avctx->codec->id != CODEC_ID_ADPCM_EA_R1) {
                c->status[channel].predictor   = current_sample;
                c->status[channel].prev_sample = previous_sample;
            }
        }

        src = src + buf_size - (4 + 4*avctx->channels);
        samples += 28 * samples_in_chunk * avctx->channels;
        break;
    }
    case CODEC_ID_ADPCM_EA_XAS:
        if (samples_end-samples < 32*4*avctx->channels
            || buf_size < (4+15)*4*avctx->channels) {
            src += buf_size;
            break;
        }
        for (channel=0; channel<avctx->channels; channel++) {
            int coeff[2][4], shift[4];
            short *s2, *s = &samples[channel];
            for (n=0; n<4; n++, s+=32*avctx->channels) {
                for (i=0; i<2; i++)
                    coeff[i][n] = ea_adpcm_table[(src[0]&0x0F)+4*i];
                shift[n] = (src[2]&0x0F) + 8;
                for (s2=s, i=0; i<2; i++, src+=2, s2+=avctx->channels)
                    s2[0] = (src[0]&0xF0) + (src[1]<<8);
            }

            for (m=2; m<32; m+=2) {
                s = &samples[m*avctx->channels + channel];
                for (n=0; n<4; n++, src++, s+=32*avctx->channels) {
                    for (s2=s, i=0; i<8; i+=4, s2+=avctx->channels) {
                        int level = (int32_t)((*src & (0xF0>>i)) << (24+i)) >> shift[n];
                        int pred  = s2[-1*avctx->channels] * coeff[0][n]
                                  + s2[-2*avctx->channels] * coeff[1][n];
                        s2[0] = av_clip_int16((level + pred + 0x80) >> 8);
                    }
                }
            }
        }
        samples += 32*4*avctx->channels;
        break;
    case CODEC_ID_ADPCM_IMA_AMV:
    case CODEC_ID_ADPCM_IMA_SMJPEG:
        c->status[0].predictor = (int16_t)bytestream_get_le16(&src);
        c->status[0].step_index = bytestream_get_le16(&src);

        if (avctx->codec->id == CODEC_ID_ADPCM_IMA_AMV)
            src+=4;

        while (src < buf + buf_size) {
            char hi, lo;
            lo = *src & 0x0F;
            hi = *src >> 4;

            if (avctx->codec->id == CODEC_ID_ADPCM_IMA_AMV)
                FFSWAP(char, hi, lo);

            *samples++ = adpcm_ima_expand_nibble(&c->status[0],
                lo, 3);
            *samples++ = adpcm_ima_expand_nibble(&c->status[0],
                hi, 3);
            src++;
        }
        break;
    case CODEC_ID_ADPCM_CT:
        while (src < buf + buf_size) {
            if (st) {
                *samples++ = adpcm_ct_expand_nibble(&c->status[0],
                    src[0] >> 4);
                *samples++ = adpcm_ct_expand_nibble(&c->status[1],
                    src[0] & 0x0F);
            } else {
                *samples++ = adpcm_ct_expand_nibble(&c->status[0],
                    src[0] >> 4);
                *samples++ = adpcm_ct_expand_nibble(&c->status[0],
                    src[0] & 0x0F);
            }
            src++;
        }
        break;
    case CODEC_ID_ADPCM_SBPRO_4:
    case CODEC_ID_ADPCM_SBPRO_3:
    case CODEC_ID_ADPCM_SBPRO_2:
        if (!c->status[0].step_index) {
            /* the first byte(s) of the stream are raw unsigned 8-bit samples */
            *samples++ = 128 * (*src++ - 0x80);
            if (st)
                *samples++ = 128 * (*src++ - 0x80);
            c->status[0].step_index = 1;
        }
        if (avctx->codec->id == CODEC_ID_ADPCM_SBPRO_4) {
            while (src < buf + buf_size) {
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
                    src[0] >> 4, 4, 0);
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
                    src[0] & 0x0F, 4, 0);
                src++;
            }
        } else if (avctx->codec->id == CODEC_ID_ADPCM_SBPRO_3) {
            while (src < buf + buf_size && samples + 2 < samples_end) {
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
                     src[0] >> 5        , 3, 0);
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
                    (src[0] >> 2) & 0x07, 3, 0);
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
                     src[0] & 0x03,       2, 0);
                src++;
            }
        } else {
            while (src < buf + buf_size && samples + 3 < samples_end) {
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
                     src[0] >> 6        , 2, 2);
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
                    (src[0] >> 4) & 0x03, 2, 2);
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
                    (src[0] >> 2) & 0x03, 2, 2);
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
                     src[0] & 0x03,       2, 2);
                src++;
            }
        }
        break;
    case CODEC_ID_ADPCM_SWF:
    {
        GetBitContext gb;
        const int *table;
        int k0, signmask, nb_bits, count;
        int size = buf_size*8;

        init_get_bits(&gb, buf, size);

        /* read the code size and pick the matching index table */
        nb_bits = get_bits(&gb, 2)+2;

        table = swf_index_tables[nb_bits-2];
        k0 = 1 << (nb_bits-2);
        signmask = 1 << (nb_bits-1);

        while (get_bits_count(&gb) <= size - 22*avctx->channels) {
            for (i = 0; i < avctx->channels; i++) {
                *samples++ = c->status[i].predictor = get_sbits(&gb, 16);
                c->status[i].step_index = get_bits(&gb, 6);
            }

            for (count = 0; get_bits_count(&gb) <= size - nb_bits*avctx->channels && count < 4095; count++) {
                int i;

                for (i = 0; i < avctx->channels; i++) {
                    /* similar to IMA ADPCM; vpdiff works out to roughly (delta + 0.5) * step / 4 */
                    int delta = get_bits(&gb, nb_bits);
                    int step = step_table[c->status[i].step_index];
                    long vpdiff = 0;
                    int k = k0;

                    do {
                        if (delta & k)
                            vpdiff += step;
                        step >>= 1;
                        k >>= 1;
                    } while(k);
                    vpdiff += step;

                    if (delta & signmask)
                        c->status[i].predictor -= vpdiff;
                    else
                        c->status[i].predictor += vpdiff;

                    c->status[i].step_index += table[delta & (~signmask)];

                    c->status[i].step_index = av_clip(c->status[i].step_index, 0, 88);
                    c->status[i].predictor = av_clip_int16(c->status[i].predictor);

                    *samples++ = c->status[i].predictor;
                    if (samples >= samples_end) {
                        av_log(avctx, AV_LOG_ERROR, "allocated output buffer is too small\n");
                        return -1;
                    }
                }
            }
        }
        src += buf_size;
        break;
    }
    case CODEC_ID_ADPCM_YAMAHA:
        while (src < buf + buf_size) {
            if (st) {
                *samples++ = adpcm_yamaha_expand_nibble(&c->status[0],
                    src[0] & 0x0F);
                *samples++ = adpcm_yamaha_expand_nibble(&c->status[1],
                    src[0] >> 4  );
            } else {
                *samples++ = adpcm_yamaha_expand_nibble(&c->status[0],
                    src[0] & 0x0F);
                *samples++ = adpcm_yamaha_expand_nibble(&c->status[0],
                    src[0] >> 4  );
            }
            src++;
        }
        break;
    case CODEC_ID_ADPCM_THP:
    {
        int table[2][16];
        unsigned int samplecnt;
        int prev[2][2];
        int ch;

        if (buf_size < 80) {
            av_log(avctx, AV_LOG_ERROR, "frame too small\n");
            return -1;
        }

        src+=4;
        samplecnt = bytestream_get_be32(&src);

        for (i = 0; i < 32; i++)
            table[0][i] = (int16_t)bytestream_get_be16(&src);

        /* initialize the previous samples */
        for (i = 0; i < 4; i++)
            prev[0][i] = (int16_t)bytestream_get_be16(&src);

        if (samplecnt >= (samples_end - samples) / (st + 1)) {
            av_log(avctx, AV_LOG_ERROR, "allocated output buffer is too small\n");
            return -1;
        }

        for (ch = 0; ch <= st; ch++) {
            samples = (unsigned short *) data + ch;

            /* read in every sample for this channel */
            for (i = 0; i < samplecnt / 14; i++) {
                int index = (*src >> 4) & 7;
                unsigned int exp = 28 - (*src++ & 15);
                int factor1 = table[ch][index * 2];
                int factor2 = table[ch][index * 2 + 1];

                /* decode 14 samples */
                for (n = 0; n < 14; n++) {
                    int32_t sampledat;
                    if(n&1) sampledat=  *src++    <<28;
                    else    sampledat= (*src&0xF0)<<24;

                    sampledat = ((prev[ch][0]*factor1
                                + prev[ch][1]*factor2) >> 11) + (sampledat>>exp);
                    *samples = av_clip_int16(sampledat);
                    prev[ch][1] = prev[ch][0];
                    prev[ch][0] = *samples++;

                    /* in stereo mode, skip the interleaved sample of the other channel */
                    samples += st;
                }
            }
        }

        /* in stereo mode the loop above advances samples one step too far */
        samples -= st;
        break;
    }

    default:
        return -1;
    }
    *data_size = (uint8_t *)samples - (uint8_t *)data;
    return src - buf;
}

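/* AVCodec registration boilerplate; ADPCM_CODEC expands to both an encoder
 * and a decoder entry for the formats that support both. */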
#if CONFIG_ENCODERS
#define ADPCM_ENCODER(id,name,long_name_)       \
AVCodec name ## _encoder = {                    \
    #name,                                      \
    CODEC_TYPE_AUDIO,                           \
    id,                                         \
    sizeof(ADPCMContext),                       \
    adpcm_encode_init,                          \
    adpcm_encode_frame,                         \
    adpcm_encode_close,                         \
    NULL,                                       \
    .sample_fmts = (enum SampleFormat[]){SAMPLE_FMT_S16,SAMPLE_FMT_NONE}, \
    .long_name = NULL_IF_CONFIG_SMALL(long_name_), \
};
#else
#define ADPCM_ENCODER(id,name,long_name_)
#endif

#if CONFIG_DECODERS
#define ADPCM_DECODER(id,name,long_name_)       \
AVCodec name ## _decoder = {                    \
    #name,                                      \
    CODEC_TYPE_AUDIO,                           \
    id,                                         \
    sizeof(ADPCMContext),                       \
    adpcm_decode_init,                          \
    NULL,                                       \
    NULL,                                       \
    adpcm_decode_frame,                         \
    .long_name = NULL_IF_CONFIG_SMALL(long_name_), \
};
#else
#define ADPCM_DECODER(id,name,long_name_)
#endif

#define ADPCM_CODEC(id,name,long_name_) \
    ADPCM_ENCODER(id,name,long_name_) ADPCM_DECODER(id,name,long_name_)

ADPCM_DECODER(CODEC_ID_ADPCM_4XM, adpcm_4xm, "ADPCM 4X Movie");
ADPCM_DECODER(CODEC_ID_ADPCM_CT, adpcm_ct, "ADPCM Creative Technology");
ADPCM_DECODER(CODEC_ID_ADPCM_EA, adpcm_ea, "ADPCM Electronic Arts");
ADPCM_DECODER(CODEC_ID_ADPCM_EA_MAXIS_XA, adpcm_ea_maxis_xa, "ADPCM Electronic Arts Maxis CDROM XA");
ADPCM_DECODER(CODEC_ID_ADPCM_EA_R1, adpcm_ea_r1, "ADPCM Electronic Arts R1");
ADPCM_DECODER(CODEC_ID_ADPCM_EA_R2, adpcm_ea_r2, "ADPCM Electronic Arts R2");
ADPCM_DECODER(CODEC_ID_ADPCM_EA_R3, adpcm_ea_r3, "ADPCM Electronic Arts R3");
ADPCM_DECODER(CODEC_ID_ADPCM_EA_XAS, adpcm_ea_xas, "ADPCM Electronic Arts XAS");
ADPCM_DECODER(CODEC_ID_ADPCM_IMA_AMV, adpcm_ima_amv, "ADPCM IMA AMV");
ADPCM_DECODER(CODEC_ID_ADPCM_IMA_DK3, adpcm_ima_dk3, "ADPCM IMA Duck DK3");
ADPCM_DECODER(CODEC_ID_ADPCM_IMA_DK4, adpcm_ima_dk4, "ADPCM IMA Duck DK4");
ADPCM_DECODER(CODEC_ID_ADPCM_IMA_EA_EACS, adpcm_ima_ea_eacs, "ADPCM IMA Electronic Arts EACS");
ADPCM_DECODER(CODEC_ID_ADPCM_IMA_EA_SEAD, adpcm_ima_ea_sead, "ADPCM IMA Electronic Arts SEAD");
ADPCM_DECODER(CODEC_ID_ADPCM_IMA_ISS, adpcm_ima_iss, "ADPCM IMA Funcom ISS");
ADPCM_CODEC  (CODEC_ID_ADPCM_IMA_QT, adpcm_ima_qt, "ADPCM IMA QuickTime");
ADPCM_DECODER(CODEC_ID_ADPCM_IMA_SMJPEG, adpcm_ima_smjpeg, "ADPCM IMA Loki SDL MJPEG");
ADPCM_CODEC  (CODEC_ID_ADPCM_IMA_WAV, adpcm_ima_wav, "ADPCM IMA WAV");
ADPCM_DECODER(CODEC_ID_ADPCM_IMA_WS, adpcm_ima_ws, "ADPCM IMA Westwood");
ADPCM_CODEC  (CODEC_ID_ADPCM_MS, adpcm_ms, "ADPCM Microsoft");
ADPCM_DECODER(CODEC_ID_ADPCM_SBPRO_2, adpcm_sbpro_2, "ADPCM Sound Blaster Pro 2-bit");
ADPCM_DECODER(CODEC_ID_ADPCM_SBPRO_3, adpcm_sbpro_3, "ADPCM Sound Blaster Pro 2.6-bit");
ADPCM_DECODER(CODEC_ID_ADPCM_SBPRO_4, adpcm_sbpro_4, "ADPCM Sound Blaster Pro 4-bit");
ADPCM_CODEC  (CODEC_ID_ADPCM_SWF, adpcm_swf, "ADPCM Shockwave Flash");
ADPCM_DECODER(CODEC_ID_ADPCM_THP, adpcm_thp, "ADPCM Nintendo Gamecube THP");
ADPCM_DECODER(CODEC_ID_ADPCM_XA, adpcm_xa, "ADPCM CDROM XA");
ADPCM_CODEC  (CODEC_ID_ADPCM_YAMAHA, adpcm_yamaha, "ADPCM Yamaha");