00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022
00023 #include "libavutil/avassert.h"
00024 #include "libavutil/channel_layout.h"
00025 #include "libavutil/opt.h"
00026 #include "avcodec.h"
00027 #include "dsputil.h"
00028 #include "bytestream.h"
00029 #include "internal.h"
00030
/* Decoder-wide limits */
#define MAX_CHANNELS        2  /* APE is mono or stereo only */
#define MAX_BYTESPERSAMPLE  3  /* up to 24-bit samples */

/* Frame-flag bits carried in the (optional) frame-flags word */
#define APE_FRAMECODE_MONO_SILENCE    1
#define APE_FRAMECODE_STEREO_SILENCE  3
#define APE_FRAMECODE_PSEUDO_STEREO   4

#define HISTORY_SIZE    512  /* size of the rolling history windows */
#define PREDICTOR_ORDER 8    /* order of the adaptive predictors */
/* Total size of the slack area kept in front of the predictor history */
#define PREDICTOR_SIZE  50

/* Offsets (in int32 slots) of the per-channel delay lines inside the
 * predictor's rolling buffer (Y = first channel, X = second channel) */
#define YDELAYA (18 + PREDICTOR_ORDER*4)
#define YDELAYB (18 + PREDICTOR_ORDER*3)
#define XDELAYA (18 + PREDICTOR_ORDER*2)
#define XDELAYB (18 + PREDICTOR_ORDER)

/* Offsets of the sign-adaptation arrays inside the same buffer */
#define YADAPTCOEFFSA 18
#define XADAPTCOEFFSA 14
#define YADAPTCOEFFSB 10
#define XADAPTCOEFFSB 5

/**
 * Possible compression levels; the stored value is level * 1000.
 */
enum APECompressionLevel {
    COMPRESSION_LEVEL_FAST       = 1000,
    COMPRESSION_LEVEL_NORMAL     = 2000,
    COMPRESSION_LEVEL_HIGH       = 3000,
    COMPRESSION_LEVEL_EXTRA_HIGH = 4000,
    COMPRESSION_LEVEL_INSANE     = 5000
};

/* Maximum number of cascaded adaptive filters per compression level */
#define APE_FILTER_LEVELS 3
00072
/** Filter orders for each compression level (0 terminates the cascade) */
static const uint16_t ape_filter_orders[5][APE_FILTER_LEVELS] = {
    {  0,   0,    0 },
    { 16,   0,    0 },
    { 64,   0,    0 },
    { 32, 256,    0 },
    { 16, 256, 1280 }
};

/** Fixed-point fractional bits used by each filter level above */
static const uint8_t ape_filter_fracbits[5][APE_FILTER_LEVELS] = {
    {  0,  0,  0 },
    { 11,  0,  0 },
    { 11,  0,  0 },
    { 10, 13,  0 },
    { 11, 13, 15 }
};
00090
00091
/** One adaptive filter instance (per level, per channel) */
typedef struct APEFilter {
    int16_t *coeffs;        ///< current filter coefficients
    int16_t *adaptcoeffs;   ///< adaptation values used to steer the coefficients
    int16_t *historybuffer; ///< backing store for the rolling history
    int16_t *delay;         ///< write position of the filtered-output delay line

    int avg;                ///< running average of |residual| (3.98+ adaptation)
} APEFilter;

/** Adaptive Rice-coding state for one channel */
typedef struct APERice {
    uint32_t k;     ///< current Rice parameter
    uint32_t ksum;  ///< running sum used to adapt k
} APERice;

/** Range decoder state */
typedef struct APERangecoder {
    uint32_t low;        ///< low end of the current interval
    uint32_t range;      ///< length of the current interval
    uint32_t help;       ///< scaled range kept between culfreq/update calls
    unsigned int buffer; ///< input byte accumulator
} APERangecoder;

/** Predictor histories and coefficients for both channels */
typedef struct APEPredictor {
    int32_t *buf;  ///< current position in historybuffer

    int32_t lastA[2];

    int32_t filterA[2];
    int32_t filterB[2];

    int32_t coeffsA[2][4];  ///< adaptive coefficients, predictor A
    int32_t coeffsB[2][5];  ///< adaptive coefficients, predictor B
    int32_t historybuffer[HISTORY_SIZE + PREDICTOR_SIZE];
} APEPredictor;
00127
/** Decoder context */
typedef struct APEContext {
    AVClass *class;                          ///< class for AVOptions
    AVCodecContext *avctx;
    AVFrame frame;
    DSPContext dsp;
    int channels;
    int samples;                             ///< samples left to decode in current frame
    int bps;                                 ///< bits per coded sample

    int fileversion;                         ///< codec version, drives several bitstream variants
    int compression_level;                   ///< compression level (multiple of 1000)
    int fset;                                ///< filter-set index derived from compression_level
    int flags;                               ///< global decoder flags from extradata

    uint32_t CRC;                            ///< frame CRC (read but not verified here)
    int frameflags;                          ///< per-frame flags (silence/pseudo-stereo)
    APEPredictor predictor;                  ///< predictor used for final reconstruction

    int32_t *decoded_buffer;                 ///< backing store for decoded[]
    int decoded_size;                        ///< allocated size of decoded_buffer in bytes
    int32_t *decoded[MAX_CHANNELS];          ///< decoded data for each channel
    int blocks_per_loop;                     ///< maximum number of samples to decode per call

    int16_t* filterbuf[APE_FILTER_LEVELS];   ///< filter memory, one buffer per level

    APERangecoder rc;                        ///< range coder used to decode actual values
    APERice riceX;                           ///< Rice parameters for the second channel
    APERice riceY;                           ///< Rice parameters for the first channel
    APEFilter filters[APE_FILTER_LEVELS][2]; ///< filters used for reconstruction

    uint8_t *data;                           ///< byte-swapped copy of the current frame data
    uint8_t *data_end;                       ///< end of frame data
    int data_size;                           ///< currently allocated size of data
    const uint8_t *ptr;                      ///< current read position in frame data

    int error;                               ///< sticky decode-error flag
} APEContext;
00166
00167
00168
00169 static av_cold int ape_decode_close(AVCodecContext *avctx)
00170 {
00171 APEContext *s = avctx->priv_data;
00172 int i;
00173
00174 for (i = 0; i < APE_FILTER_LEVELS; i++)
00175 av_freep(&s->filterbuf[i]);
00176
00177 av_freep(&s->decoded_buffer);
00178 av_freep(&s->data);
00179 s->decoded_size = s->data_size = 0;
00180
00181 return 0;
00182 }
00183
/** Parse extradata, pick the output sample format and allocate filter memory. */
static av_cold int ape_decode_init(AVCodecContext *avctx)
{
    APEContext *s = avctx->priv_data;
    int i;

    /* extradata is exactly three little-endian 16-bit words:
     * file version, compression level, flags */
    if (avctx->extradata_size != 6) {
        av_log(avctx, AV_LOG_ERROR, "Incorrect extradata\n");
        return AVERROR(EINVAL);
    }
    if (avctx->channels > 2) {
        av_log(avctx, AV_LOG_ERROR, "Only mono and stereo is supported\n");
        return AVERROR(EINVAL);
    }
    /* choose the planar sample format matching the coded bit depth */
    s->bps = avctx->bits_per_coded_sample;
    switch (s->bps) {
    case 8:
        avctx->sample_fmt = AV_SAMPLE_FMT_U8P;
        break;
    case 16:
        avctx->sample_fmt = AV_SAMPLE_FMT_S16P;
        break;
    case 24:
        avctx->sample_fmt = AV_SAMPLE_FMT_S32P;
        break;
    default:
        av_log_ask_for_sample(avctx, "Unsupported bits per coded sample %d\n",
                              s->bps);
        return AVERROR_PATCHWELCOME;
    }
    s->avctx             = avctx;
    s->channels          = avctx->channels;
    s->fileversion       = AV_RL16(avctx->extradata);
    s->compression_level = AV_RL16(avctx->extradata + 2);
    s->flags             = AV_RL16(avctx->extradata + 4);

    av_log(avctx, AV_LOG_DEBUG, "Compression Level: %d - Flags: %d\n",
           s->compression_level, s->flags);
    /* valid levels are the multiples of 1000 up to COMPRESSION_LEVEL_INSANE */
    if (s->compression_level % 1000 || s->compression_level > COMPRESSION_LEVEL_INSANE || !s->compression_level) {
        av_log(avctx, AV_LOG_ERROR, "Incorrect compression level %d\n",
               s->compression_level);
        return AVERROR_INVALIDDATA;
    }
    s->fset = s->compression_level / 1000 - 1;
    /* one int16 buffer per active filter level; layout is carved up later by
     * init_filter(): two channel halves of (order*3 + HISTORY_SIZE) entries,
     * hence the *4 byte factor (2 halves * sizeof(int16_t)) */
    for (i = 0; i < APE_FILTER_LEVELS; i++) {
        if (!ape_filter_orders[s->fset][i])
            break;
        FF_ALLOC_OR_GOTO(avctx, s->filterbuf[i],
                         (ape_filter_orders[s->fset][i] * 3 + HISTORY_SIZE) * 4,
                         filter_alloc_fail);
    }

    ff_dsputil_init(&s->dsp, avctx);
    avctx->channel_layout = (avctx->channels==2) ? AV_CH_LAYOUT_STEREO : AV_CH_LAYOUT_MONO;

    avcodec_get_frame_defaults(&s->frame);
    avctx->coded_frame = &s->frame;

    return 0;
filter_alloc_fail:
    /* frees any filter buffers allocated before the failure */
    ape_decode_close(avctx);
    return AVERROR(ENOMEM);
}
00246
/* Range-decoder parameters: 32-bit state refilled one byte at a time. */
#define CODE_BITS    32
#define TOP_VALUE    ((unsigned int)1 << (CODE_BITS-1))
#define SHIFT_BITS   (CODE_BITS - 9)
#define EXTRA_BITS   ((CODE_BITS-2) % 8 + 1)
#define BOTTOM_VALUE (TOP_VALUE >> 8)
00257
00259 static inline void range_start_decoding(APEContext *ctx)
00260 {
00261 ctx->rc.buffer = bytestream_get_byte(&ctx->ptr);
00262 ctx->rc.low = ctx->rc.buffer >> (8 - EXTRA_BITS);
00263 ctx->rc.range = (uint32_t) 1 << EXTRA_BITS;
00264 }
00265
/** Pull in fresh input bytes until the range is back above BOTTOM_VALUE. */
static inline void range_dec_normalize(APEContext *ctx)
{
    while (ctx->rc.range <= BOTTOM_VALUE) {
        ctx->rc.buffer <<= 8;
        if (ctx->ptr < ctx->data_end) {
            ctx->rc.buffer += *ctx->ptr;
            ctx->ptr++;
        } else {
            /* out of input: flag the error but keep going (the refill is
             * simply skipped) so the caller can unwind at a safe point */
            ctx->error = 1;
        }
        ctx->rc.low    = (ctx->rc.low << 8) | ((ctx->rc.buffer >> 1) & 0xFF);
        ctx->rc.range <<= 8;
    }
}
00281
/**
 * Return the current cumulative frequency (< tot_f) the decoder state
 * points at. rc.help is preserved for the matching range_decode_update().
 */
static inline int range_decode_culfreq(APEContext *ctx, int tot_f)
{
    range_dec_normalize(ctx);
    ctx->rc.help = ctx->rc.range / tot_f;
    return ctx->rc.low / ctx->rc.help;
}
00294
00300 static inline int range_decode_culshift(APEContext *ctx, int shift)
00301 {
00302 range_dec_normalize(ctx);
00303 ctx->rc.help = ctx->rc.range >> shift;
00304 return ctx->rc.low / ctx->rc.help;
00305 }
00306
00307
00314 static inline void range_decode_update(APEContext *ctx, int sy_f, int lt_f)
00315 {
00316 ctx->rc.low -= ctx->rc.help * lt_f;
00317 ctx->rc.range = ctx->rc.help * sy_f;
00318 }
00319
00321 static inline int range_decode_bits(APEContext *ctx, int n)
00322 {
00323 int sym = range_decode_culshift(ctx, n);
00324 range_decode_update(ctx, 1, sym);
00325 return sym;
00326 }
00327
00328
#define MODEL_ELEMENTS 64

/** Cumulative frequencies for the version 3.97 symbol model */
static const uint16_t counts_3970[22] = {
        0, 14824, 28224, 39348, 47855, 53994, 58171, 60926,
    62682, 63786, 64463, 64878, 65126, 65276, 65365, 65419,
    65450, 65469, 65480, 65487, 65491, 65493,
};

/** Individual symbol frequencies (differences of counts_3970) */
static const uint16_t counts_diff_3970[21] = {
    14824, 13400, 11124, 8507, 6139, 4177, 2755, 1756,
     1104,   677,   415,  248,  150,   89,   54,   31,
       19,    11,     7,    4,    2,
};

/** Cumulative frequencies for the version 3.98 symbol model */
static const uint16_t counts_3980[22] = {
        0, 19578, 36160, 48417, 56323, 60899, 63265, 64435,
    64971, 65232, 65351, 65416, 65447, 65466, 65476, 65482,
    65485, 65488, 65490, 65491, 65492, 65493,
};

/** Individual symbol frequencies (differences of counts_3980) */
static const uint16_t counts_diff_3980[21] = {
    19578, 16582, 12257, 7906, 4576, 2366, 1170, 536,
      261,   119,    65,   31,   19,   10,    6,   3,
        3,     2,     1,    1,    1,
};
00366
/**
 * Decode one symbol against a fixed cumulative-frequency model.
 * Values of the 16-bit cumulative frequency above the last table entry
 * (65492) escape the model and encode the symbol index directly.
 */
static inline int range_get_symbol(APEContext *ctx,
                                   const uint16_t counts[],
                                   const uint16_t counts_diff[])
{
    int symbol, cf;

    cf = range_decode_culshift(ctx, 16);

    if (cf > 65492) {
        /* escape range: symbol = 63..(63 + 43) mapped from cf */
        symbol = cf - 65535 + 63;
        range_decode_update(ctx, 1, cf);
        if (cf > 65535)
            ctx->error = 1;  /* corrupt stream: cf can't exceed 16 bits */
        return symbol;
    }

    /* linear search in the cumulative-frequency table */
    for (symbol = 0; counts[symbol + 1] <= cf; symbol++);

    range_decode_update(ctx, counts_diff[symbol], counts[symbol]);

    return symbol;
}
00396
00397 static inline void update_rice(APERice *rice, unsigned int x)
00398 {
00399 int lim = rice->k ? (1 << (rice->k + 4)) : 0;
00400 rice->ksum += ((x + 1) / 2) - ((rice->ksum + 16) >> 5);
00401
00402 if (rice->ksum < lim)
00403 rice->k--;
00404 else if (rice->ksum >= (1 << (rice->k + 5)))
00405 rice->k++;
00406 }
00407
/**
 * Decode one signed residual using the version-dependent entropy scheme
 * and adapt the channel's Rice state.
 */
static inline int ape_decode_value(APEContext *ctx, APERice *rice)
{
    unsigned int x, overflow;

    if (ctx->fileversion < 3990) {
        /* pre-3.99 scheme: the model's escape symbol carries an explicit
         * 5-bit k; otherwise k is derived from the adaptive Rice state */
        int tmpk;

        overflow = range_get_symbol(ctx, counts_3970, counts_diff_3970);

        if (overflow == (MODEL_ELEMENTS - 1)) {
            tmpk = range_decode_bits(ctx, 5);
            overflow = 0;
        } else
            tmpk = (rice->k < 1) ? 0 : rice->k - 1;

        if (tmpk <= 16)
            x = range_decode_bits(ctx, tmpk);
        else if (tmpk <= 32) {
            /* wide values are read as two 16-bit halves */
            x = range_decode_bits(ctx, 16);
            x |= (range_decode_bits(ctx, tmpk - 16) << 16);
        } else {
            av_log(ctx->avctx, AV_LOG_ERROR, "Too many bits: %d\n", tmpk);
            /* NOTE(review): the error code is returned as if it were a
             * sample value and ctx->error is not set here — confirm the
             * callers tolerate this */
            return AVERROR_INVALIDDATA;
        }
        x += overflow << tmpk;
    } else {
        /* 3.99+ scheme: value = base + overflow * pivot, pivot = ksum/32 */
        int base, pivot;

        pivot = rice->ksum >> 5;
        if (pivot == 0)
            pivot = 1;

        overflow = range_get_symbol(ctx, counts_3980, counts_diff_3980);

        if (overflow == (MODEL_ELEMENTS - 1)) {
            /* escape: overflow is transmitted literally in two 16-bit halves */
            overflow = range_decode_bits(ctx, 16) << 16;
            overflow |= range_decode_bits(ctx, 16);
        }

        if (pivot < 0x10000) {
            base = range_decode_culfreq(ctx, pivot);
            range_decode_update(ctx, 1, base);
        } else {
            /* pivot >= 2^16: split base into high/low parts so the range
             * coder only ever works with totals of at most 16 bits */
            int base_hi = pivot, base_lo;
            int bbits = 0;

            while (base_hi & ~0xFFFF) {
                base_hi >>= 1;
                bbits++;
            }
            base_hi = range_decode_culfreq(ctx, base_hi + 1);
            range_decode_update(ctx, 1, base_hi);
            base_lo = range_decode_culfreq(ctx, 1 << bbits);
            range_decode_update(ctx, 1, base_lo);

            base = (base_hi << bbits) + base_lo;
        }

        x = base + overflow * pivot;
    }

    update_rice(rice, x);

    /* unfold: odd -> positive, even -> negative */
    if (x & 1)
        return (x >> 1) + 1;
    else
        return -(x >> 1);
}
00477
00478 static void entropy_decode(APEContext *ctx, int blockstodecode, int stereo)
00479 {
00480 int32_t *decoded0 = ctx->decoded[0];
00481 int32_t *decoded1 = ctx->decoded[1];
00482
00483 while (blockstodecode--) {
00484 *decoded0++ = ape_decode_value(ctx, &ctx->riceY);
00485 if (stereo)
00486 *decoded1++ = ape_decode_value(ctx, &ctx->riceX);
00487 }
00488 }
00489
/** Read the frame header (CRC, optional flags) and start the range coder. */
static int init_entropy_decoder(APEContext *ctx)
{
    /* the CRC word plus at least the skipped byte and coder seed must fit */
    if (ctx->data_end - ctx->ptr < 6)
        return AVERROR_INVALIDDATA;
    ctx->CRC = bytestream_get_be32(&ctx->ptr);

    /* after version 3.82 the top CRC bit flags an extra frame-flags word */
    ctx->frameflags = 0;
    if ((ctx->fileversion > 3820) && (ctx->CRC & 0x80000000)) {
        ctx->CRC &= ~0x80000000;

        if (ctx->data_end - ctx->ptr < 6)
            return AVERROR_INVALIDDATA;
        ctx->frameflags = bytestream_get_be32(&ctx->ptr);
    }

    /* reset the adaptive Rice states to their fixed starting point */
    ctx->riceX.k = 10;
    ctx->riceX.ksum = (1 << ctx->riceX.k) * 16;
    ctx->riceY.k = 10;
    ctx->riceY.ksum = (1 << ctx->riceY.k) * 16;

    /* the first byte of compressed data is skipped */
    ctx->ptr++;

    range_start_decoding(ctx);

    return 0;
}
00520
/** Start values for the predictor-A adaptive coefficients */
static const int32_t initial_coeffs[4] = {
    360, 317, -109, 98
};
00524
00525 static void init_predictor_decoder(APEContext *ctx)
00526 {
00527 APEPredictor *p = &ctx->predictor;
00528
00529
00530 memset(p->historybuffer, 0, PREDICTOR_SIZE * sizeof(*p->historybuffer));
00531 p->buf = p->historybuffer;
00532
00533
00534 memcpy(p->coeffsA[0], initial_coeffs, sizeof(initial_coeffs));
00535 memcpy(p->coeffsA[1], initial_coeffs, sizeof(initial_coeffs));
00536 memset(p->coeffsB, 0, sizeof(p->coeffsB));
00537
00538 p->filterA[0] = p->filterA[1] = 0;
00539 p->filterB[0] = p->filterB[1] = 0;
00540 p->lastA[0] = p->lastA[1] = 0;
00541 }
00542
/** Inverted sign: -1 for positive x, +1 for negative x, 0 for zero. */
static inline int APESIGN(int32_t x)
{
    if (x > 0)
        return -1;
    if (x < 0)
        return 1;
    return 0;
}
00547
/**
 * Core of the stereo predictor: run the two cascaded adaptive predictors
 * (A over the channel's own past, B over the other channel's filtered
 * output) and adapt both coefficient sets by the input's sign.
 * @return the reconstructed value for this channel
 */
static av_always_inline int predictor_update_filter(APEPredictor *p,
                                                    const int decoded, const int filter,
                                                    const int delayA, const int delayB,
                                                    const int adaptA, const int adaptB)
{
    int32_t predictionA, predictionB, sign;

    /* push the previous output and its first difference into delay line A */
    p->buf[delayA]     = p->lastA[filter];
    p->buf[adaptA]     = APESIGN(p->buf[delayA]);
    p->buf[delayA - 1] = p->buf[delayA] - p->buf[delayA - 1];
    p->buf[adaptA - 1] = APESIGN(p->buf[delayA - 1]);

    predictionA = p->buf[delayA    ] * p->coeffsA[filter][0] +
                  p->buf[delayA - 1] * p->coeffsA[filter][1] +
                  p->buf[delayA - 2] * p->coeffsA[filter][2] +
                  p->buf[delayA - 3] * p->coeffsA[filter][3];

    /* delay line B is fed from the other channel's leaky filtered output */
    p->buf[delayB]     = p->filterA[filter ^ 1] - ((p->filterB[filter] * 31) >> 5);
    p->buf[adaptB]     = APESIGN(p->buf[delayB]);
    p->buf[delayB - 1] = p->buf[delayB] - p->buf[delayB - 1];
    p->buf[adaptB - 1] = APESIGN(p->buf[delayB - 1]);
    p->filterB[filter] = p->filterA[filter ^ 1];

    predictionB = p->buf[delayB    ] * p->coeffsB[filter][0] +
                  p->buf[delayB - 1] * p->coeffsB[filter][1] +
                  p->buf[delayB - 2] * p->coeffsB[filter][2] +
                  p->buf[delayB - 3] * p->coeffsB[filter][3] +
                  p->buf[delayB - 4] * p->coeffsB[filter][4];

    /* combine both predictions (B weighted half) and apply a leaky
     * integrator (31/32) to form the final output */
    p->lastA[filter]   = decoded + ((predictionA + (predictionB >> 1)) >> 10);
    p->filterA[filter] = p->lastA[filter] + ((p->filterA[filter] * 31) >> 5);

    /* sign-LMS coefficient adaptation */
    sign = APESIGN(decoded);
    p->coeffsA[filter][0] += p->buf[adaptA    ] * sign;
    p->coeffsA[filter][1] += p->buf[adaptA - 1] * sign;
    p->coeffsA[filter][2] += p->buf[adaptA - 2] * sign;
    p->coeffsA[filter][3] += p->buf[adaptA - 3] * sign;
    p->coeffsB[filter][0] += p->buf[adaptB    ] * sign;
    p->coeffsB[filter][1] += p->buf[adaptB - 1] * sign;
    p->coeffsB[filter][2] += p->buf[adaptB - 2] * sign;
    p->coeffsB[filter][3] += p->buf[adaptB - 3] * sign;
    p->coeffsB[filter][4] += p->buf[adaptB - 4] * sign;

    return p->filterA[filter];
}
00594
/** Run the stereo predictor over count residual pairs, in place. */
static void predictor_decode_stereo(APEContext *ctx, int count)
{
    APEPredictor *p = &ctx->predictor;
    int32_t *decoded0 = ctx->decoded[0];
    int32_t *decoded1 = ctx->decoded[1];

    while (count--) {
        /* Y channel */
        *decoded0 = predictor_update_filter(p, *decoded0, 0, YDELAYA, YDELAYB,
                                            YADAPTCOEFFSA, YADAPTCOEFFSB);
        decoded0++;
        /* X channel */
        *decoded1 = predictor_update_filter(p, *decoded1, 1, XDELAYA, XDELAYB,
                                            XADAPTCOEFFSA, XADAPTCOEFFSB);
        decoded1++;

        /* advance the shared rolling buffer */
        p->buf++;

        /* wrap: copy the live PREDICTOR_SIZE tail back to the front */
        if (p->buf == p->historybuffer + HISTORY_SIZE) {
            memmove(p->historybuffer, p->buf,
                    PREDICTOR_SIZE * sizeof(*p->historybuffer));
            p->buf = p->historybuffer;
        }
    }
}
00621
/** Mono variant of the predictor: single A predictor plus leaky filter. */
static void predictor_decode_mono(APEContext *ctx, int count)
{
    APEPredictor *p = &ctx->predictor;
    int32_t *decoded0 = ctx->decoded[0];
    int32_t predictionA, currentA, A, sign;

    currentA = p->lastA[0];

    while (count--) {
        A = *decoded0;

        /* store the previous output and its first difference */
        p->buf[YDELAYA]     = currentA;
        p->buf[YDELAYA - 1] = p->buf[YDELAYA] - p->buf[YDELAYA - 1];

        predictionA = p->buf[YDELAYA    ] * p->coeffsA[0][0] +
                      p->buf[YDELAYA - 1] * p->coeffsA[0][1] +
                      p->buf[YDELAYA - 2] * p->coeffsA[0][2] +
                      p->buf[YDELAYA - 3] * p->coeffsA[0][3];

        currentA = A + (predictionA >> 10);

        p->buf[YADAPTCOEFFSA]     = APESIGN(p->buf[YDELAYA    ]);
        p->buf[YADAPTCOEFFSA - 1] = APESIGN(p->buf[YDELAYA - 1]);

        /* sign-LMS adaptation of the A coefficients */
        sign = APESIGN(A);
        p->coeffsA[0][0] += p->buf[YADAPTCOEFFSA    ] * sign;
        p->coeffsA[0][1] += p->buf[YADAPTCOEFFSA - 1] * sign;
        p->coeffsA[0][2] += p->buf[YADAPTCOEFFSA - 2] * sign;
        p->coeffsA[0][3] += p->buf[YADAPTCOEFFSA - 3] * sign;

        p->buf++;

        /* wrap the rolling buffer, keeping the live tail */
        if (p->buf == p->historybuffer + HISTORY_SIZE) {
            memmove(p->historybuffer, p->buf,
                    PREDICTOR_SIZE * sizeof(*p->historybuffer));
            p->buf = p->historybuffer;
        }

        /* leaky integrator (31/32) produces the final sample */
        p->filterA[0] = currentA + ((p->filterA[0] * 31) >> 5);
        *(decoded0++) = p->filterA[0];
    }

    p->lastA[0] = currentA;
}
00667
/**
 * Carve one contiguous int16 buffer into the filter's working areas:
 *   [0, order)             coefficients
 *   [order, order*3)       history (adaptcoeffs starts at order inside it)
 *   from order*3 onwards   delay line
 */
static void do_init_filter(APEFilter *f, int16_t *buf, int order)
{
    f->coeffs = buf;
    f->historybuffer = buf + order;
    f->delay = f->historybuffer + order * 2;
    f->adaptcoeffs = f->historybuffer + order;

    memset(f->historybuffer, 0, (order * 2) * sizeof(*f->historybuffer));
    memset(f->coeffs, 0, order * sizeof(*f->coeffs));
    f->avg = 0;
}
00679
00680 static void init_filter(APEContext *ctx, APEFilter *f, int16_t *buf, int order)
00681 {
00682 do_init_filter(&f[0], buf, order);
00683 do_init_filter(&f[1], buf + order * 3 + HISTORY_SIZE, order);
00684 }
00685
00686 static void do_apply_filter(APEContext *ctx, int version, APEFilter *f,
00687 int32_t *data, int count, int order, int fracbits)
00688 {
00689 int res;
00690 int absres;
00691
00692 while (count--) {
00693
00694 res = ctx->dsp.scalarproduct_and_madd_int16(f->coeffs, f->delay - order,
00695 f->adaptcoeffs - order,
00696 order, APESIGN(*data));
00697 res = (res + (1 << (fracbits - 1))) >> fracbits;
00698 res += *data;
00699 *data++ = res;
00700
00701
00702 *f->delay++ = av_clip_int16(res);
00703
00704 if (version < 3980) {
00705
00706 f->adaptcoeffs[0] = (res == 0) ? 0 : ((res >> 28) & 8) - 4;
00707 f->adaptcoeffs[-4] >>= 1;
00708 f->adaptcoeffs[-8] >>= 1;
00709 } else {
00710
00711
00712
00713 absres = FFABS(res);
00714 if (absres)
00715 *f->adaptcoeffs = ((res & (-1<<31)) ^ (-1<<30)) >>
00716 (25 + (absres <= f->avg*3) + (absres <= f->avg*4/3));
00717 else
00718 *f->adaptcoeffs = 0;
00719
00720 f->avg += (absres - f->avg) / 16;
00721
00722 f->adaptcoeffs[-1] >>= 1;
00723 f->adaptcoeffs[-2] >>= 1;
00724 f->adaptcoeffs[-8] >>= 1;
00725 }
00726
00727 f->adaptcoeffs++;
00728
00729
00730 if (f->delay == f->historybuffer + HISTORY_SIZE + (order * 2)) {
00731 memmove(f->historybuffer, f->delay - (order * 2),
00732 (order * 2) * sizeof(*f->historybuffer));
00733 f->delay = f->historybuffer + order * 2;
00734 f->adaptcoeffs = f->historybuffer + order;
00735 }
00736 }
00737 }
00738
00739 static void apply_filter(APEContext *ctx, APEFilter *f,
00740 int32_t *data0, int32_t *data1,
00741 int count, int order, int fracbits)
00742 {
00743 do_apply_filter(ctx, ctx->fileversion, &f[0], data0, count, order, fracbits);
00744 if (data1)
00745 do_apply_filter(ctx, ctx->fileversion, &f[1], data1, count, order, fracbits);
00746 }
00747
00748 static void ape_apply_filters(APEContext *ctx, int32_t *decoded0,
00749 int32_t *decoded1, int count)
00750 {
00751 int i;
00752
00753 for (i = 0; i < APE_FILTER_LEVELS; i++) {
00754 if (!ape_filter_orders[ctx->fset][i])
00755 break;
00756 apply_filter(ctx, ctx->filters[i], decoded0, decoded1, count,
00757 ape_filter_orders[ctx->fset][i],
00758 ape_filter_fracbits[ctx->fset][i]);
00759 }
00760 }
00761
00762 static int init_frame_decoder(APEContext *ctx)
00763 {
00764 int i, ret;
00765 if ((ret = init_entropy_decoder(ctx)) < 0)
00766 return ret;
00767 init_predictor_decoder(ctx);
00768
00769 for (i = 0; i < APE_FILTER_LEVELS; i++) {
00770 if (!ape_filter_orders[ctx->fset][i])
00771 break;
00772 init_filter(ctx, ctx->filters[i], ctx->filterbuf[i],
00773 ape_filter_orders[ctx->fset][i]);
00774 }
00775 return 0;
00776 }
00777
00778 static void ape_unpack_mono(APEContext *ctx, int count)
00779 {
00780 if (ctx->frameflags & APE_FRAMECODE_STEREO_SILENCE) {
00781
00782 av_log(ctx->avctx, AV_LOG_DEBUG, "pure silence mono\n");
00783 return;
00784 }
00785
00786 entropy_decode(ctx, count, 0);
00787 ape_apply_filters(ctx, ctx->decoded[0], NULL, count);
00788
00789
00790 predictor_decode_mono(ctx, count);
00791
00792
00793 if (ctx->channels == 2) {
00794 memcpy(ctx->decoded[1], ctx->decoded[0], count * sizeof(*ctx->decoded[1]));
00795 }
00796 }
00797
00798 static void ape_unpack_stereo(APEContext *ctx, int count)
00799 {
00800 int32_t left, right;
00801 int32_t *decoded0 = ctx->decoded[0];
00802 int32_t *decoded1 = ctx->decoded[1];
00803
00804 if (ctx->frameflags & APE_FRAMECODE_STEREO_SILENCE) {
00805
00806 av_log(ctx->avctx, AV_LOG_DEBUG, "pure silence stereo\n");
00807 return;
00808 }
00809
00810 entropy_decode(ctx, count, 1);
00811 ape_apply_filters(ctx, decoded0, decoded1, count);
00812
00813
00814 predictor_decode_stereo(ctx, count);
00815
00816
00817 while (count--) {
00818 left = *decoded1 - (*decoded0 / 2);
00819 right = left + *decoded0;
00820
00821 *(decoded0++) = left;
00822 *(decoded1++) = right;
00823 }
00824 }
00825
00826 static int ape_decode_frame(AVCodecContext *avctx, void *data,
00827 int *got_frame_ptr, AVPacket *avpkt)
00828 {
00829 const uint8_t *buf = avpkt->data;
00830 APEContext *s = avctx->priv_data;
00831 uint8_t *sample8;
00832 int16_t *sample16;
00833 int32_t *sample24;
00834 int i, ch, ret;
00835 int blockstodecode;
00836
00837
00838
00839 av_assert0(s->samples >= 0);
00840
00841 if(!s->samples){
00842 uint32_t nblocks, offset;
00843 int buf_size;
00844
00845 if (!avpkt->size) {
00846 *got_frame_ptr = 0;
00847 return 0;
00848 }
00849 if (avpkt->size < 8) {
00850 av_log(avctx, AV_LOG_ERROR, "Packet is too small\n");
00851 return AVERROR_INVALIDDATA;
00852 }
00853 buf_size = avpkt->size & ~3;
00854 if (buf_size != avpkt->size) {
00855 av_log(avctx, AV_LOG_WARNING, "packet size is not a multiple of 4. "
00856 "extra bytes at the end will be skipped.\n");
00857 }
00858
00859 av_fast_malloc(&s->data, &s->data_size, buf_size);
00860 if (!s->data)
00861 return AVERROR(ENOMEM);
00862 s->dsp.bswap_buf((uint32_t*)s->data, (const uint32_t*)buf, buf_size >> 2);
00863 s->ptr = s->data;
00864 s->data_end = s->data + buf_size;
00865
00866 nblocks = bytestream_get_be32(&s->ptr);
00867 offset = bytestream_get_be32(&s->ptr);
00868 if (offset > 3) {
00869 av_log(avctx, AV_LOG_ERROR, "Incorrect offset passed\n");
00870 s->data = NULL;
00871 return AVERROR_INVALIDDATA;
00872 }
00873 if (s->data_end - s->ptr < offset) {
00874 av_log(avctx, AV_LOG_ERROR, "Packet is too small\n");
00875 return AVERROR_INVALIDDATA;
00876 }
00877 s->ptr += offset;
00878
00879 if (!nblocks || nblocks > INT_MAX) {
00880 av_log(avctx, AV_LOG_ERROR, "Invalid sample count: %u.\n", nblocks);
00881 return AVERROR_INVALIDDATA;
00882 }
00883 s->samples = nblocks;
00884
00885
00886 if (init_frame_decoder(s) < 0) {
00887 av_log(avctx, AV_LOG_ERROR, "Error reading frame header\n");
00888 return AVERROR_INVALIDDATA;
00889 }
00890 }
00891
00892 if (!s->data) {
00893 *got_frame_ptr = 0;
00894 return avpkt->size;
00895 }
00896
00897 blockstodecode = FFMIN(s->blocks_per_loop, s->samples);
00898
00899
00900 av_fast_malloc(&s->decoded_buffer, &s->decoded_size,
00901 2 * FFALIGN(blockstodecode, 8) * sizeof(*s->decoded_buffer));
00902 if (!s->decoded_buffer)
00903 return AVERROR(ENOMEM);
00904 memset(s->decoded_buffer, 0, s->decoded_size);
00905 s->decoded[0] = s->decoded_buffer;
00906 s->decoded[1] = s->decoded_buffer + FFALIGN(blockstodecode, 8);
00907
00908
00909 s->frame.nb_samples = blockstodecode;
00910 if ((ret = ff_get_buffer(avctx, &s->frame)) < 0) {
00911 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
00912 return ret;
00913 }
00914
00915 s->error=0;
00916
00917 if ((s->channels == 1) || (s->frameflags & APE_FRAMECODE_PSEUDO_STEREO))
00918 ape_unpack_mono(s, blockstodecode);
00919 else
00920 ape_unpack_stereo(s, blockstodecode);
00921 emms_c();
00922
00923 if (s->error) {
00924 s->samples=0;
00925 av_log(avctx, AV_LOG_ERROR, "Error decoding frame\n");
00926 return AVERROR_INVALIDDATA;
00927 }
00928
00929 switch (s->bps) {
00930 case 8:
00931 for (ch = 0; ch < s->channels; ch++) {
00932 sample8 = (uint8_t *)s->frame.data[ch];
00933 for (i = 0; i < blockstodecode; i++)
00934 *sample8++ = (s->decoded[ch][i] + 0x80) & 0xff;
00935 }
00936 break;
00937 case 16:
00938 for (ch = 0; ch < s->channels; ch++) {
00939 sample16 = (int16_t *)s->frame.data[ch];
00940 for (i = 0; i < blockstodecode; i++)
00941 *sample16++ = s->decoded[ch][i];
00942 }
00943 break;
00944 case 24:
00945 for (ch = 0; ch < s->channels; ch++) {
00946 sample24 = (int32_t *)s->frame.data[ch];
00947 for (i = 0; i < blockstodecode; i++)
00948 *sample24++ = s->decoded[ch][i] << 8;
00949 }
00950 break;
00951 }
00952
00953 s->samples -= blockstodecode;
00954
00955 *got_frame_ptr = 1;
00956 *(AVFrame *)data = s->frame;
00957
00958 return !s->samples ? avpkt->size : 0;
00959 }
00960
00961 static void ape_flush(AVCodecContext *avctx)
00962 {
00963 APEContext *s = avctx->priv_data;
00964 s->samples= 0;
00965 }
00966
#define OFFSET(x) offsetof(APEContext, x)
#define PAR (AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_AUDIO_PARAM)
static const AVOption options[] = {
    /* bounds the work done per decode call (see CODEC_CAP_SUBFRAMES) */
    { "max_samples", "maximum number of samples decoded per call", OFFSET(blocks_per_loop), AV_OPT_TYPE_INT, { .i64 = 4608 }, 1, INT_MAX, PAR, "max_samples" },
    { "all", "no maximum. decode all samples for each packet at once", 0, AV_OPT_TYPE_CONST, { .i64 = INT_MAX }, INT_MIN, INT_MAX, PAR, "max_samples" },
    { NULL},
};

static const AVClass ape_decoder_class = {
    .class_name = "APE decoder",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

AVCodec ff_ape_decoder = {
    .name           = "ape",
    .type           = AVMEDIA_TYPE_AUDIO,
    .id             = AV_CODEC_ID_APE,
    .priv_data_size = sizeof(APEContext),
    .init           = ape_decode_init,
    .close          = ape_decode_close,
    .decode         = ape_decode_frame,
    .capabilities   = CODEC_CAP_SUBFRAMES | CODEC_CAP_DELAY | CODEC_CAP_DR1,
    .flush          = ape_flush,
    .long_name      = NULL_IF_CONFIG_SMALL("Monkey's Audio"),
    .sample_fmts    = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_U8P,
                                                      AV_SAMPLE_FMT_S16P,
                                                      AV_SAMPLE_FMT_S32P,
                                                      AV_SAMPLE_FMT_NONE },
    .priv_class     = &ape_decoder_class,
};