apedec.c
1 /*
2  * Monkey's Audio lossless audio decoder
3  * Copyright (c) 2007 Benjamin Zores <ben@geexbox.org>
4  * based upon libdemac from Dave Chapman.
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 #include <inttypes.h>
24 
25 #include "libavutil/avassert.h"
26 #include "libavutil/channel_layout.h"
27 #include "libavutil/opt.h"
28 #include "lossless_audiodsp.h"
29 #include "avcodec.h"
30 #include "bswapdsp.h"
31 #include "bytestream.h"
32 #include "internal.h"
33 #include "get_bits.h"
34 #include "unary.h"
35 
36 /**
37  * @file
38  * Monkey's Audio lossless audio decoder
39  */
40 
41 #define MAX_CHANNELS 2
42 #define MAX_BYTESPERSAMPLE 3
43 
44 #define APE_FRAMECODE_MONO_SILENCE 1
45 #define APE_FRAMECODE_STEREO_SILENCE 3
46 #define APE_FRAMECODE_PSEUDO_STEREO 4
47 
48 #define HISTORY_SIZE 512
49 #define PREDICTOR_ORDER 8
50 /** Total size of all predictor histories */
51 #define PREDICTOR_SIZE 50
52 
53 #define YDELAYA (18 + PREDICTOR_ORDER*4)
54 #define YDELAYB (18 + PREDICTOR_ORDER*3)
55 #define XDELAYA (18 + PREDICTOR_ORDER*2)
56 #define XDELAYB (18 + PREDICTOR_ORDER)
57 
58 #define YADAPTCOEFFSA 18
59 #define XADAPTCOEFFSA 14
60 #define YADAPTCOEFFSB 10
61 #define XADAPTCOEFFSB 5
62 
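The delay and adaptation offsets above index into a sliding window: the predictors write at fixed offsets from a moving pointer p->buf and, once that pointer has advanced HISTORY_SIZE positions, the most recent PREDICTOR_SIZE entries are moved back to the start of the backing array (see the memmove calls in the predictor functions below). A minimal standalone sketch of that rolling-window idea, with made-up sizes (HIST, KEEP) and a dummy payload rather than the decoder's real state:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define HIST 16   /* stand-in for HISTORY_SIZE   */
#define KEEP 4    /* stand-in for PREDICTOR_SIZE */

int main(void)
{
    int32_t backing[HIST + KEEP] = { 0 };
    int32_t *buf = backing;

    for (int n = 0; n < 40; n++) {
        buf[KEEP - 1] = n;   /* write at a fixed offset from the moving pointer */
        buf++;
        if (buf == backing + HIST) {   /* window exhausted: recycle the array */
            memmove(backing, buf, KEEP * sizeof(*backing));
            buf = backing;
        }
    }
    /* the sample written on the final iteration is still reachable */
    printf("last value kept: %d\n", (int)buf[KEEP - 2]);
    return 0;
}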
63 /**
64  * Possible compression levels
65  * @{
66  */
67 enum APECompressionLevel {
68     COMPRESSION_LEVEL_FAST       = 1000,
69     COMPRESSION_LEVEL_NORMAL     = 2000,
70     COMPRESSION_LEVEL_HIGH       = 3000,
71     COMPRESSION_LEVEL_EXTRA_HIGH = 4000,
72     COMPRESSION_LEVEL_INSANE     = 5000
73 };
74 /** @} */
75 
76 #define APE_FILTER_LEVELS 3
77 
78 /** Filter orders depending on compression level */
79 static const uint16_t ape_filter_orders[5][APE_FILTER_LEVELS] = {
80  { 0, 0, 0 },
81  { 16, 0, 0 },
82  { 64, 0, 0 },
83  { 32, 256, 0 },
84  { 16, 256, 1280 }
85 };
86 
87 /** Filter fraction bits depending on compression level */
88 static const uint8_t ape_filter_fracbits[5][APE_FILTER_LEVELS] = {
89     { 0, 0, 0 },
90  { 11, 0, 0 },
91  { 11, 0, 0 },
92  { 10, 13, 0 },
93  { 11, 13, 15 }
94 };
95 
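Both tables are indexed by the filter set derived later in ape_decode_init() as s->fset = s->compression_level / 1000 - 1, so a level of 3000 (HIGH) selects row 2 and a level of 5000 (INSANE) selects row 4. A small self-contained sketch of that lookup, with the table names shortened (orders, fracbits) but the values copied from above:

#include <stdint.h>
#include <stdio.h>

#define APE_FILTER_LEVELS 3

static const uint16_t orders[5][APE_FILTER_LEVELS] = {
    {  0,   0,    0 }, { 16, 0, 0 }, { 64, 0, 0 },
    { 32, 256,    0 }, { 16, 256, 1280 }
};
static const uint8_t fracbits[5][APE_FILTER_LEVELS] = {
    {  0,  0,  0 }, { 11, 0, 0 }, { 11, 0, 0 },
    { 10, 13,  0 }, { 11, 13, 15 }
};

int main(void)
{
    int compression_level = 5000;            /* e.g. COMPRESSION_LEVEL_INSANE */
    int fset = compression_level / 1000 - 1; /* same derivation as ape_decode_init() */

    for (int i = 0; i < APE_FILTER_LEVELS && orders[fset][i]; i++)
        printf("stage %d: order %u, %u fraction bits\n",
               i, orders[fset][i], fracbits[fset][i]);
    return 0;
}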
96 
97 /** Filters applied to the decoded data */
98 typedef struct APEFilter {
99  int16_t *coeffs; ///< actual coefficients used in filtering
100  int16_t *adaptcoeffs; ///< adaptive filter coefficients used for correcting of actual filter coefficients
101  int16_t *historybuffer; ///< filter memory
102  int16_t *delay; ///< filtered values
103 
104  int avg;
105 } APEFilter;
106 
107 typedef struct APERice {
108  uint32_t k;
109  uint32_t ksum;
110 } APERice;
111 
112 typedef struct APERangecoder {
113  uint32_t low; ///< low end of interval
114  uint32_t range; ///< length of interval
115  uint32_t help; ///< bytes_to_follow resp. intermediate value
116  unsigned int buffer; ///< buffer for input/output
117 } APERangecoder;
118 
119 /** Filter histories */
120 typedef struct APEPredictor {
121     int32_t *buf;
122 
123  int32_t lastA[2];
124 
125  int32_t filterA[2];
126  int32_t filterB[2];
127 
128  uint32_t coeffsA[2][4]; ///< adaption coefficients
129  uint32_t coeffsB[2][5]; ///< adaption coefficients
130     int32_t historybuffer[HISTORY_SIZE + PREDICTOR_SIZE];
131 
132  unsigned int sample_pos;
133 } APEPredictor;
134 
135 /** Decoder context */
136 typedef struct APEContext {
137  AVClass *class; ///< class for AVOptions
138     AVCodecContext *avctx;
139     BswapDSPContext bdsp;
140     LLAudDSPContext adsp;
141     int channels;
142  int samples; ///< samples left to decode in current frame
143  int bps;
144 
145  int fileversion; ///< codec version, very important in decoding process
146  int compression_level; ///< compression levels
147  int fset; ///< which filter set to use (calculated from compression level)
148  int flags; ///< global decoder flags
149 
150  uint32_t CRC; ///< frame CRC
151  int frameflags; ///< frame flags
152  APEPredictor predictor; ///< predictor used for final reconstruction
153 
153 
154     int32_t *decoded_buffer;
155     int decoded_size;
156     int32_t *decoded[MAX_CHANNELS];   ///< decoded data for each channel
157  int blocks_per_loop; ///< maximum number of samples to decode for each call
158 
159  int16_t* filterbuf[APE_FILTER_LEVELS]; ///< filter memory
160 
161  APERangecoder rc; ///< rangecoder used to decode actual values
162  APERice riceX; ///< rice code parameters for the second channel
163  APERice riceY; ///< rice code parameters for the first channel
164  APEFilter filters[APE_FILTER_LEVELS][2]; ///< filters used for reconstruction
165     GetBitContext gb;
166 
167  uint8_t *data; ///< current frame data
168  uint8_t *data_end; ///< frame data end
169  int data_size; ///< frame data allocated size
170  const uint8_t *ptr; ///< current position in frame data
171 
172  int error;
173 
174  void (*entropy_decode_mono)(struct APEContext *ctx, int blockstodecode);
175  void (*entropy_decode_stereo)(struct APEContext *ctx, int blockstodecode);
176     void (*predictor_decode_mono)(struct APEContext *ctx, int count);
177     void (*predictor_decode_stereo)(struct APEContext *ctx, int count);
178 } APEContext;
179 
180 static void ape_apply_filters(APEContext *ctx, int32_t *decoded0,
181  int32_t *decoded1, int count);
182 
183 static void entropy_decode_mono_0000(APEContext *ctx, int blockstodecode);
184 static void entropy_decode_stereo_0000(APEContext *ctx, int blockstodecode);
185 static void entropy_decode_mono_3860(APEContext *ctx, int blockstodecode);
186 static void entropy_decode_stereo_3860(APEContext *ctx, int blockstodecode);
187 static void entropy_decode_mono_3900(APEContext *ctx, int blockstodecode);
188 static void entropy_decode_stereo_3900(APEContext *ctx, int blockstodecode);
189 static void entropy_decode_stereo_3930(APEContext *ctx, int blockstodecode);
190 static void entropy_decode_mono_3990(APEContext *ctx, int blockstodecode);
191 static void entropy_decode_stereo_3990(APEContext *ctx, int blockstodecode);
192 
193 static void predictor_decode_mono_3800(APEContext *ctx, int count);
194 static void predictor_decode_stereo_3800(APEContext *ctx, int count);
195 static void predictor_decode_mono_3930(APEContext *ctx, int count);
196 static void predictor_decode_stereo_3930(APEContext *ctx, int count);
197 static void predictor_decode_mono_3950(APEContext *ctx, int count);
198 static void predictor_decode_stereo_3950(APEContext *ctx, int count);
199 
200 static av_cold int ape_decode_close(AVCodecContext *avctx)
201 {
202  APEContext *s = avctx->priv_data;
203  int i;
204 
205  for (i = 0; i < APE_FILTER_LEVELS; i++)
206  av_freep(&s->filterbuf[i]);
207 
208     av_freep(&s->decoded_buffer);
209     av_freep(&s->data);
210  s->decoded_size = s->data_size = 0;
211 
212  return 0;
213 }
214 
215 static av_cold int ape_decode_init(AVCodecContext *avctx)
216 {
217  APEContext *s = avctx->priv_data;
218  int i;
219 
220  if (avctx->extradata_size != 6) {
221  av_log(avctx, AV_LOG_ERROR, "Incorrect extradata\n");
222  return AVERROR(EINVAL);
223  }
224  if (avctx->channels > 2) {
225  av_log(avctx, AV_LOG_ERROR, "Only mono and stereo is supported\n");
226  return AVERROR(EINVAL);
227  }
228  s->bps = avctx->bits_per_coded_sample;
229  switch (s->bps) {
230  case 8:
231  avctx->sample_fmt = AV_SAMPLE_FMT_U8P;
232  break;
233  case 16:
234         avctx->sample_fmt = AV_SAMPLE_FMT_S16P;
235         break;
236  case 24:
237         avctx->sample_fmt = AV_SAMPLE_FMT_S32P;
238         break;
239  default:
240  avpriv_request_sample(avctx,
241  "%d bits per coded sample", s->bps);
242  return AVERROR_PATCHWELCOME;
243  }
244  s->avctx = avctx;
245  s->channels = avctx->channels;
246  s->fileversion = AV_RL16(avctx->extradata);
247  s->compression_level = AV_RL16(avctx->extradata + 2);
248  s->flags = AV_RL16(avctx->extradata + 4);
249 
250  av_log(avctx, AV_LOG_VERBOSE, "Compression Level: %d - Flags: %d\n",
251  s->compression_level, s->flags);
252     if (s->compression_level % 1000 || s->compression_level > COMPRESSION_LEVEL_INSANE ||
253         !s->compression_level ||
254         (s->fileversion < 3930 && s->compression_level == COMPRESSION_LEVEL_INSANE)) {
255         av_log(avctx, AV_LOG_ERROR, "Incorrect compression level %d\n",
256  s->compression_level);
257  return AVERROR_INVALIDDATA;
258  }
259  s->fset = s->compression_level / 1000 - 1;
260  for (i = 0; i < APE_FILTER_LEVELS; i++) {
261  if (!ape_filter_orders[s->fset][i])
262  break;
263  FF_ALLOC_OR_GOTO(avctx, s->filterbuf[i],
264  (ape_filter_orders[s->fset][i] * 3 + HISTORY_SIZE) * 4,
265  filter_alloc_fail);
266  }
267 
268     if (s->fileversion < 3860) {
269         s->entropy_decode_mono   = entropy_decode_mono_0000;
270         s->entropy_decode_stereo = entropy_decode_stereo_0000;
271     } else if (s->fileversion < 3900) {
272         s->entropy_decode_mono   = entropy_decode_mono_3860;
273         s->entropy_decode_stereo = entropy_decode_stereo_3860;
274     } else if (s->fileversion < 3930) {
275         s->entropy_decode_mono   = entropy_decode_mono_3900;
276         s->entropy_decode_stereo = entropy_decode_stereo_3900;
277     } else if (s->fileversion < 3990) {
278         s->entropy_decode_mono   = entropy_decode_mono_3900;
279         s->entropy_decode_stereo = entropy_decode_stereo_3930;
280     } else {
281         s->entropy_decode_mono   = entropy_decode_mono_3990;
282         s->entropy_decode_stereo = entropy_decode_stereo_3990;
283     }
284 
285     if (s->fileversion < 3930) {
286         s->predictor_decode_mono   = predictor_decode_mono_3800;
287         s->predictor_decode_stereo = predictor_decode_stereo_3800;
288     } else if (s->fileversion < 3950) {
289         s->predictor_decode_mono   = predictor_decode_mono_3930;
290         s->predictor_decode_stereo = predictor_decode_stereo_3930;
291     } else {
292         s->predictor_decode_mono   = predictor_decode_mono_3950;
293         s->predictor_decode_stereo = predictor_decode_stereo_3950;
294     }
295 
296  ff_bswapdsp_init(&s->bdsp);
297  ff_llauddsp_init(&s->adsp);
298     avctx->channel_layout = (avctx->channels == 2) ? AV_CH_LAYOUT_STEREO : AV_CH_LAYOUT_MONO;
299 
300  return 0;
301 filter_alloc_fail:
302  ape_decode_close(avctx);
303  return AVERROR(ENOMEM);
304 }
305 
306 /**
307  * @name APE range decoding functions
308  * @{
309  */
310 
311 #define CODE_BITS 32
312 #define TOP_VALUE ((unsigned int)1 << (CODE_BITS-1))
313 #define SHIFT_BITS (CODE_BITS - 9)
314 #define EXTRA_BITS ((CODE_BITS-2) % 8 + 1)
315 #define BOTTOM_VALUE (TOP_VALUE >> 8)
316 
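With CODE_BITS fixed at 32 these evaluate to TOP_VALUE = 0x80000000, SHIFT_BITS = 23, EXTRA_BITS = 7 and BOTTOM_VALUE = 0x00800000; range_dec_normalize() below keeps rc.range above BOTTOM_VALUE by pulling in one input byte at a time. A tiny standalone check of those derived constants:

#include <stdio.h>

#define CODE_BITS    32
#define TOP_VALUE    ((unsigned int)1 << (CODE_BITS - 1))
#define SHIFT_BITS   (CODE_BITS - 9)
#define EXTRA_BITS   ((CODE_BITS - 2) % 8 + 1)
#define BOTTOM_VALUE (TOP_VALUE >> 8)

int main(void)
{
    printf("TOP_VALUE    = 0x%08x\n", TOP_VALUE);     /* 0x80000000 */
    printf("SHIFT_BITS   = %d\n", SHIFT_BITS);        /* 23 */
    printf("EXTRA_BITS   = %d\n", EXTRA_BITS);        /* 7 */
    printf("BOTTOM_VALUE = 0x%08x\n", BOTTOM_VALUE);  /* 0x00800000 */
    return 0;
}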
317 /** Start the decoder */
318 static inline void range_start_decoding(APEContext *ctx)
319 {
320  ctx->rc.buffer = bytestream_get_byte(&ctx->ptr);
321  ctx->rc.low = ctx->rc.buffer >> (8 - EXTRA_BITS);
322  ctx->rc.range = (uint32_t) 1 << EXTRA_BITS;
323 }
324 
325 /** Perform normalization */
326 static inline void range_dec_normalize(APEContext *ctx)
327 {
328  while (ctx->rc.range <= BOTTOM_VALUE) {
329  ctx->rc.buffer <<= 8;
330  if(ctx->ptr < ctx->data_end) {
331  ctx->rc.buffer += *ctx->ptr;
332  ctx->ptr++;
333  } else {
334  ctx->error = 1;
335  }
336  ctx->rc.low = (ctx->rc.low << 8) | ((ctx->rc.buffer >> 1) & 0xFF);
337  ctx->rc.range <<= 8;
338  }
339 }
340 
341 /**
342  * Calculate cumulative frequency for next symbol. Does NO update!
343  * @param ctx decoder context
344  * @param tot_f is the total frequency or (code_value)1<<shift
345  * @return the cumulative frequency
346  */
347 static inline int range_decode_culfreq(APEContext *ctx, int tot_f)
348 {
349  range_dec_normalize(ctx);
350  ctx->rc.help = ctx->rc.range / tot_f;
351  return ctx->rc.low / ctx->rc.help;
352 }
353 
354 /**
355  * Decode value with given size in bits
356  * @param ctx decoder context
357  * @param shift number of bits to decode
358  */
359 static inline int range_decode_culshift(APEContext *ctx, int shift)
360 {
361  range_dec_normalize(ctx);
362  ctx->rc.help = ctx->rc.range >> shift;
363  return ctx->rc.low / ctx->rc.help;
364 }
365 
366 
367 /**
368  * Update decoding state
369  * @param ctx decoder context
370  * @param sy_f the interval length (frequency of the symbol)
371  * @param lt_f the lower end (frequency sum of < symbols)
372  */
373 static inline void range_decode_update(APEContext *ctx, int sy_f, int lt_f)
374 {
375  ctx->rc.low -= ctx->rc.help * lt_f;
376  ctx->rc.range = ctx->rc.help * sy_f;
377 }
378 
379 /** Decode n bits (n <= 16) without modelling */
380 static inline int range_decode_bits(APEContext *ctx, int n)
381 {
382  int sym = range_decode_culshift(ctx, n);
383  range_decode_update(ctx, 1, sym);
384  return sym;
385 }
386 
387 
388 #define MODEL_ELEMENTS 64
389 
390 /**
391  * Fixed probabilities for symbols in Monkey Audio version 3.97
392  */
393 static const uint16_t counts_3970[22] = {
394  0, 14824, 28224, 39348, 47855, 53994, 58171, 60926,
395  62682, 63786, 64463, 64878, 65126, 65276, 65365, 65419,
396  65450, 65469, 65480, 65487, 65491, 65493,
397 };
398 
399 /**
400  * Probability ranges for symbols in Monkey Audio version 3.97
401  */
402 static const uint16_t counts_diff_3970[21] = {
403  14824, 13400, 11124, 8507, 6139, 4177, 2755, 1756,
404  1104, 677, 415, 248, 150, 89, 54, 31,
405  19, 11, 7, 4, 2,
406 };
407 
408 /**
409  * Fixed probabilities for symbols in Monkey Audio version 3.98
410  */
411 static const uint16_t counts_3980[22] = {
412  0, 19578, 36160, 48417, 56323, 60899, 63265, 64435,
413  64971, 65232, 65351, 65416, 65447, 65466, 65476, 65482,
414  65485, 65488, 65490, 65491, 65492, 65493,
415 };
416 
417 /**
418  * Probability ranges for symbols in Monkey Audio version 3.98
419  */
420 static const uint16_t counts_diff_3980[21] = {
421  19578, 16582, 12257, 7906, 4576, 2366, 1170, 536,
422  261, 119, 65, 31, 19, 10, 6, 3,
423  3, 2, 1, 1, 1,
424 };
425 
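The two *_diff tables are simply the first differences of the corresponding cumulative tables, i.e. counts_diff[i] == counts[i + 1] - counts[i] (for example 28224 - 14824 = 13400 in the 3.97 pair). A quick self-contained check of that invariant for the 3.97 tables copied from above:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static const uint16_t counts_3970[22] = {
        0, 14824, 28224, 39348, 47855, 53994, 58171, 60926,
    62682, 63786, 64463, 64878, 65126, 65276, 65365, 65419,
    65450, 65469, 65480, 65487, 65491, 65493,
};
static const uint16_t counts_diff_3970[21] = {
    14824, 13400, 11124, 8507, 6139, 4177, 2755, 1756,
     1104,   677,   415,  248,  150,   89,   54,   31,
       19,    11,     7,    4,    2,
};

int main(void)
{
    for (int i = 0; i < 21; i++)
        assert(counts_3970[i + 1] - counts_3970[i] == counts_diff_3970[i]);
    puts("counts_diff_3970 matches the first differences of counts_3970");
    return 0;
}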
426 /**
427  * Decode symbol
428  * @param ctx decoder context
429  * @param counts probability range start position
430  * @param counts_diff probability range widths
431  */
432 static inline int range_get_symbol(APEContext *ctx,
433  const uint16_t counts[],
434  const uint16_t counts_diff[])
435 {
436  int symbol, cf;
437 
438  cf = range_decode_culshift(ctx, 16);
439 
440  if(cf > 65492){
441  symbol= cf - 65535 + 63;
442  range_decode_update(ctx, 1, cf);
443  if(cf > 65535)
444  ctx->error=1;
445  return symbol;
446  }
447  /* figure out the symbol inefficiently; a binary search would be much better */
448  for (symbol = 0; counts[symbol + 1] <= cf; symbol++);
449 
450  range_decode_update(ctx, counts_diff[symbol], counts[symbol]);
451 
452  return symbol;
453 }
454 /** @} */ // group rangecoder
455 
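range_get_symbol() maps the 16-bit cumulative frequency cf returned by range_decode_culshift(ctx, 16) onto a symbol by a linear scan: the symbol is the largest index whose cumulative start is still <= cf. A minimal sketch of just that lookup step, outside the range coder, using a hypothetical toy table (toy_counts is not from apedec.c):

#include <stdint.h>
#include <stdio.h>

/* toy cumulative table: symbol widths 8, 4, 2, 1, 1 out of a total of 16 */
static const uint16_t toy_counts[6] = { 0, 8, 12, 14, 15, 16 };

static int lookup_symbol(int cf)
{
    int symbol;
    /* same linear search as range_get_symbol() */
    for (symbol = 0; toy_counts[symbol + 1] <= cf; symbol++)
        ;
    return symbol;
}

int main(void)
{
    for (int cf = 0; cf < 16; cf++)
        printf("cf=%2d -> symbol %d\n", cf, lookup_symbol(cf));
    return 0;
}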
456 static inline void update_rice(APERice *rice, unsigned int x)
457 {
458  int lim = rice->k ? (1 << (rice->k + 4)) : 0;
459  rice->ksum += ((x + 1) / 2) - ((rice->ksum + 16) >> 5);
460 
461  if (rice->ksum < lim)
462  rice->k--;
463  else if (rice->ksum >= (1 << (rice->k + 5)) && rice->k < 24)
464  rice->k++;
465 }
466 
467 static inline int get_rice_ook(GetBitContext *gb, int k)
468 {
469  unsigned int x;
470 
471  x = get_unary(gb, 1, get_bits_left(gb));
472 
473  if (k)
474  x = (x << k) | get_bits(gb, k);
475 
476  return x;
477 }
478 
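get_rice_ook() reads a plain Rice code: a unary quotient (get_unary) followed by k literal bits forming the remainder, so the decoded value is (quotient << k) | remainder. A self-contained sketch over a hand-rolled MSB-first bit reader (BitReader and rice_read are illustrative stand-ins, not FFmpeg's GetBitContext API); the assumption that the unary quotient is a run of 0 bits terminated by a 1 is my reading of get_unary() with stop == 1:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* minimal MSB-first bit reader (stand-in for GetBitContext) */
typedef struct { const uint8_t *buf; size_t pos; } BitReader;

static int read_bit(BitReader *br)
{
    int bit = (br->buf[br->pos >> 3] >> (7 - (br->pos & 7))) & 1;
    br->pos++;
    return bit;
}

/* Rice code: unary quotient (zeros terminated by a one), then k remainder bits */
static unsigned rice_read(BitReader *br, int k)
{
    unsigned q = 0, r = 0;
    while (read_bit(br) == 0)   /* assumed bit convention, see note above */
        q++;
    for (int i = 0; i < k; i++)
        r = (r << 1) | read_bit(br);
    return (q << k) | r;
}

int main(void)
{
    /* bits 001 then 101: quotient 2, k=3 remainder 5 -> 2*8 + 5 = 21 */
    const uint8_t data[] = { 0x34, 0x00 };   /* 0011 0100 ... */
    BitReader br = { data, 0 };
    printf("decoded value: %u\n", rice_read(&br, 3));
    return 0;
}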
479 static int ape_decode_value_3860(APEContext *ctx, GetBitContext *gb,
480                                  APERice *rice)
481 {
482  unsigned int x, overflow;
483 
484  overflow = get_unary(gb, 1, get_bits_left(gb));
485 
486  if (ctx->fileversion > 3880) {
487  while (overflow >= 16) {
488  overflow -= 16;
489  rice->k += 4;
490  }
491  }
492 
493  if (!rice->k)
494  x = overflow;
495  else if(rice->k <= MIN_CACHE_BITS) {
496  x = (overflow << rice->k) + get_bits(gb, rice->k);
497  } else {
498  av_log(ctx->avctx, AV_LOG_ERROR, "Too many bits: %"PRIu32"\n", rice->k);
499  ctx->error = 1;
500  return AVERROR_INVALIDDATA;
501  }
502  rice->ksum += x - (rice->ksum + 8 >> 4);
503  if (rice->ksum < (rice->k ? 1 << (rice->k + 4) : 0))
504  rice->k--;
505  else if (rice->ksum >= (1 << (rice->k + 5)) && rice->k < 24)
506  rice->k++;
507 
508  /* Convert to signed */
509  return ((x >> 1) ^ ((x & 1) - 1)) + 1;
510 }
511 
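The return expression ((x >> 1) ^ ((x & 1) - 1)) + 1 turns the unsigned code x into a signed residual: odd codes map to 1, 2, 3, ... and even codes to 0, -1, -2, ... (so 0->0, 1->1, 2->-1, 3->2, 4->-2). A tiny check of that mapping:

#include <stdio.h>

int main(void)
{
    for (unsigned x = 0; x < 7; x++) {
        int v = (int)(((x >> 1) ^ ((x & 1) - 1)) + 1);
        printf("x=%u -> %d\n", x, v);   /* 0 1 -1 2 -2 3 -3 */
    }
    return 0;
}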
512 static inline int ape_decode_value_3900(APEContext *ctx, APERice *rice)
513 {
514  unsigned int x, overflow;
515  int tmpk;
516 
517     overflow = range_get_symbol(ctx, counts_3970, counts_diff_3970);
518 
519  if (overflow == (MODEL_ELEMENTS - 1)) {
520  tmpk = range_decode_bits(ctx, 5);
521  overflow = 0;
522  } else
523  tmpk = (rice->k < 1) ? 0 : rice->k - 1;
524 
525  if (tmpk <= 16 || ctx->fileversion < 3910) {
526  if (tmpk > 23) {
527  av_log(ctx->avctx, AV_LOG_ERROR, "Too many bits: %d\n", tmpk);
528  return AVERROR_INVALIDDATA;
529  }
530  x = range_decode_bits(ctx, tmpk);
531  } else if (tmpk <= 31) {
532  x = range_decode_bits(ctx, 16);
533  x |= (range_decode_bits(ctx, tmpk - 16) << 16);
534  } else {
535  av_log(ctx->avctx, AV_LOG_ERROR, "Too many bits: %d\n", tmpk);
536  return AVERROR_INVALIDDATA;
537  }
538  x += overflow << tmpk;
539 
540  update_rice(rice, x);
541 
542  /* Convert to signed */
543  return ((x >> 1) ^ ((x & 1) - 1)) + 1;
544 }
545 
546 static inline int ape_decode_value_3990(APEContext *ctx, APERice *rice)
547 {
548  unsigned int x, overflow;
549  int base, pivot;
550 
551  pivot = rice->ksum >> 5;
552  if (pivot == 0)
553  pivot = 1;
554 
555     overflow = range_get_symbol(ctx, counts_3980, counts_diff_3980);
556 
557  if (overflow == (MODEL_ELEMENTS - 1)) {
558  overflow = (unsigned)range_decode_bits(ctx, 16) << 16;
559  overflow |= range_decode_bits(ctx, 16);
560  }
561 
562  if (pivot < 0x10000) {
563  base = range_decode_culfreq(ctx, pivot);
564  range_decode_update(ctx, 1, base);
565  } else {
566  int base_hi = pivot, base_lo;
567  int bbits = 0;
568 
569  while (base_hi & ~0xFFFF) {
570  base_hi >>= 1;
571  bbits++;
572  }
573  base_hi = range_decode_culfreq(ctx, base_hi + 1);
574  range_decode_update(ctx, 1, base_hi);
575  base_lo = range_decode_culfreq(ctx, 1 << bbits);
576  range_decode_update(ctx, 1, base_lo);
577 
578  base = (base_hi << bbits) + base_lo;
579  }
580 
581  x = base + overflow * pivot;
582 
583  update_rice(rice, x);
584 
585  /* Convert to signed */
586  return ((x >> 1) ^ ((x & 1) - 1)) + 1;
587 }
588 
589 static int get_k(int ksum)
590 {
591  return av_log2(ksum) + !!ksum;
592 }
593 
594 static void decode_array_0000(APEContext *ctx, GetBitContext *gb,
595                               int32_t *out, APERice *rice, int blockstodecode)
596 {
597  int i;
598  unsigned ksummax, ksummin;
599 
600  rice->ksum = 0;
601  for (i = 0; i < FFMIN(blockstodecode, 5); i++) {
602  out[i] = get_rice_ook(&ctx->gb, 10);
603  rice->ksum += out[i];
604  }
605 
606  if (blockstodecode <= 5)
607  goto end;
608 
609  rice->k = get_k(rice->ksum / 10);
610  if (rice->k >= 24)
611  return;
612  for (; i < FFMIN(blockstodecode, 64); i++) {
613  out[i] = get_rice_ook(&ctx->gb, rice->k);
614  rice->ksum += out[i];
615  rice->k = get_k(rice->ksum / ((i + 1) * 2));
616  if (rice->k >= 24)
617  return;
618  }
619 
620  if (blockstodecode <= 64)
621  goto end;
622 
623  rice->k = get_k(rice->ksum >> 7);
624  ksummax = 1 << rice->k + 7;
625  ksummin = rice->k ? (1 << rice->k + 6) : 0;
626  for (; i < blockstodecode; i++) {
627  if (get_bits_left(&ctx->gb) < 1) {
628  ctx->error = 1;
629  return;
630  }
631  out[i] = get_rice_ook(&ctx->gb, rice->k);
632  rice->ksum += out[i] - (unsigned)out[i - 64];
633  while (rice->ksum < ksummin) {
634  rice->k--;
635  ksummin = rice->k ? ksummin >> 1 : 0;
636  ksummax >>= 1;
637  }
638  while (rice->ksum >= ksummax) {
639  rice->k++;
640  if (rice->k > 24)
641  return;
642  ksummax <<= 1;
643  ksummin = ksummin ? ksummin << 1 : 128;
644  }
645  }
646 
647 end:
648  for (i = 0; i < blockstodecode; i++)
649  out[i] = ((out[i] >> 1) ^ ((out[i] & 1) - 1)) + 1;
650 }
651 
652 static void entropy_decode_mono_0000(APEContext *ctx, int blockstodecode)
653 {
654  decode_array_0000(ctx, &ctx->gb, ctx->decoded[0], &ctx->riceY,
655  blockstodecode);
656 }
657 
658 static void entropy_decode_stereo_0000(APEContext *ctx, int blockstodecode)
659 {
660  decode_array_0000(ctx, &ctx->gb, ctx->decoded[0], &ctx->riceY,
661  blockstodecode);
662  decode_array_0000(ctx, &ctx->gb, ctx->decoded[1], &ctx->riceX,
663  blockstodecode);
664 }
665 
666 static void entropy_decode_mono_3860(APEContext *ctx, int blockstodecode)
667 {
668  int32_t *decoded0 = ctx->decoded[0];
669 
670  while (blockstodecode--)
671  *decoded0++ = ape_decode_value_3860(ctx, &ctx->gb, &ctx->riceY);
672 }
673 
674 static void entropy_decode_stereo_3860(APEContext *ctx, int blockstodecode)
675 {
676  int32_t *decoded0 = ctx->decoded[0];
677  int32_t *decoded1 = ctx->decoded[1];
678  int blocks = blockstodecode;
679 
680  while (blockstodecode--)
681  *decoded0++ = ape_decode_value_3860(ctx, &ctx->gb, &ctx->riceY);
682  while (blocks--)
683  *decoded1++ = ape_decode_value_3860(ctx, &ctx->gb, &ctx->riceX);
684 }
685 
686 static void entropy_decode_mono_3900(APEContext *ctx, int blockstodecode)
687 {
688  int32_t *decoded0 = ctx->decoded[0];
689 
690  while (blockstodecode--)
691  *decoded0++ = ape_decode_value_3900(ctx, &ctx->riceY);
692 }
693 
694 static void entropy_decode_stereo_3900(APEContext *ctx, int blockstodecode)
695 {
696  int32_t *decoded0 = ctx->decoded[0];
697  int32_t *decoded1 = ctx->decoded[1];
698  int blocks = blockstodecode;
699 
700  while (blockstodecode--)
701  *decoded0++ = ape_decode_value_3900(ctx, &ctx->riceY);
702  range_dec_normalize(ctx);
703  // because of some implementation peculiarities we need to backpedal here
704  ctx->ptr -= 1;
705     range_start_decoding(ctx);
706     while (blocks--)
707  *decoded1++ = ape_decode_value_3900(ctx, &ctx->riceX);
708 }
709 
710 static void entropy_decode_stereo_3930(APEContext *ctx, int blockstodecode)
711 {
712  int32_t *decoded0 = ctx->decoded[0];
713  int32_t *decoded1 = ctx->decoded[1];
714 
715  while (blockstodecode--) {
716  *decoded0++ = ape_decode_value_3900(ctx, &ctx->riceY);
717  *decoded1++ = ape_decode_value_3900(ctx, &ctx->riceX);
718  }
719 }
720 
721 static void entropy_decode_mono_3990(APEContext *ctx, int blockstodecode)
722 {
723  int32_t *decoded0 = ctx->decoded[0];
724 
725  while (blockstodecode--)
726  *decoded0++ = ape_decode_value_3990(ctx, &ctx->riceY);
727 }
728 
729 static void entropy_decode_stereo_3990(APEContext *ctx, int blockstodecode)
730 {
731  int32_t *decoded0 = ctx->decoded[0];
732  int32_t *decoded1 = ctx->decoded[1];
733 
734  while (blockstodecode--) {
735  *decoded0++ = ape_decode_value_3990(ctx, &ctx->riceY);
736  *decoded1++ = ape_decode_value_3990(ctx, &ctx->riceX);
737  }
738 }
739 
740 static int init_entropy_decoder(APEContext *ctx)
741 {
742  /* Read the CRC */
743  if (ctx->fileversion >= 3900) {
744  if (ctx->data_end - ctx->ptr < 6)
745  return AVERROR_INVALIDDATA;
746  ctx->CRC = bytestream_get_be32(&ctx->ptr);
747  } else {
748  ctx->CRC = get_bits_long(&ctx->gb, 32);
749  }
750 
751  /* Read the frame flags if they exist */
752  ctx->frameflags = 0;
753  if ((ctx->fileversion > 3820) && (ctx->CRC & 0x80000000)) {
754  ctx->CRC &= ~0x80000000;
755 
756  if (ctx->data_end - ctx->ptr < 6)
757  return AVERROR_INVALIDDATA;
758  ctx->frameflags = bytestream_get_be32(&ctx->ptr);
759  }
760 
761  /* Initialize the rice structs */
762  ctx->riceX.k = 10;
763  ctx->riceX.ksum = (1 << ctx->riceX.k) * 16;
764  ctx->riceY.k = 10;
765  ctx->riceY.ksum = (1 << ctx->riceY.k) * 16;
766 
767  if (ctx->fileversion >= 3900) {
768  /* The first 8 bits of input are ignored. */
769  ctx->ptr++;
770 
771         range_start_decoding(ctx);
772     }
773 
774  return 0;
775 }
776 
777 static const int32_t initial_coeffs_fast_3320[1] = {
778     375,
779 };
780 
781 static const int32_t initial_coeffs_a_3800[3] = {
782  64, 115, 64,
783 };
784 
785 static const int32_t initial_coeffs_b_3800[2] = {
786  740, 0
787 };
788 
789 static const int32_t initial_coeffs_3930[4] = {
790  360, 317, -109, 98
791 };
792 
793 static void init_predictor_decoder(APEContext *ctx)
794 {
795  APEPredictor *p = &ctx->predictor;
796 
797  /* Zero the history buffers */
798  memset(p->historybuffer, 0, PREDICTOR_SIZE * sizeof(*p->historybuffer));
799  p->buf = p->historybuffer;
800 
801  /* Initialize and zero the coefficients */
802  if (ctx->fileversion < 3930) {
803         if (ctx->compression_level == COMPRESSION_LEVEL_FAST) {
804             memcpy(p->coeffsA[0], initial_coeffs_fast_3320,
805  sizeof(initial_coeffs_fast_3320));
806  memcpy(p->coeffsA[1], initial_coeffs_fast_3320,
807  sizeof(initial_coeffs_fast_3320));
808  } else {
809  memcpy(p->coeffsA[0], initial_coeffs_a_3800,
810  sizeof(initial_coeffs_a_3800));
811  memcpy(p->coeffsA[1], initial_coeffs_a_3800,
812  sizeof(initial_coeffs_a_3800));
813  }
814  } else {
815  memcpy(p->coeffsA[0], initial_coeffs_3930, sizeof(initial_coeffs_3930));
816  memcpy(p->coeffsA[1], initial_coeffs_3930, sizeof(initial_coeffs_3930));
817  }
818  memset(p->coeffsB, 0, sizeof(p->coeffsB));
819  if (ctx->fileversion < 3930) {
820  memcpy(p->coeffsB[0], initial_coeffs_b_3800,
821  sizeof(initial_coeffs_b_3800));
822  memcpy(p->coeffsB[1], initial_coeffs_b_3800,
823  sizeof(initial_coeffs_b_3800));
824  }
825 
826  p->filterA[0] = p->filterA[1] = 0;
827  p->filterB[0] = p->filterB[1] = 0;
828  p->lastA[0] = p->lastA[1] = 0;
829 
830  p->sample_pos = 0;
831 }
832 
833 /** Get inverse sign of integer (-1 for positive, 1 for negative and 0 for zero) */
834 static inline int APESIGN(int32_t x) {
835  return (x < 0) - (x > 0);
836 }
837 
838 static av_always_inline int filter_fast_3320(APEPredictor *p,
839                                              const int decoded, const int filter,
840  const int delayA)
841 {
842  int32_t predictionA;
843 
844  p->buf[delayA] = p->lastA[filter];
845  if (p->sample_pos < 3) {
846  p->lastA[filter] = decoded;
847  p->filterA[filter] = decoded;
848  return decoded;
849  }
850 
851  predictionA = p->buf[delayA] * 2U - p->buf[delayA - 1];
852  p->lastA[filter] = decoded + ((int32_t)(predictionA * p->coeffsA[filter][0]) >> 9);
853 
854  if ((decoded ^ predictionA) > 0)
855  p->coeffsA[filter][0]++;
856  else
857  p->coeffsA[filter][0]--;
858 
859  p->filterA[filter] += (unsigned)p->lastA[filter];
860 
861  return p->filterA[filter];
862 }
863 
864 static av_always_inline int filter_3800(APEPredictor *p,
865                                         const unsigned decoded, const int filter,
866  const int delayA, const int delayB,
867  const int start, const int shift)
868 {
869  int32_t predictionA, predictionB, sign;
870  int32_t d0, d1, d2, d3, d4;
871 
872  p->buf[delayA] = p->lastA[filter];
873  p->buf[delayB] = p->filterB[filter];
874  if (p->sample_pos < start) {
875  predictionA = decoded + p->filterA[filter];
876  p->lastA[filter] = decoded;
877  p->filterB[filter] = decoded;
878  p->filterA[filter] = predictionA;
879  return predictionA;
880  }
881  d2 = p->buf[delayA];
882  d1 = (p->buf[delayA] - p->buf[delayA - 1]) * 2U;
883  d0 = p->buf[delayA] + ((p->buf[delayA - 2] - p->buf[delayA - 1]) * 8U);
884  d3 = p->buf[delayB] * 2U - p->buf[delayB - 1];
885  d4 = p->buf[delayB];
886 
887  predictionA = d0 * p->coeffsA[filter][0] +
888  d1 * p->coeffsA[filter][1] +
889  d2 * p->coeffsA[filter][2];
890 
891  sign = APESIGN(decoded);
892  p->coeffsA[filter][0] += (((d0 >> 30) & 2) - 1) * sign;
893  p->coeffsA[filter][1] += (((d1 >> 28) & 8) - 4) * sign;
894  p->coeffsA[filter][2] += (((d2 >> 28) & 8) - 4) * sign;
895 
896  predictionB = d3 * p->coeffsB[filter][0] -
897  d4 * p->coeffsB[filter][1];
898  p->lastA[filter] = decoded + (predictionA >> 11);
899  sign = APESIGN(p->lastA[filter]);
900  p->coeffsB[filter][0] += (((d3 >> 29) & 4) - 2) * sign;
901  p->coeffsB[filter][1] -= (((d4 >> 30) & 2) - 1) * sign;
902 
903  p->filterB[filter] = p->lastA[filter] + (predictionB >> shift);
904  p->filterA[filter] = p->filterB[filter] + (unsigned)((int)(p->filterA[filter] * 31U) >> 5);
905 
906  return p->filterA[filter];
907 }
908 
909 static void long_filter_high_3800(int32_t *buffer, int order, int shift, int length)
910 {
911  int i, j;
912  int32_t dotprod, sign;
913  int32_t coeffs[256], delay[256];
914 
915  if (order >= length)
916  return;
917 
918  memset(coeffs, 0, order * sizeof(*coeffs));
919  for (i = 0; i < order; i++)
920  delay[i] = buffer[i];
921  for (i = order; i < length; i++) {
922  dotprod = 0;
923  sign = APESIGN(buffer[i]);
924  for (j = 0; j < order; j++) {
925  dotprod += delay[j] * (unsigned)coeffs[j];
926  coeffs[j] += ((delay[j] >> 31) | 1) * sign;
927  }
928  buffer[i] -= dotprod >> shift;
929  for (j = 0; j < order - 1; j++)
930  delay[j] = delay[j + 1];
931  delay[order - 1] = buffer[i];
932  }
933 }
934 
935 static void long_filter_ehigh_3830(int32_t *buffer, int length)
936 {
937  int i, j;
938  int32_t dotprod, sign;
939  int32_t delay[8] = { 0 };
940  uint32_t coeffs[8] = { 0 };
941 
942  for (i = 0; i < length; i++) {
943  dotprod = 0;
944  sign = APESIGN(buffer[i]);
945  for (j = 7; j >= 0; j--) {
946  dotprod += delay[j] * coeffs[j];
947  coeffs[j] += ((delay[j] >> 31) | 1) * sign;
948  }
949  for (j = 7; j > 0; j--)
950  delay[j] = delay[j - 1];
951  delay[0] = buffer[i];
952  buffer[i] -= dotprod >> 9;
953  }
954 }
955 
956 static void predictor_decode_stereo_3800(APEContext *ctx, int count)
957 {
958  APEPredictor *p = &ctx->predictor;
959  int32_t *decoded0 = ctx->decoded[0];
960  int32_t *decoded1 = ctx->decoded[1];
961  int start = 4, shift = 10;
962 
963     if (ctx->compression_level == COMPRESSION_LEVEL_HIGH) {
964         start = 16;
965  long_filter_high_3800(decoded0, 16, 9, count);
966  long_filter_high_3800(decoded1, 16, 9, count);
967  } else if (ctx->compression_level == COMPRESSION_LEVEL_EXTRA_HIGH) {
968  int order = 128, shift2 = 11;
969 
970  if (ctx->fileversion >= 3830) {
971  order <<= 1;
972  shift++;
973  shift2++;
974  long_filter_ehigh_3830(decoded0 + order, count - order);
975  long_filter_ehigh_3830(decoded1 + order, count - order);
976  }
977  start = order;
978  long_filter_high_3800(decoded0, order, shift2, count);
979  long_filter_high_3800(decoded1, order, shift2, count);
980  }
981 
982  while (count--) {
983  int X = *decoded0, Y = *decoded1;
984         if (ctx->compression_level == COMPRESSION_LEVEL_FAST) {
985             *decoded0 = filter_fast_3320(p, Y, 0, YDELAYA);
986  decoded0++;
987  *decoded1 = filter_fast_3320(p, X, 1, XDELAYA);
988  decoded1++;
989  } else {
990  *decoded0 = filter_3800(p, Y, 0, YDELAYA, YDELAYB,
991  start, shift);
992  decoded0++;
993  *decoded1 = filter_3800(p, X, 1, XDELAYA, XDELAYB,
994  start, shift);
995  decoded1++;
996  }
997 
998  /* Combined */
999  p->buf++;
1000  p->sample_pos++;
1001 
1002  /* Have we filled the history buffer? */
1003  if (p->buf == p->historybuffer + HISTORY_SIZE) {
1004  memmove(p->historybuffer, p->buf,
1005  PREDICTOR_SIZE * sizeof(*p->historybuffer));
1006  p->buf = p->historybuffer;
1007  }
1008  }
1009 }
1010 
1011 static void predictor_decode_mono_3800(APEContext *ctx, int count)
1012 {
1013  APEPredictor *p = &ctx->predictor;
1014  int32_t *decoded0 = ctx->decoded[0];
1015  int start = 4, shift = 10;
1016 
1017     if (ctx->compression_level == COMPRESSION_LEVEL_HIGH) {
1018         start = 16;
1019  long_filter_high_3800(decoded0, 16, 9, count);
1020  } else if (ctx->compression_level == COMPRESSION_LEVEL_EXTRA_HIGH) {
1021  int order = 128, shift2 = 11;
1022 
1023  if (ctx->fileversion >= 3830) {
1024  order <<= 1;
1025  shift++;
1026  shift2++;
1027  long_filter_ehigh_3830(decoded0 + order, count - order);
1028  }
1029  start = order;
1030  long_filter_high_3800(decoded0, order, shift2, count);
1031  }
1032 
1033  while (count--) {
1034         if (ctx->compression_level == COMPRESSION_LEVEL_FAST) {
1035             *decoded0 = filter_fast_3320(p, *decoded0, 0, YDELAYA);
1036  decoded0++;
1037  } else {
1038  *decoded0 = filter_3800(p, *decoded0, 0, YDELAYA, YDELAYB,
1039  start, shift);
1040  decoded0++;
1041  }
1042 
1043  /* Combined */
1044  p->buf++;
1045  p->sample_pos++;
1046 
1047  /* Have we filled the history buffer? */
1048  if (p->buf == p->historybuffer + HISTORY_SIZE) {
1049  memmove(p->historybuffer, p->buf,
1050  PREDICTOR_SIZE * sizeof(*p->historybuffer));
1051  p->buf = p->historybuffer;
1052  }
1053  }
1054 }
1055 
1056 static av_always_inline int predictor_update_3930(APEPredictor *p,
1057                                                   const int decoded, const int filter,
1058  const int delayA)
1059 {
1060  int32_t predictionA, sign;
1061  int32_t d0, d1, d2, d3;
1062 
1063  p->buf[delayA] = p->lastA[filter];
1064  d0 = p->buf[delayA ];
1065  d1 = p->buf[delayA ] - p->buf[delayA - 1];
1066  d2 = p->buf[delayA - 1] - p->buf[delayA - 2];
1067  d3 = p->buf[delayA - 2] - p->buf[delayA - 3];
1068 
1069  predictionA = d0 * p->coeffsA[filter][0] +
1070  d1 * p->coeffsA[filter][1] +
1071  d2 * p->coeffsA[filter][2] +
1072  d3 * p->coeffsA[filter][3];
1073 
1074  p->lastA[filter] = decoded + (predictionA >> 9);
1075  p->filterA[filter] = p->lastA[filter] + ((int)(p->filterA[filter] * 31U) >> 5);
1076 
1077  sign = APESIGN(decoded);
1078  p->coeffsA[filter][0] += ((d0 < 0) * 2 - 1) * sign;
1079  p->coeffsA[filter][1] += ((d1 < 0) * 2 - 1) * sign;
1080  p->coeffsA[filter][2] += ((d2 < 0) * 2 - 1) * sign;
1081  p->coeffsA[filter][3] += ((d3 < 0) * 2 - 1) * sign;
1082 
1083  return p->filterA[filter];
1084 }
1085 
1086 static void predictor_decode_stereo_3930(APEContext *ctx, int count)
1087 {
1088  APEPredictor *p = &ctx->predictor;
1089  int32_t *decoded0 = ctx->decoded[0];
1090  int32_t *decoded1 = ctx->decoded[1];
1091 
1092  ape_apply_filters(ctx, ctx->decoded[0], ctx->decoded[1], count);
1093 
1094  while (count--) {
1095  /* Predictor Y */
1096  int Y = *decoded1, X = *decoded0;
1097  *decoded0 = predictor_update_3930(p, Y, 0, YDELAYA);
1098  decoded0++;
1099  *decoded1 = predictor_update_3930(p, X, 1, XDELAYA);
1100  decoded1++;
1101 
1102  /* Combined */
1103  p->buf++;
1104 
1105  /* Have we filled the history buffer? */
1106  if (p->buf == p->historybuffer + HISTORY_SIZE) {
1107  memmove(p->historybuffer, p->buf,
1108  PREDICTOR_SIZE * sizeof(*p->historybuffer));
1109  p->buf = p->historybuffer;
1110  }
1111  }
1112 }
1113 
1114 static void predictor_decode_mono_3930(APEContext *ctx, int count)
1115 {
1116  APEPredictor *p = &ctx->predictor;
1117  int32_t *decoded0 = ctx->decoded[0];
1118 
1119  ape_apply_filters(ctx, ctx->decoded[0], NULL, count);
1120 
1121  while (count--) {
1122  *decoded0 = predictor_update_3930(p, *decoded0, 0, YDELAYA);
1123  decoded0++;
1124 
1125  p->buf++;
1126 
1127  /* Have we filled the history buffer? */
1128  if (p->buf == p->historybuffer + HISTORY_SIZE) {
1129  memmove(p->historybuffer, p->buf,
1130  PREDICTOR_SIZE * sizeof(*p->historybuffer));
1131  p->buf = p->historybuffer;
1132  }
1133  }
1134 }
1135 
1136 static av_always_inline int predictor_update_filter(APEPredictor *p,
1137                                                     const int decoded, const int filter,
1138  const int delayA, const int delayB,
1139  const int adaptA, const int adaptB)
1140 {
1141  int32_t predictionA, predictionB, sign;
1142 
1143  p->buf[delayA] = p->lastA[filter];
1144  p->buf[adaptA] = APESIGN(p->buf[delayA]);
1145  p->buf[delayA - 1] = p->buf[delayA] - (unsigned)p->buf[delayA - 1];
1146  p->buf[adaptA - 1] = APESIGN(p->buf[delayA - 1]);
1147 
1148  predictionA = p->buf[delayA ] * p->coeffsA[filter][0] +
1149  p->buf[delayA - 1] * p->coeffsA[filter][1] +
1150  p->buf[delayA - 2] * p->coeffsA[filter][2] +
1151  p->buf[delayA - 3] * p->coeffsA[filter][3];
1152 
1153  /* Apply a scaled first-order filter compression */
1154  p->buf[delayB] = p->filterA[filter ^ 1] - ((int)(p->filterB[filter] * 31U) >> 5);
1155  p->buf[adaptB] = APESIGN(p->buf[delayB]);
1156  p->buf[delayB - 1] = p->buf[delayB] - (unsigned)p->buf[delayB - 1];
1157  p->buf[adaptB - 1] = APESIGN(p->buf[delayB - 1]);
1158  p->filterB[filter] = p->filterA[filter ^ 1];
1159 
1160  predictionB = p->buf[delayB ] * p->coeffsB[filter][0] +
1161  p->buf[delayB - 1] * p->coeffsB[filter][1] +
1162  p->buf[delayB - 2] * p->coeffsB[filter][2] +
1163  p->buf[delayB - 3] * p->coeffsB[filter][3] +
1164  p->buf[delayB - 4] * p->coeffsB[filter][4];
1165 
1166  p->lastA[filter] = decoded + ((int)((unsigned)predictionA + (predictionB >> 1)) >> 10);
1167  p->filterA[filter] = p->lastA[filter] + ((int)(p->filterA[filter] * 31U) >> 5);
1168 
1169  sign = APESIGN(decoded);
1170  p->coeffsA[filter][0] += p->buf[adaptA ] * sign;
1171  p->coeffsA[filter][1] += p->buf[adaptA - 1] * sign;
1172  p->coeffsA[filter][2] += p->buf[adaptA - 2] * sign;
1173  p->coeffsA[filter][3] += p->buf[adaptA - 3] * sign;
1174  p->coeffsB[filter][0] += p->buf[adaptB ] * sign;
1175  p->coeffsB[filter][1] += p->buf[adaptB - 1] * sign;
1176  p->coeffsB[filter][2] += p->buf[adaptB - 2] * sign;
1177  p->coeffsB[filter][3] += p->buf[adaptB - 3] * sign;
1178  p->coeffsB[filter][4] += p->buf[adaptB - 4] * sign;
1179 
1180  return p->filterA[filter];
1181 }
1182 
1183 static void predictor_decode_stereo_3950(APEContext *ctx, int count)
1184 {
1185  APEPredictor *p = &ctx->predictor;
1186  int32_t *decoded0 = ctx->decoded[0];
1187  int32_t *decoded1 = ctx->decoded[1];
1188 
1189  ape_apply_filters(ctx, ctx->decoded[0], ctx->decoded[1], count);
1190 
1191  while (count--) {
1192  /* Predictor Y */
1193  *decoded0 = predictor_update_filter(p, *decoded0, 0, YDELAYA, YDELAYB,
1194                                             YADAPTCOEFFSA, YADAPTCOEFFSB);
1195         decoded0++;
1196  *decoded1 = predictor_update_filter(p, *decoded1, 1, XDELAYA, XDELAYB,
1197                                             XADAPTCOEFFSA, XADAPTCOEFFSB);
1198         decoded1++;
1199 
1200  /* Combined */
1201  p->buf++;
1202 
1203  /* Have we filled the history buffer? */
1204  if (p->buf == p->historybuffer + HISTORY_SIZE) {
1205  memmove(p->historybuffer, p->buf,
1206  PREDICTOR_SIZE * sizeof(*p->historybuffer));
1207  p->buf = p->historybuffer;
1208  }
1209  }
1210 }
1211 
1212 static void predictor_decode_mono_3950(APEContext *ctx, int count)
1213 {
1214  APEPredictor *p = &ctx->predictor;
1215  int32_t *decoded0 = ctx->decoded[0];
1216  int32_t predictionA, currentA, A, sign;
1217 
1218  ape_apply_filters(ctx, ctx->decoded[0], NULL, count);
1219 
1220  currentA = p->lastA[0];
1221 
1222  while (count--) {
1223  A = *decoded0;
1224 
1225  p->buf[YDELAYA] = currentA;
1226  p->buf[YDELAYA - 1] = p->buf[YDELAYA] - (unsigned)p->buf[YDELAYA - 1];
1227 
1228  predictionA = p->buf[YDELAYA ] * p->coeffsA[0][0] +
1229  p->buf[YDELAYA - 1] * p->coeffsA[0][1] +
1230  p->buf[YDELAYA - 2] * p->coeffsA[0][2] +
1231  p->buf[YDELAYA - 3] * p->coeffsA[0][3];
1232 
1233  currentA = A + (unsigned)(predictionA >> 10);
1234 
1235  p->buf[YADAPTCOEFFSA] = APESIGN(p->buf[YDELAYA ]);
1236  p->buf[YADAPTCOEFFSA - 1] = APESIGN(p->buf[YDELAYA - 1]);
1237 
1238  sign = APESIGN(A);
1239  p->coeffsA[0][0] += p->buf[YADAPTCOEFFSA ] * sign;
1240  p->coeffsA[0][1] += p->buf[YADAPTCOEFFSA - 1] * sign;
1241  p->coeffsA[0][2] += p->buf[YADAPTCOEFFSA - 2] * sign;
1242  p->coeffsA[0][3] += p->buf[YADAPTCOEFFSA - 3] * sign;
1243 
1244  p->buf++;
1245 
1246  /* Have we filled the history buffer? */
1247  if (p->buf == p->historybuffer + HISTORY_SIZE) {
1248  memmove(p->historybuffer, p->buf,
1249  PREDICTOR_SIZE * sizeof(*p->historybuffer));
1250  p->buf = p->historybuffer;
1251  }
1252 
1253  p->filterA[0] = currentA + (unsigned)((int)(p->filterA[0] * 31U) >> 5);
1254  *(decoded0++) = p->filterA[0];
1255  }
1256 
1257  p->lastA[0] = currentA;
1258 }
1259 
1260 static void do_init_filter(APEFilter *f, int16_t *buf, int order)
1261 {
1262  f->coeffs = buf;
1263  f->historybuffer = buf + order;
1264  f->delay = f->historybuffer + order * 2;
1265  f->adaptcoeffs = f->historybuffer + order;
1266 
1267  memset(f->historybuffer, 0, (order * 2) * sizeof(*f->historybuffer));
1268  memset(f->coeffs, 0, order * sizeof(*f->coeffs));
1269  f->avg = 0;
1270 }
1271 
1272 static void init_filter(APEContext *ctx, APEFilter *f, int16_t *buf, int order)
1273 {
1274  do_init_filter(&f[0], buf, order);
1275  do_init_filter(&f[1], buf + order * 3 + HISTORY_SIZE, order);
1276 }
1277 
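init_filter() carves one int16_t allocation into the two per-channel filters: each filter's slice holds order coefficients followed by a history region in which adaptcoeffs begins order entries in and delay begins order * 2 entries in, and the second filter's slice starts at offset order * 3 + HISTORY_SIZE, matching the (orders * 3 + HISTORY_SIZE) * 4 byte allocation in ape_decode_init(). A small sketch that just prints those offsets for a hypothetical order of 64:

#include <stdio.h>

#define HISTORY_SIZE 512

int main(void)
{
    int order = 64;   /* hypothetical filter order */
    int per_filter = order * 3 + HISTORY_SIZE;

    for (int ch = 0; ch < 2; ch++) {
        int base = ch * per_filter;   /* base offset used by do_init_filter() */
        printf("filter %d: coeffs@%d history@%d adaptcoeffs@%d delay@%d\n",
               ch, base, base + order, base + order * 2, base + order * 3);
    }
    printf("total int16_t elements: %d\n", 2 * per_filter);
    return 0;
}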
1278 static void do_apply_filter(APEContext *ctx, int version, APEFilter *f,
1279                             int32_t *data, int count, int order, int fracbits)
1280 {
1281  int res;
1282  int absres;
1283 
1284  while (count--) {
1285  /* round fixedpoint scalar product */
1286         res = ctx->adsp.scalarproduct_and_madd_int16(f->coeffs,
1287                                                      f->delay - order,
1288  f->adaptcoeffs - order,
1289  order, APESIGN(*data));
1290  res = (int)(res + (1U << (fracbits - 1))) >> fracbits;
1291  res += (unsigned)*data;
1292  *data++ = res;
1293 
1294  /* Update the output history */
1295  *f->delay++ = av_clip_int16(res);
1296 
1297  if (version < 3980) {
1298  /* Version ??? to < 3.98 files (untested) */
1299  f->adaptcoeffs[0] = (res == 0) ? 0 : ((res >> 28) & 8) - 4;
1300  f->adaptcoeffs[-4] >>= 1;
1301  f->adaptcoeffs[-8] >>= 1;
1302  } else {
1303  /* Version 3.98 and later files */
1304 
1305  /* Update the adaption coefficients */
1306  absres = res < 0 ? -(unsigned)res : res;
1307  if (absres)
1308  *f->adaptcoeffs = APESIGN(res) *
1309  (8 << ((absres > f->avg * 3) + (absres > f->avg * 4 / 3)));
1310  /* equivalent to the following code
1311  if (absres <= f->avg * 4 / 3)
1312  *f->adaptcoeffs = APESIGN(res) * 8;
1313  else if (absres <= f->avg * 3)
1314  *f->adaptcoeffs = APESIGN(res) * 16;
1315  else
1316  *f->adaptcoeffs = APESIGN(res) * 32;
1317  */
1318  else
1319  *f->adaptcoeffs = 0;
1320 
1321  f->avg += (int)(absres - (unsigned)f->avg) / 16;
1322 
1323  f->adaptcoeffs[-1] >>= 1;
1324  f->adaptcoeffs[-2] >>= 1;
1325  f->adaptcoeffs[-8] >>= 1;
1326  }
1327 
1328  f->adaptcoeffs++;
1329 
1330  /* Have we filled the history buffer? */
1331  if (f->delay == f->historybuffer + HISTORY_SIZE + (order * 2)) {
1332  memmove(f->historybuffer, f->delay - (order * 2),
1333  (order * 2) * sizeof(*f->historybuffer));
1334  f->delay = f->historybuffer + order * 2;
1335  f->adaptcoeffs = f->historybuffer + order;
1336  }
1337  }
1338 }
1339 
1340 static void apply_filter(APEContext *ctx, APEFilter *f,
1341                          int32_t *data0, int32_t *data1,
1342  int count, int order, int fracbits)
1343 {
1344  do_apply_filter(ctx, ctx->fileversion, &f[0], data0, count, order, fracbits);
1345  if (data1)
1346  do_apply_filter(ctx, ctx->fileversion, &f[1], data1, count, order, fracbits);
1347 }
1348 
1349 static void ape_apply_filters(APEContext *ctx, int32_t *decoded0,
1350  int32_t *decoded1, int count)
1351 {
1352  int i;
1353 
1354  for (i = 0; i < APE_FILTER_LEVELS; i++) {
1355  if (!ape_filter_orders[ctx->fset][i])
1356  break;
1357  apply_filter(ctx, ctx->filters[i], decoded0, decoded1, count,
1358  ape_filter_orders[ctx->fset][i],
1359  ape_filter_fracbits[ctx->fset][i]);
1360  }
1361 }
1362 
1363 static int init_frame_decoder(APEContext *ctx)
1364 {
1365  int i, ret;
1366  if ((ret = init_entropy_decoder(ctx)) < 0)
1367  return ret;
1368     init_predictor_decoder(ctx);
1369 
1370  for (i = 0; i < APE_FILTER_LEVELS; i++) {
1371  if (!ape_filter_orders[ctx->fset][i])
1372  break;
1373  init_filter(ctx, ctx->filters[i], ctx->filterbuf[i],
1374  ape_filter_orders[ctx->fset][i]);
1375  }
1376  return 0;
1377 }
1378 
1379 static void ape_unpack_mono(APEContext *ctx, int count)
1380 {
1381     if (ctx->frameflags & APE_FRAMECODE_STEREO_SILENCE) {
1382         /* We are pure silence, so we're done. */
1383  av_log(ctx->avctx, AV_LOG_DEBUG, "pure silence mono\n");
1384  return;
1385  }
1386 
1387  ctx->entropy_decode_mono(ctx, count);
1388  if (ctx->error)
1389  return;
1390 
1391  /* Now apply the predictor decoding */
1392  ctx->predictor_decode_mono(ctx, count);
1393 
1394  /* Pseudo-stereo - just copy left channel to right channel */
1395  if (ctx->channels == 2) {
1396  memcpy(ctx->decoded[1], ctx->decoded[0], count * sizeof(*ctx->decoded[1]));
1397  }
1398 }
1399 
1400 static void ape_unpack_stereo(APEContext *ctx, int count)
1401 {
1402  unsigned left, right;
1403  int32_t *decoded0 = ctx->decoded[0];
1404  int32_t *decoded1 = ctx->decoded[1];
1405 
1406     if ((ctx->frameflags & APE_FRAMECODE_STEREO_SILENCE) == APE_FRAMECODE_STEREO_SILENCE) {
1407         /* We are pure silence, so we're done. */
1408  av_log(ctx->avctx, AV_LOG_DEBUG, "pure silence stereo\n");
1409  return;
1410  }
1411 
1412  ctx->entropy_decode_stereo(ctx, count);
1413  if (ctx->error)
1414  return;
1415 
1416  /* Now apply the predictor decoding */
1417  ctx->predictor_decode_stereo(ctx, count);
1418 
1419  /* Decorrelate and scale to output depth */
1420  while (count--) {
1421  left = *decoded1 - (unsigned)(*decoded0 / 2);
1422  right = left + *decoded0;
1423 
1424  *(decoded0++) = left;
1425  *(decoded1++) = right;
1426  }
1427 }
1428 
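The final loop above undoes the stereo decorrelation: decoded0 carries the channel difference and decoded1 a mid value, and the outputs are left = mid - diff / 2, right = left + diff. A self-contained round-trip sketch; the forward direction here is my own reconstruction of the matching encoder-side step, inferred from the decoder, not code taken from libavcodec:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int32_t left = 1234, right = -567;

    /* assumed encoder-side decorrelation */
    int32_t diff = right - left;
    int32_t mid  = left + diff / 2;

    /* inverse, as in ape_unpack_stereo() */
    int32_t out_left  = mid - diff / 2;
    int32_t out_right = out_left + diff;

    printf("round trip: %d %d -> %d %d\n",
           (int)left, (int)right, (int)out_left, (int)out_right);
    return 0;
}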
1429 static int ape_decode_frame(AVCodecContext *avctx, void *data,
1430                             int *got_frame_ptr, AVPacket *avpkt)
1431 {
1432  AVFrame *frame = data;
1433  const uint8_t *buf = avpkt->data;
1434  APEContext *s = avctx->priv_data;
1435  uint8_t *sample8;
1436  int16_t *sample16;
1437  int32_t *sample24;
1438  int i, ch, ret;
1439  int blockstodecode;
1440  uint64_t decoded_buffer_size;
1441 
1442  /* this should never be negative, but bad things will happen if it is, so
1443  check it just to make sure. */
1444  av_assert0(s->samples >= 0);
1445 
1446  if(!s->samples){
1447  uint32_t nblocks, offset;
1448  int buf_size;
1449 
1450  if (!avpkt->size) {
1451  *got_frame_ptr = 0;
1452  return 0;
1453  }
1454  if (avpkt->size < 8) {
1455  av_log(avctx, AV_LOG_ERROR, "Packet is too small\n");
1456  return AVERROR_INVALIDDATA;
1457  }
1458  buf_size = avpkt->size & ~3;
1459  if (buf_size != avpkt->size) {
1460  av_log(avctx, AV_LOG_WARNING, "packet size is not a multiple of 4. "
1461  "extra bytes at the end will be skipped.\n");
1462  }
1463  if (s->fileversion < 3950) // previous versions overread two bytes
1464  buf_size += 2;
1465  av_fast_padded_malloc(&s->data, &s->data_size, buf_size);
1466  if (!s->data)
1467  return AVERROR(ENOMEM);
1468  s->bdsp.bswap_buf((uint32_t *) s->data, (const uint32_t *) buf,
1469  buf_size >> 2);
1470  memset(s->data + (buf_size & ~3), 0, buf_size & 3);
1471  s->ptr = s->data;
1472  s->data_end = s->data + buf_size;
1473 
1474  nblocks = bytestream_get_be32(&s->ptr);
1475  offset = bytestream_get_be32(&s->ptr);
1476  if (s->fileversion >= 3900) {
1477  if (offset > 3) {
1478  av_log(avctx, AV_LOG_ERROR, "Incorrect offset passed\n");
1479  av_freep(&s->data);
1480  s->data_size = 0;
1481  return AVERROR_INVALIDDATA;
1482  }
1483  if (s->data_end - s->ptr < offset) {
1484  av_log(avctx, AV_LOG_ERROR, "Packet is too small\n");
1485  return AVERROR_INVALIDDATA;
1486  }
1487  s->ptr += offset;
1488  } else {
1489  if ((ret = init_get_bits8(&s->gb, s->ptr, s->data_end - s->ptr)) < 0)
1490  return ret;
1491  if (s->fileversion > 3800)
1492  skip_bits_long(&s->gb, offset * 8);
1493  else
1494  skip_bits_long(&s->gb, offset);
1495  }
1496 
1497  if (!nblocks || nblocks > INT_MAX / 2 / sizeof(*s->decoded_buffer) - 8) {
1498  av_log(avctx, AV_LOG_ERROR, "Invalid sample count: %"PRIu32".\n",
1499  nblocks);
1500  return AVERROR_INVALIDDATA;
1501  }
1502 
1503  /* Initialize the frame decoder */
1504  if (init_frame_decoder(s) < 0) {
1505  av_log(avctx, AV_LOG_ERROR, "Error reading frame header\n");
1506  return AVERROR_INVALIDDATA;
1507  }
1508  s->samples = nblocks;
1509  }
1510 
1511  if (!s->data) {
1512  *got_frame_ptr = 0;
1513  return avpkt->size;
1514  }
1515 
1516  blockstodecode = FFMIN(s->blocks_per_loop, s->samples);
1517  // for old files coefficients were not interleaved,
1518  // so we need to decode all of them at once
1519  if (s->fileversion < 3930)
1520  blockstodecode = s->samples;
1521 
1522  /* reallocate decoded sample buffer if needed */
1523  decoded_buffer_size = 2LL * FFALIGN(blockstodecode, 8) * sizeof(*s->decoded_buffer);
1524  av_assert0(decoded_buffer_size <= INT_MAX);
1525 
1526  /* get output buffer */
1527  frame->nb_samples = blockstodecode;
1528  if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
1529  s->samples=0;
1530  return ret;
1531  }
1532 
1533  av_fast_malloc(&s->decoded_buffer, &s->decoded_size, decoded_buffer_size);
1534  if (!s->decoded_buffer)
1535  return AVERROR(ENOMEM);
1536  memset(s->decoded_buffer, 0, decoded_buffer_size);
1537  s->decoded[0] = s->decoded_buffer;
1538  s->decoded[1] = s->decoded_buffer + FFALIGN(blockstodecode, 8);
1539 
1540  s->error=0;
1541 
1542  if ((s->channels == 1) || (s->frameflags & APE_FRAMECODE_PSEUDO_STEREO))
1543  ape_unpack_mono(s, blockstodecode);
1544  else
1545  ape_unpack_stereo(s, blockstodecode);
1546  emms_c();
1547 
1548  if (s->error) {
1549  s->samples=0;
1550  av_log(avctx, AV_LOG_ERROR, "Error decoding frame\n");
1551  return AVERROR_INVALIDDATA;
1552  }
1553 
1554  switch (s->bps) {
1555  case 8:
1556  for (ch = 0; ch < s->channels; ch++) {
1557  sample8 = (uint8_t *)frame->data[ch];
1558  for (i = 0; i < blockstodecode; i++)
1559  *sample8++ = (s->decoded[ch][i] + 0x80) & 0xff;
1560  }
1561  break;
1562  case 16:
1563  for (ch = 0; ch < s->channels; ch++) {
1564  sample16 = (int16_t *)frame->data[ch];
1565  for (i = 0; i < blockstodecode; i++)
1566  *sample16++ = s->decoded[ch][i];
1567  }
1568  break;
1569  case 24:
1570  for (ch = 0; ch < s->channels; ch++) {
1571  sample24 = (int32_t *)frame->data[ch];
1572  for (i = 0; i < blockstodecode; i++)
1573  *sample24++ = s->decoded[ch][i] * 256;
1574  }
1575  break;
1576  }
1577 
1578  s->samples -= blockstodecode;
1579 
1580  *got_frame_ptr = 1;
1581 
1582  return !s->samples ? avpkt->size : 0;
1583 }
1584 
1585 static void ape_flush(AVCodecContext *avctx)
1586 {
1587  APEContext *s = avctx->priv_data;
1588  s->samples= 0;
1589 }
1590 
1591 #define OFFSET(x) offsetof(APEContext, x)
1592 #define PAR (AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_AUDIO_PARAM)
1593 static const AVOption options[] = {
1594  { "max_samples", "maximum number of samples decoded per call", OFFSET(blocks_per_loop), AV_OPT_TYPE_INT, { .i64 = 4608 }, 1, INT_MAX, PAR, "max_samples" },
1595  { "all", "no maximum. decode all samples for each packet at once", 0, AV_OPT_TYPE_CONST, { .i64 = INT_MAX }, INT_MIN, INT_MAX, PAR, "max_samples" },
1596  { NULL},
1597 };
1598 
1599 static const AVClass ape_decoder_class = {
1600  .class_name = "APE decoder",
1601  .item_name = av_default_item_name,
1602  .option = options,
1603  .version = LIBAVUTIL_VERSION_INT,
1604 };
1605 
1606 AVCodec ff_ape_decoder = {
1607     .name           = "ape",
1608  .long_name = NULL_IF_CONFIG_SMALL("Monkey's Audio"),
1609  .type = AVMEDIA_TYPE_AUDIO,
1610  .id = AV_CODEC_ID_APE,
1611  .priv_data_size = sizeof(APEContext),
1612  .init = ape_decode_init,
1613     .close          = ape_decode_close,
1614     .decode         = ape_decode_frame,
1615     .capabilities   = AV_CODEC_CAP_SUBFRAMES | AV_CODEC_CAP_DELAY |
1616                       AV_CODEC_CAP_DR1,
1617     .flush          = ape_flush,
1618     .sample_fmts    = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_U8P,
1619                                                       AV_SAMPLE_FMT_S16P,
1620                                                       AV_SAMPLE_FMT_S32P,
1621                                                       AV_SAMPLE_FMT_NONE },
1622     .priv_class     = &ape_decoder_class,
1623 };
static int init_frame_decoder(APEContext *ctx)
Definition: apedec.c:1363
static const int32_t initial_coeffs_3930[4]
Definition: apedec.c:789
static void decode_array_0000(APEContext *ctx, GetBitContext *gb, int32_t *out, APERice *rice, int blockstodecode)
Definition: apedec.c:594
int compression_level
compression levels
Definition: apedec.c:146
AVCodec ff_ape_decoder
Definition: apedec.c:1606
#define MODEL_ELEMENTS
Definition: apedec.c:388
#define NULL
Definition: coverity.c:32
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
int decoded_size
Definition: apedec.c:155
#define YADAPTCOEFFSB
Definition: apedec.c:60
version
Definition: libkvazaar.c:292
static int shift(int a, int b)
Definition: sonic.c:82
This structure describes decoded (raw) audio or video data.
Definition: frame.h:295
static void range_start_decoding(APEContext *ctx)
Start the decoder.
Definition: apedec.c:318
AVOption.
Definition: opt.h:246
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:100
static void flush(AVCodecContext *avctx)
#define XDELAYA
Definition: apedec.c:55
static void apply_filter(APEContext *ctx, APEFilter *f, int32_t *data0, int32_t *data1, int count, int order, int fracbits)
Definition: apedec.c:1340
int fileversion
codec version, very important in decoding process
Definition: apedec.c:145
static void entropy_decode_stereo_0000(APEContext *ctx, int blockstodecode)
Definition: apedec.c:658
int32_t filterA[2]
Definition: apedec.c:125
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:379
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
static void skip_bits_long(GetBitContext *s, int n)
Skips the specified number of bits.
Definition: get_bits.h:291
void(* entropy_decode_mono)(struct APEContext *ctx, int blockstodecode)
Definition: apedec.c:174
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
#define avpriv_request_sample(...)
void(* entropy_decode_stereo)(struct APEContext *ctx, int blockstodecode)
Definition: apedec.c:175
static int APESIGN(int32_t x)
Get inverse sign of integer (-1 for positive, 1 for negative and 0 for zero)
Definition: apedec.c:834
static void update_rice(APERice *rice, unsigned int x)
Definition: apedec.c:456
int size
Definition: avcodec.h:1534
static void entropy_decode_stereo_3900(APEContext *ctx, int blockstodecode)
Definition: apedec.c:694
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:235
static av_cold int ape_decode_init(AVCodecContext *avctx)
Definition: apedec.c:215
unsigned int buffer
buffer for input/output
Definition: apedec.c:116
int av_log2(unsigned v)
Definition: intmath.c:26
static void long_filter_high_3800(int32_t *buffer, int order, int shift, int length)
Definition: apedec.c:909
static int init_entropy_decoder(APEContext *ctx)
Definition: apedec.c:740
static void ape_flush(AVCodecContext *avctx)
Definition: apedec.c:1585
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:70
static void entropy_decode_stereo_3930(APEContext *ctx, int blockstodecode)
Definition: apedec.c:710
static int get_k(int ksum)
Definition: apedec.c:589
static av_always_inline int predictor_update_3930(APEPredictor *p, const int decoded, const int filter, const int delayA)
Definition: apedec.c:1056
#define AV_CH_LAYOUT_STEREO
#define OFFSET(x)
Definition: apedec.c:1591
#define XADAPTCOEFFSA
Definition: apedec.c:59
AVCodec.
Definition: avcodec.h:3555
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_RL16
Definition: bytestream.h:87
Undefined Behavior In the C some operations are like signed integer overflow
Definition: undefined.txt:3
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
Definition: decode_audio.c:71
int16_t * filterbuf[APE_FILTER_LEVELS]
filter memory
Definition: apedec.c:159
static void predictor_decode_mono_3800(APEContext *ctx, int count)
Definition: apedec.c:1011
uint8_t base
Definition: vp3data.h:202
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:72
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: avcodec.h:1024
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
static int ape_decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr, AVPacket *avpkt)
Definition: apedec.c:1429
Filter histories.
Definition: apedec.c:120
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:2289
uint8_t
#define av_cold
Definition: attributes.h:82
int16_t * delay
filtered values
Definition: apedec.c:102
AVOptions.
void(* bswap_buf)(uint32_t *dst, const uint32_t *src, int w)
Definition: bswapdsp.h:25
static void do_init_filter(APEFilter *f, int16_t *buf, int order)
Definition: apedec.c:1260
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
#define f(width, name)
Definition: cbs_vp9.c:255
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:90
static const int32_t initial_coeffs_a_3800[3]
Definition: apedec.c:781
static void entropy_decode_stereo_3860(APEContext *ctx, int blockstodecode)
Definition: apedec.c:674
static void entropy_decode_mono_3990(APEContext *ctx, int blockstodecode)
Definition: apedec.c:721
static void ape_unpack_mono(APEContext *ctx, int count)
Definition: apedec.c:1379
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:1722
APERangecoder rc
rangecoder used to decode actual values
Definition: apedec.c:161
#define YDELAYB
Definition: apedec.c:54
static const uint8_t ape_filter_fracbits[5][APE_FILTER_LEVELS]
Filter fraction bits depending on compression level.
Definition: apedec.c:88
uint8_t * data
Definition: avcodec.h:1533
static void ape_apply_filters(APEContext *ctx, int32_t *decoded0, int32_t *decoded1, int count)
Definition: apedec.c:1349
bitstream reader API header.
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:192
int bits_per_coded_sample
bits per sample/pixel from the demuxer (needed for huffyuv).
Definition: avcodec.h:2845
Decoder context.
Definition: apedec.c:136
#define A(x)
Definition: vp56_arith.h:28
#define FFALIGN(x, a)
Definition: macros.h:48
#define av_log(a,...)
static const uint16_t counts_3970[22]
Fixed probabilities for symbols in Monkey Audio version 3.97.
Definition: apedec.c:393
static void range_dec_normalize(APEContext *ctx)
Perform normalization.
Definition: apedec.c:326
#define U(x)
Definition: vp56_arith.h:37
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:849
static const uint16_t counts_diff_3980[21]
Probability ranges for symbols in Monkey Audio version 3.98.
Definition: apedec.c:420
int bps
Definition: apedec.c:143
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:259
void(* predictor_decode_mono)(struct APEContext *ctx, int count)
Definition: apedec.c:176
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
#define YDELAYA
Definition: apedec.c:53
int32_t lastA[2]
Definition: apedec.c:123
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce then the filter should push the output frames on the output link immediately As an exception to the previous rule if the input frame is enough to produce several output frames then the filter needs output only at least one per link The additional frames can be left buffered in the filter
static av_cold int ape_decode_close(AVCodecContext *avctx)
Definition: apedec.c:200
static int ape_decode_value_3900(APEContext *ctx, APERice *rice)
Definition: apedec.c:512
int32_t historybuffer[HISTORY_SIZE+PREDICTOR_SIZE]
Definition: apedec.c:130
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:186
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
#define XDELAYB
Definition: apedec.c:56
int32_t * decoded_buffer
Definition: apedec.c:154
simple assert() macros that are a bit more flexible than ISO C assert().
GLsizei GLsizei * length
Definition: opengl_enc.c:114
int avg
Definition: apedec.c:104
const char * name
Name of the codec implementation.
Definition: avcodec.h:3562
static int range_decode_culshift(APEContext *ctx, int shift)
Decode value with given size in bits.
Definition: apedec.c:359
#define APE_FILTER_LEVELS
Definition: apedec.c:76
GLsizei count
Definition: opengl_enc.c:108
int error
Definition: apedec.c:172
uint64_t channel_layout
Audio channel layout.
Definition: avcodec.h:2332
static int range_decode_bits(APEContext *ctx, int n)
Decode n bits (n <= 16) without modelling.
Definition: apedec.c:380
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:500
audio channel layout utility functions
#define Y
Definition: boxblur.h:38
static void predictor_decode_mono_3930(APEContext *ctx, int count)
Definition: apedec.c:1114
uint8_t * data
current frame data
Definition: apedec.c:167
static const uint16_t ape_filter_orders[5][APE_FILTER_LEVELS]
Filter orders depending on compression level.
Definition: apedec.c:79
#define FFMIN(a, b)
Definition: common.h:96
signed 32 bits, planar
Definition: samplefmt.h:68
static int get_rice_ook(GetBitContext *gb, int k)
Definition: apedec.c:467
typedef void(APIENTRY *FF_PFNGLACTIVETEXTUREPROC)(GLenum texture)
static av_always_inline int filter_fast_3320(APEPredictor *p, const int decoded, const int filter, const int delayA)
Definition: apedec.c:838
AVCodecContext * avctx
Definition: apedec.c:138
static void ape_unpack_stereo(APEContext *ctx, int count)
Definition: apedec.c:1400
const uint8_t * ptr
current position in frame data
Definition: apedec.c:170
int32_t
static int range_decode_culfreq(APEContext *ctx, int tot_f)
Calculate cumulative frequency for next symbol.
Definition: apedec.c:347
AVFormatContext * ctx
Definition: movenc.c:48
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
#define s(width, name)
Definition: cbs_vp9.c:257
int32_t(* scalarproduct_and_madd_int16)(int16_t *v1, const int16_t *v2, const int16_t *v3, int len, int mul)
Calculate scalar product of v1 and v2, and v1[i] += v3[i] * mul.
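A sketch of the plain-C reference behaviour behind this DSP hook (SIMD versions selected by ff_llauddsp_init() are expected to match it; the function name here is illustrative):
static int32_t scalarproduct_and_madd_int16_sketch(int16_t *v1, const int16_t *v2,
                                                   const int16_t *v3, int len, int mul)
{
    int32_t res = 0;
    int i;
    for (i = 0; i < len; i++) {
        res   += v1[i] * v2[i]; /* scalar product term */
        v1[i] += v3[i] * mul;   /* in-place adaptation of v1 */
    }
    return res;
}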
unsigned 8 bits, planar
Definition: samplefmt.h:66
static void predictor_decode_stereo_3930(APEContext *ctx, int count)
Definition: apedec.c:1086
uint32_t ksum
Definition: apedec.c:109
av_cold void ff_llauddsp_init(LLAudDSPContext *c)
uint32_t help
bytes_to_follow resp. intermediate value
Definition: apedec.c:115
uint32_t coeffsA[2][4]
adaptation coefficients
Definition: apedec.c:128
static void entropy_decode_stereo_3990(APEContext *ctx, int blockstodecode)
Definition: apedec.c:729
#define APE_FRAMECODE_PSEUDO_STEREO
Definition: apedec.c:46
uint32_t range
length of interval
Definition: apedec.c:114
if(ret)
int samples
samples left to decode in current frame
Definition: apedec.c:142
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
int fset
which filter set to use (calculated from compression level)
Definition: apedec.c:147
static int ape_decode_value_3860(APEContext *ctx, GetBitContext *gb, APERice *rice)
Definition: apedec.c:479
APERice riceX
rice code parameters for the second channel
Definition: apedec.c:162
Libavcodec external API header.
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:58
static void predictor_decode_stereo_3950(APEContext *ctx, int count)
Definition: apedec.c:1183
static void predictor_decode_stereo_3800(APEContext *ctx, int count)
Definition: apedec.c:956
LLAudDSPContext adsp
Definition: apedec.c:140
Definition: vf_addroi.c:26
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:677
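A minimal usage sketch, wrapping a caller-supplied byte buffer and reading one 32-bit field with get_bits_long() (buf, buf_size and crc are illustrative names):
GetBitContext gb;
uint32_t crc;
int ret = init_get_bits8(&gb, buf, buf_size);
if (ret < 0)
    return ret;
crc = get_bits_long(&gb, 32); /* read a 32-bit value, e.g. a frame CRC */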
#define APE_FRAMECODE_STEREO_SILENCE
Definition: apedec.c:45
static void init_filter(APEContext *ctx, APEFilter *f, int16_t *buf, int order)
Definition: apedec.c:1272
int frameflags
frame flags
Definition: apedec.c:151
main external API structure.
Definition: avcodec.h:1621
static av_always_inline int filter_3800(APEPredictor *p, const unsigned decoded, const int filter, const int delayA, const int delayB, const int start, const int shift)
Definition: apedec.c:864
static int ape_decode_value_3990(APEContext *ctx, APERice *rice)
Definition: apedec.c:546
uint32_t CRC
frame CRC
Definition: apedec.c:150
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1969
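A sketch of the usual pattern in an audio decoder's decode callback: set the number of samples on the frame first, then let the generic code allocate the data planes (blockstodecode and ret are illustrative locals):
frame->nb_samples = blockstodecode;
if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
    return ret;
/* frame->data[ch] now points to writable planes for each channel */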
BswapDSPContext bdsp
Definition: apedec.c:139
unsigned int sample_pos
Definition: apedec.c:132
int extradata_size
Definition: avcodec.h:1723
static const uint16_t counts_3980[22]
Fixed probabilities for symbols in Monkey Audio version 3.98.
Definition: apedec.c:411
static int range_get_symbol(APEContext *ctx, const uint16_t counts[], const uint16_t counts_diff[])
Decode symbol.
Definition: apedec.c:432
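A simplified sketch of what decoding one symbol against a cumulative-frequency table looks like, assuming the counts sum to 1 << 16 (the real helper at the line above additionally treats an out-of-range cumulative frequency as a bitstream error):
static int decode_symbol_sketch(APEContext *ctx,
                                const uint16_t counts[],      /* cumulative counts */
                                const uint16_t counts_diff[]) /* per-symbol widths  */
{
    int symbol;
    int cf = range_decode_culshift(ctx, 16);        /* 16-bit cumulative frequency */
    for (symbol = 0; counts[symbol + 1] <= cf; symbol++)
        ;                                           /* linear search for the symbol */
    range_decode_update(ctx, counts_diff[symbol], counts[symbol]);
    return symbol;
}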
Describe the class of an AVClass context structure.
Definition: log.h:67
#define AV_CODEC_CAP_SUBFRAMES
Codec can output multiple frames per AVPacket. Normally demuxers return one frame at a time...
Definition: avcodec.h:1042
uint32_t low
low end of interval
Definition: apedec.c:113
int flags
global decoder flags
Definition: apedec.c:148
APECompressionLevel
Possible compression levels.
Definition: apedec.c:67
void(* predictor_decode_stereo)(struct APEContext *ctx, int count)
Definition: apedec.c:177
#define EXTRA_BITS
Definition: apedec.c:314
static void range_decode_update(APEContext *ctx, int sy_f, int lt_f)
Update decoding state.
Definition: apedec.c:373
static void entropy_decode_mono_3900(APEContext *ctx, int blockstodecode)
Definition: apedec.c:686
uint32_t k
Definition: apedec.c:108
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
Definition: get_bits.h:546
#define MAX_CHANNELS
Definition: apedec.c:41
static const int32_t initial_coeffs_fast_3320[1]
Definition: apedec.c:777
#define MIN_CACHE_BITS
Definition: get_bits.h:128
static void do_apply_filter(APEContext *ctx, int version, APEFilter *f, int32_t *data, int count, int order, int fracbits)
Definition: apedec.c:1278
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:309
#define PREDICTOR_SIZE
Total size of all predictor histories.
Definition: apedec.c:51
static const uint16_t counts_diff_3970[21]
Probability ranges for symbols in Monkey Audio version 3.97.
Definition: apedec.c:402
int blocks_per_loop
maximum number of samples to decode for each call
Definition: apedec.c:157
int
uint8_t * data_end
frame data end
Definition: apedec.c:168
common internal api header.
APERice riceY
rice code parameters for the first channel
Definition: apedec.c:163
static const int shift2[6]
Definition: dxa.c:51
static int get_unary(GetBitContext *gb, int stop, int len)
Get unary code of limited length.
Definition: unary.h:46
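A sketch of limited-length unary decoding: count the leading bits that differ from the stop bit, reading at most len bits (the function name is illustrative; the real helper is in unary.h at the line cited above):
static int get_unary_sketch(GetBitContext *gb, int stop, int len)
{
    int i;
    for (i = 0; i < len && get_bits1(gb) != stop; i++)
        ;
    return i;
}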
#define FF_ALLOC_OR_GOTO(ctx, p, size, label)
Definition: internal.h:140
APEFilter filters[APE_FILTER_LEVELS][2]
filters used for reconstruction
Definition: apedec.c:164
static av_always_inline int predictor_update_filter(APEPredictor *p, const int decoded, const int filter, const int delayA, const int delayB, const int adaptA, const int adaptB)
Definition: apedec.c:1136
int16_t * coeffs
actual coefficients used in filtering
Definition: apedec.c:99
int32_t filterB[2]
Definition: apedec.c:126
#define YADAPTCOEFFSA
Definition: apedec.c:58
#define PAR
Definition: apedec.c:1592
static void init_predictor_decoder(APEContext *ctx)
Definition: apedec.c:793
av_cold void ff_bswapdsp_init(BswapDSPContext *c)
Definition: bswapdsp.c:49
void * priv_data
Definition: avcodec.h:1648
static const int32_t initial_coeffs_b_3800[2]
Definition: apedec.c:785
APEPredictor predictor
predictor used for final reconstruction
Definition: apedec.c:152
static const AVClass ape_decoder_class
Definition: apedec.c:1599
int channels
number of audio channels
Definition: avcodec.h:2282
static void long_filter_ehigh_3830(int32_t *buffer, int length)
Definition: apedec.c:935
static void predictor_decode_mono_3950(APEContext *ctx, int count)
Definition: apedec.c:1212
GetBitContext gb
Definition: apedec.c:165
Filters applied to the decoded data.
Definition: apedec.c:98
static const struct PPFilter filters[]
Definition: postprocess.c:134
uint32_t coeffsB[2][5]
adaptation coefficients
Definition: apedec.c:129
#define XADAPTCOEFFSB
Definition: apedec.c:61
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:701
int32_t * decoded[MAX_CHANNELS]
decoded data for each channel
Definition: apedec.c:156
int32_t * buf
Definition: apedec.c:121
FILE * out
Definition: movenc.c:54
#define av_freep(p)
signed 16 bits, planar
Definition: samplefmt.h:67
#define HISTORY_SIZE
Definition: apedec.c:48
#define av_always_inline
Definition: attributes.h:39
int data_size
frame data allocated size
Definition: apedec.c:169
static const AVOption options[]
Definition: apedec.c:1593
#define AV_CH_LAYOUT_MONO
int16_t * adaptcoeffs
adaptive filter coefficients used for correcting the actual filter coefficients
Definition: apedec.c:100
int channels
Definition: apedec.c:141
#define BOTTOM_VALUE
Definition: apedec.c:315
This structure stores compressed data.
Definition: avcodec.h:1510
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:361
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: avcodec.h:999
for(j=16;j >0;--j)
static void entropy_decode_mono_0000(APEContext *ctx, int blockstodecode)
Definition: apedec.c:652
GLuint buffer
Definition: opengl_enc.c:101
int16_t * historybuffer
filter memory
Definition: apedec.c:101
static void entropy_decode_mono_3860(APEContext *ctx, int blockstodecode)
Definition: apedec.c:666