FFmpeg
apedec.c
1 /*
2  * Monkey's Audio lossless audio decoder
3  * Copyright (c) 2007 Benjamin Zores <ben@geexbox.org>
4  * based upon libdemac from Dave Chapman.
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 #include <inttypes.h>
24 
25 #include "libavutil/avassert.h"
26 #include "libavutil/channel_layout.h"
27 #include "libavutil/opt.h"
28 #include "lossless_audiodsp.h"
29 #include "avcodec.h"
30 #include "bswapdsp.h"
31 #include "bytestream.h"
32 #include "internal.h"
33 #include "get_bits.h"
34 #include "unary.h"
35 
36 /**
37  * @file
38  * Monkey's Audio lossless audio decoder
39  */
40 
41 #define MAX_CHANNELS 2
42 #define MAX_BYTESPERSAMPLE 3
43 
44 #define APE_FRAMECODE_MONO_SILENCE 1
45 #define APE_FRAMECODE_STEREO_SILENCE 3
46 #define APE_FRAMECODE_PSEUDO_STEREO 4
47 
48 #define HISTORY_SIZE 512
49 #define PREDICTOR_ORDER 8
50 /** Total size of all predictor histories */
51 #define PREDICTOR_SIZE 50
52 
53 #define YDELAYA (18 + PREDICTOR_ORDER*4)
54 #define YDELAYB (18 + PREDICTOR_ORDER*3)
55 #define XDELAYA (18 + PREDICTOR_ORDER*2)
56 #define XDELAYB (18 + PREDICTOR_ORDER)
57 
58 #define YADAPTCOEFFSA 18
59 #define XADAPTCOEFFSA 14
60 #define YADAPTCOEFFSB 10
61 #define XADAPTCOEFFSB 5
62 
63 /**
64  * Possible compression levels
65  * @{
66  */
67 enum APECompressionLevel {
68  COMPRESSION_LEVEL_FAST = 1000,
69  COMPRESSION_LEVEL_NORMAL = 2000,
70  COMPRESSION_LEVEL_HIGH = 3000,
71  COMPRESSION_LEVEL_EXTRA_HIGH = 4000,
72  COMPRESSION_LEVEL_INSANE = 5000
73 };
74 /** @} */
75 
76 #define APE_FILTER_LEVELS 3
77 
78 /** Filter orders depending on compression level */
79 static const uint16_t ape_filter_orders[5][APE_FILTER_LEVELS] = {
80  { 0, 0, 0 },
81  { 16, 0, 0 },
82  { 64, 0, 0 },
83  { 32, 256, 0 },
84  { 16, 256, 1280 }
85 };
86 
87 /** Filter fraction bits depending on compression level */
88 static const uint8_t ape_filter_fracbits[5][APE_FILTER_LEVELS] = {
89  { 0, 0, 0 },
90  { 11, 0, 0 },
91  { 11, 0, 0 },
92  { 10, 13, 0 },
93  { 11, 13, 15 }
94 };
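/* Both tables are indexed by fset = compression_level / 1000 - 1 (set in
 * ape_decode_init below), so e.g. COMPRESSION_LEVEL_HIGH (3000) selects
 * row 2: a single 64-tap filter with 11 fractional bits. */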
95 
96 
97 /** Filters applied to the decoded data */
98 typedef struct APEFilter {
99  int16_t *coeffs; ///< actual coefficients used in filtering
100  int16_t *adaptcoeffs; ///< adaptive filter coefficients used for correcting of actual filter coefficients
101  int16_t *historybuffer; ///< filter memory
102  int16_t *delay; ///< filtered values
103 
104  uint32_t avg;
105 } APEFilter;
106 
107 typedef struct APERice {
108  uint32_t k;
109  uint32_t ksum;
110 } APERice;
111 
112 typedef struct APERangecoder {
113  uint32_t low; ///< low end of interval
114  uint32_t range; ///< length of interval
115  uint32_t help; ///< bytes_to_follow resp. intermediate value
116  unsigned int buffer; ///< buffer for input/output
117 } APERangecoder;
118 
119 /** Filter histories */
120 typedef struct APEPredictor {
121  int32_t *buf;
122 
123  int32_t lastA[2];
124 
125  int32_t filterA[2];
126  int32_t filterB[2];
127 
128  int32_t coeffsA[2][4]; ///< adaption coefficients
129  int32_t coeffsB[2][5]; ///< adaption coefficients
130  int32_t historybuffer[HISTORY_SIZE + PREDICTOR_SIZE];
131 
132  unsigned int sample_pos;
133 } APEPredictor;
134 
135 /** Decoder context */
136 typedef struct APEContext {
137  AVClass *class; ///< class for AVOptions
138  AVCodecContext *avctx;
139  BswapDSPContext bdsp;
140  LLAudDSPContext adsp;
141  int channels;
142  int samples; ///< samples left to decode in current frame
143  int bps;
144 
145  int fileversion; ///< codec version, very important in decoding process
146  int compression_level; ///< compression levels
147  int fset; ///< which filter set to use (calculated from compression level)
148  int flags; ///< global decoder flags
149 
150  uint32_t CRC; ///< frame CRC
151  int frameflags; ///< frame flags
152  APEPredictor predictor; ///< predictor used for final reconstruction
153 
154  int32_t *decoded_buffer;
155  int decoded_size;
156  int32_t *decoded[MAX_CHANNELS]; ///< decoded data for each channel
157  int blocks_per_loop; ///< maximum number of samples to decode for each call
158 
159  int16_t* filterbuf[APE_FILTER_LEVELS]; ///< filter memory
160 
161  APERangecoder rc; ///< rangecoder used to decode actual values
162  APERice riceX; ///< rice code parameters for the second channel
163  APERice riceY; ///< rice code parameters for the first channel
164  APEFilter filters[APE_FILTER_LEVELS][2]; ///< filters used for reconstruction
165  GetBitContext gb;
166 
167  uint8_t *data; ///< current frame data
168  uint8_t *data_end; ///< frame data end
169  int data_size; ///< frame data allocated size
170  const uint8_t *ptr; ///< current position in frame data
171 
172  int error;
173 
174  void (*entropy_decode_mono)(struct APEContext *ctx, int blockstodecode);
175  void (*entropy_decode_stereo)(struct APEContext *ctx, int blockstodecode);
176  void (*predictor_decode_mono)(struct APEContext *ctx, int count);
177  void (*predictor_decode_stereo)(struct APEContext *ctx, int count);
178 } APEContext;
179 
180 static void ape_apply_filters(APEContext *ctx, int32_t *decoded0,
181  int32_t *decoded1, int count);
182 
183 static void entropy_decode_mono_0000(APEContext *ctx, int blockstodecode);
184 static void entropy_decode_stereo_0000(APEContext *ctx, int blockstodecode);
185 static void entropy_decode_mono_3860(APEContext *ctx, int blockstodecode);
186 static void entropy_decode_stereo_3860(APEContext *ctx, int blockstodecode);
187 static void entropy_decode_mono_3900(APEContext *ctx, int blockstodecode);
188 static void entropy_decode_stereo_3900(APEContext *ctx, int blockstodecode);
189 static void entropy_decode_stereo_3930(APEContext *ctx, int blockstodecode);
190 static void entropy_decode_mono_3990(APEContext *ctx, int blockstodecode);
191 static void entropy_decode_stereo_3990(APEContext *ctx, int blockstodecode);
192 
193 static void predictor_decode_mono_3800(APEContext *ctx, int count);
194 static void predictor_decode_stereo_3800(APEContext *ctx, int count);
195 static void predictor_decode_mono_3930(APEContext *ctx, int count);
196 static void predictor_decode_stereo_3930(APEContext *ctx, int count);
197 static void predictor_decode_mono_3950(APEContext *ctx, int count);
198 static void predictor_decode_stereo_3950(APEContext *ctx, int count);
199 
200 static av_cold int ape_decode_close(AVCodecContext *avctx)
201 {
202  APEContext *s = avctx->priv_data;
203  int i;
204 
205  for (i = 0; i < APE_FILTER_LEVELS; i++)
206  av_freep(&s->filterbuf[i]);
207 
208  av_freep(&s->decoded_buffer);
209  av_freep(&s->data);
210  s->decoded_size = s->data_size = 0;
211 
212  return 0;
213 }
214 
215 static av_cold int ape_decode_init(AVCodecContext *avctx)
216 {
217  APEContext *s = avctx->priv_data;
218  int i;
219 
220  if (avctx->extradata_size != 6) {
221  av_log(avctx, AV_LOG_ERROR, "Incorrect extradata\n");
222  return AVERROR(EINVAL);
223  }
224  if (avctx->channels > 2) {
225  av_log(avctx, AV_LOG_ERROR, "Only mono and stereo is supported\n");
226  return AVERROR(EINVAL);
227  }
228  s->bps = avctx->bits_per_coded_sample;
229  switch (s->bps) {
230  case 8:
231  avctx->sample_fmt = AV_SAMPLE_FMT_U8P;
232  break;
233  case 16:
234  avctx->sample_fmt = AV_SAMPLE_FMT_S16P;
235  break;
236  case 24:
237  avctx->sample_fmt = AV_SAMPLE_FMT_S32P;
238  break;
239  default:
240  avpriv_report_missing_feature(avctx,
241  "%d bits per coded sample", s->bps);
242  return AVERROR_PATCHWELCOME;
243  }
244  s->avctx = avctx;
245  s->channels = avctx->channels;
246  s->fileversion = AV_RL16(avctx->extradata);
247  s->compression_level = AV_RL16(avctx->extradata + 2);
248  s->flags = AV_RL16(avctx->extradata + 4);
249 
250  av_log(avctx, AV_LOG_VERBOSE, "Compression Level: %d - Flags: %d\n",
251  s->compression_level, s->flags);
252  if (s->compression_level % 1000 || s->compression_level > COMPRESSION_LEVEL_INSANE ||
253  !s->compression_level ||
254  (s->fileversion < 3930 && s->compression_level == COMPRESSION_LEVEL_INSANE)) {
255  av_log(avctx, AV_LOG_ERROR, "Incorrect compression level %d\n",
256  s->compression_level);
257  return AVERROR_INVALIDDATA;
258  }
259  s->fset = s->compression_level / 1000 - 1;
260  for (i = 0; i < APE_FILTER_LEVELS; i++) {
261  if (!ape_filter_orders[s->fset][i])
262  break;
263  FF_ALLOC_OR_GOTO(avctx, s->filterbuf[i],
264  (ape_filter_orders[s->fset][i] * 3 + HISTORY_SIZE) * 4,
265  filter_alloc_fail);
266  }
267 
268  if (s->fileversion < 3860) {
269  s->entropy_decode_mono = entropy_decode_mono_0000;
270  s->entropy_decode_stereo = entropy_decode_stereo_0000;
271  } else if (s->fileversion < 3900) {
272  s->entropy_decode_mono = entropy_decode_mono_3860;
273  s->entropy_decode_stereo = entropy_decode_stereo_3860;
274  } else if (s->fileversion < 3930) {
275  s->entropy_decode_mono = entropy_decode_mono_3900;
276  s->entropy_decode_stereo = entropy_decode_stereo_3900;
277  } else if (s->fileversion < 3990) {
278  s->entropy_decode_mono = entropy_decode_mono_3900;
279  s->entropy_decode_stereo = entropy_decode_stereo_3930;
280  } else {
281  s->entropy_decode_mono = entropy_decode_mono_3990;
282  s->entropy_decode_stereo = entropy_decode_stereo_3990;
283  }
284 
285  if (s->fileversion < 3930) {
286  s->predictor_decode_mono = predictor_decode_mono_3800;
287  s->predictor_decode_stereo = predictor_decode_stereo_3800;
288  } else if (s->fileversion < 3950) {
289  s->predictor_decode_mono = predictor_decode_mono_3930;
290  s->predictor_decode_stereo = predictor_decode_stereo_3930;
291  } else {
292  s->predictor_decode_mono = predictor_decode_mono_3950;
293  s->predictor_decode_stereo = predictor_decode_stereo_3950;
294  }
295 
296  ff_bswapdsp_init(&s->bdsp);
297  ff_llauddsp_init(&s->adsp);
298  avctx->channel_layout = (avctx->channels == 2) ? AV_CH_LAYOUT_STEREO : AV_CH_LAYOUT_MONO;
299 
300  return 0;
301 filter_alloc_fail:
302  ape_decode_close(avctx);
303  return AVERROR(ENOMEM);
304 }
305 
306 /**
307  * @name APE range decoding functions
308  * @{
309  */
310 
311 #define CODE_BITS 32
312 #define TOP_VALUE ((unsigned int)1 << (CODE_BITS-1))
313 #define SHIFT_BITS (CODE_BITS - 9)
314 #define EXTRA_BITS ((CODE_BITS-2) % 8 + 1)
315 #define BOTTOM_VALUE (TOP_VALUE >> 8)
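/* With CODE_BITS == 32 these work out to TOP_VALUE = 0x80000000,
 * SHIFT_BITS = 23, EXTRA_BITS = 7 and BOTTOM_VALUE = 0x00800000. */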
316 
317 /** Start the decoder */
318 static inline void range_start_decoding(APEContext *ctx)
319 {
320  ctx->rc.buffer = bytestream_get_byte(&ctx->ptr);
321  ctx->rc.low = ctx->rc.buffer >> (8 - EXTRA_BITS);
322  ctx->rc.range = (uint32_t) 1 << EXTRA_BITS;
323 }
324 
325 /** Perform normalization */
326 static inline void range_dec_normalize(APEContext *ctx)
327 {
328  while (ctx->rc.range <= BOTTOM_VALUE) {
329  ctx->rc.buffer <<= 8;
330  if(ctx->ptr < ctx->data_end) {
331  ctx->rc.buffer += *ctx->ptr;
332  ctx->ptr++;
333  } else {
334  ctx->error = 1;
335  }
336  ctx->rc.low = (ctx->rc.low << 8) | ((ctx->rc.buffer >> 1) & 0xFF);
337  ctx->rc.range <<= 8;
338  }
339 }
340 
341 /**
342  * Calculate cumulative frequency for next symbol. Does NO update!
343  * @param ctx decoder context
344  * @param tot_f is the total frequency or (code_value)1<<shift
345  * @return the cumulative frequency
346  */
347 static inline int range_decode_culfreq(APEContext *ctx, int tot_f)
348 {
349  range_dec_normalize(ctx);
350  ctx->rc.help = ctx->rc.range / tot_f;
351  return ctx->rc.low / ctx->rc.help;
352 }
353 
354 /**
355  * Decode value with given size in bits
356  * @param ctx decoder context
357  * @param shift number of bits to decode
358  */
359 static inline int range_decode_culshift(APEContext *ctx, int shift)
360 {
361  range_dec_normalize(ctx);
362  ctx->rc.help = ctx->rc.range >> shift;
363  return ctx->rc.low / ctx->rc.help;
364 }
365 
366 
367 /**
368  * Update decoding state
369  * @param ctx decoder context
370  * @param sy_f the interval length (frequency of the symbol)
371  * @param lt_f the lower end (frequency sum of < symbols)
372  */
373 static inline void range_decode_update(APEContext *ctx, int sy_f, int lt_f)
374 {
375  ctx->rc.low -= ctx->rc.help * lt_f;
376  ctx->rc.range = ctx->rc.help * sy_f;
377 }
378 
379 /** Decode n bits (n <= 16) without modelling */
380 static inline int range_decode_bits(APEContext *ctx, int n)
381 {
382  int sym = range_decode_culshift(ctx, n);
383  range_decode_update(ctx, 1, sym);
384  return sym;
385 }
386 
387 
388 #define MODEL_ELEMENTS 64
389 
390 /**
391  * Fixed probabilities for symbols in Monkey Audio version 3.97
392  */
393 static const uint16_t counts_3970[22] = {
394  0, 14824, 28224, 39348, 47855, 53994, 58171, 60926,
395  62682, 63786, 64463, 64878, 65126, 65276, 65365, 65419,
396  65450, 65469, 65480, 65487, 65491, 65493,
397 };
398 
399 /**
400  * Probability ranges for symbols in Monkey Audio version 3.97
401  */
402 static const uint16_t counts_diff_3970[21] = {
403  14824, 13400, 11124, 8507, 6139, 4177, 2755, 1756,
404  1104, 677, 415, 248, 150, 89, 54, 31,
405  19, 11, 7, 4, 2,
406 };
407 
408 /**
409  * Fixed probabilities for symbols in Monkey Audio version 3.98
410  */
411 static const uint16_t counts_3980[22] = {
412  0, 19578, 36160, 48417, 56323, 60899, 63265, 64435,
413  64971, 65232, 65351, 65416, 65447, 65466, 65476, 65482,
414  65485, 65488, 65490, 65491, 65492, 65493,
415 };
416 
417 /**
418  * Probability ranges for symbols in Monkey Audio version 3.98
419  */
420 static const uint16_t counts_diff_3980[21] = {
421  19578, 16582, 12257, 7906, 4576, 2366, 1170, 536,
422  261, 119, 65, 31, 19, 10, 6, 3,
423  3, 2, 1, 1, 1,
424 };
425 
426 /**
427  * Decode symbol
428  * @param ctx decoder context
429  * @param counts probability range start position
430  * @param counts_diff probability range widths
431  */
432 static inline int range_get_symbol(APEContext *ctx,
433  const uint16_t counts[],
434  const uint16_t counts_diff[])
435 {
436  int symbol, cf;
437 
438  cf = range_decode_culshift(ctx, 16);
439 
440  if(cf > 65492){
441  symbol= cf - 65535 + 63;
442  range_decode_update(ctx, 1, cf);
443  if(cf > 65535)
444  ctx->error=1;
445  return symbol;
446  }
447  /* figure out the symbol inefficiently; a binary search would be much better */
448  for (symbol = 0; counts[symbol + 1] <= cf; symbol++);
449 
450  range_decode_update(ctx, counts_diff[symbol], counts[symbol]);
451 
452  return symbol;
453 }
454 /** @} */ // group rangecoder
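/* Example: with counts_3970, a 16-bit cumulative frequency cf in [0, 14823]
 * decodes symbol 0, [14824, 28223] decodes symbol 1, and so on;
 * counts_diff_3970[s] == counts_3970[s + 1] - counts_3970[s] is the interval
 * width handed to range_decode_update(). */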
455 
456 static inline void update_rice(APERice *rice, unsigned int x)
457 {
458  int lim = rice->k ? (1 << (rice->k + 4)) : 0;
459  rice->ksum += ((x + 1) / 2) - ((rice->ksum + 16) >> 5);
460 
461  if (rice->ksum < lim)
462  rice->k--;
463  else if (rice->ksum >= (1 << (rice->k + 5)) && rice->k < 24)
464  rice->k++;
465 }
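/* ksum tracks a decaying sum of (x + 1) / 2 over roughly the last 32 samples;
 * k is bumped up while ksum >= 1 << (k + 5) and down while ksum < 1 << (k + 4),
 * keeping k close to log2 of the mean residual magnitude. */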
466 
467 static inline int get_rice_ook(GetBitContext *gb, int k)
468 {
469  unsigned int x;
470 
471  x = get_unary(gb, 1, get_bits_left(gb));
472 
473  if (k)
474  x = (x << k) | get_bits(gb, k);
475 
476  return x;
477 }
478 
479 static inline int ape_decode_value_3860(APEContext *ctx, GetBitContext *gb,
480  APERice *rice)
481 {
482  unsigned int x, overflow;
483 
484  overflow = get_unary(gb, 1, get_bits_left(gb));
485 
486  if (ctx->fileversion > 3880) {
487  while (overflow >= 16) {
488  overflow -= 16;
489  rice->k += 4;
490  }
491  }
492 
493  if (!rice->k)
494  x = overflow;
495  else if(rice->k <= MIN_CACHE_BITS) {
496  x = (overflow << rice->k) + get_bits(gb, rice->k);
497  } else {
498  av_log(ctx->avctx, AV_LOG_ERROR, "Too many bits: %"PRIu32"\n", rice->k);
499  return AVERROR_INVALIDDATA;
500  }
501  rice->ksum += x - (rice->ksum + 8 >> 4);
502  if (rice->ksum < (rice->k ? 1 << (rice->k + 4) : 0))
503  rice->k--;
504  else if (rice->ksum >= (1 << (rice->k + 5)) && rice->k < 24)
505  rice->k++;
506 
507  /* Convert to signed */
508  return ((x >> 1) ^ ((x & 1) - 1)) + 1;
509 }
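/* The ((x >> 1) ^ ((x & 1) - 1)) + 1 step above (shared by the 3900 and 3990
 * decoders) undoes the encoder's zigzag mapping: the unsigned codes
 * 0, 1, 2, 3, 4, ... become the signed residuals 0, 1, -1, 2, -2, ... */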
510 
511 static inline int ape_decode_value_3900(APEContext *ctx, APERice *rice)
512 {
513  unsigned int x, overflow;
514  int tmpk;
515 
516  overflow = range_get_symbol(ctx, counts_3970, counts_diff_3970);
517 
518  if (overflow == (MODEL_ELEMENTS - 1)) {
519  tmpk = range_decode_bits(ctx, 5);
520  overflow = 0;
521  } else
522  tmpk = (rice->k < 1) ? 0 : rice->k - 1;
523 
524  if (tmpk <= 16 || ctx->fileversion < 3910) {
525  if (tmpk > 23) {
526  av_log(ctx->avctx, AV_LOG_ERROR, "Too many bits: %d\n", tmpk);
527  return AVERROR_INVALIDDATA;
528  }
529  x = range_decode_bits(ctx, tmpk);
530  } else if (tmpk <= 31) {
531  x = range_decode_bits(ctx, 16);
532  x |= (range_decode_bits(ctx, tmpk - 16) << 16);
533  } else {
534  av_log(ctx->avctx, AV_LOG_ERROR, "Too many bits: %d\n", tmpk);
535  return AVERROR_INVALIDDATA;
536  }
537  x += overflow << tmpk;
538 
539  update_rice(rice, x);
540 
541  /* Convert to signed */
542  return ((x >> 1) ^ ((x & 1) - 1)) + 1;
543 }
544 
545 static inline int ape_decode_value_3990(APEContext *ctx, APERice *rice)
546 {
547  unsigned int x, overflow;
548  int base, pivot;
549 
550  pivot = rice->ksum >> 5;
551  if (pivot == 0)
552  pivot = 1;
553 
554  overflow = range_get_symbol(ctx, counts_3980, counts_diff_3980);
555 
556  if (overflow == (MODEL_ELEMENTS - 1)) {
557  overflow = (unsigned)range_decode_bits(ctx, 16) << 16;
558  overflow |= range_decode_bits(ctx, 16);
559  }
560 
561  if (pivot < 0x10000) {
562  base = range_decode_culfreq(ctx, pivot);
563  range_decode_update(ctx, 1, base);
564  } else {
565  int base_hi = pivot, base_lo;
566  int bbits = 0;
567 
568  while (base_hi & ~0xFFFF) {
569  base_hi >>= 1;
570  bbits++;
571  }
572  base_hi = range_decode_culfreq(ctx, base_hi + 1);
573  range_decode_update(ctx, 1, base_hi);
574  base_lo = range_decode_culfreq(ctx, 1 << bbits);
575  range_decode_update(ctx, 1, base_lo);
576 
577  base = (base_hi << bbits) + base_lo;
578  }
579 
580  x = base + overflow * pivot;
581 
582  update_rice(rice, x);
583 
584  /* Convert to signed */
585  return ((x >> 1) ^ ((x & 1) - 1)) + 1;
586 }
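/* In the 3.99 scheme the residual is coded as x = overflow * pivot + base with
 * pivot = ksum / 32; when pivot does not fit in 16 bits, base itself is split
 * into a range-coded high part and a (1 << bbits)-ary low part. */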
587 
588 static void decode_array_0000(APEContext *ctx, GetBitContext *gb,
589  int32_t *out, APERice *rice, int blockstodecode)
590 {
591  int i;
592  unsigned ksummax, ksummin;
593 
594  rice->ksum = 0;
595  for (i = 0; i < FFMIN(blockstodecode, 5); i++) {
596  out[i] = get_rice_ook(&ctx->gb, 10);
597  rice->ksum += out[i];
598  }
599  rice->k = av_log2(rice->ksum / 10) + 1;
600  if (rice->k >= 24)
601  return;
602  for (; i < FFMIN(blockstodecode, 64); i++) {
603  out[i] = get_rice_ook(&ctx->gb, rice->k);
604  rice->ksum += out[i];
605  rice->k = av_log2(rice->ksum / ((i + 1) * 2)) + 1;
606  if (rice->k >= 24)
607  return;
608  }
609  ksummax = 1 << rice->k + 7;
610  ksummin = rice->k ? (1 << rice->k + 6) : 0;
611  for (; i < blockstodecode; i++) {
612  out[i] = get_rice_ook(&ctx->gb, rice->k);
613  rice->ksum += out[i] - (unsigned)out[i - 64];
614  while (rice->ksum < ksummin) {
615  rice->k--;
616  ksummin = rice->k ? ksummin >> 1 : 0;
617  ksummax >>= 1;
618  }
619  while (rice->ksum >= ksummax) {
620  rice->k++;
621  if (rice->k > 24)
622  return;
623  ksummax <<= 1;
624  ksummin = ksummin ? ksummin << 1 : 128;
625  }
626  }
627 
628  for (i = 0; i < blockstodecode; i++)
629  out[i] = ((out[i] >> 1) ^ ((out[i] & 1) - 1)) + 1;
630 }
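/* Old (pre-3.86) streams: k is seeded from the first 5 residuals read with
 * k = 10, refined over the first 64, then adapted using a sliding 64-sample
 * window (each new out[i] is added to ksum and out[i - 64] is dropped). */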
631 
632 static void entropy_decode_mono_0000(APEContext *ctx, int blockstodecode)
633 {
634  decode_array_0000(ctx, &ctx->gb, ctx->decoded[0], &ctx->riceY,
635  blockstodecode);
636 }
637 
638 static void entropy_decode_stereo_0000(APEContext *ctx, int blockstodecode)
639 {
640  decode_array_0000(ctx, &ctx->gb, ctx->decoded[0], &ctx->riceY,
641  blockstodecode);
642  decode_array_0000(ctx, &ctx->gb, ctx->decoded[1], &ctx->riceX,
643  blockstodecode);
644 }
645 
646 static void entropy_decode_mono_3860(APEContext *ctx, int blockstodecode)
647 {
648  int32_t *decoded0 = ctx->decoded[0];
649 
650  while (blockstodecode--)
651  *decoded0++ = ape_decode_value_3860(ctx, &ctx->gb, &ctx->riceY);
652 }
653 
654 static void entropy_decode_stereo_3860(APEContext *ctx, int blockstodecode)
655 {
656  int32_t *decoded0 = ctx->decoded[0];
657  int32_t *decoded1 = ctx->decoded[1];
658  int blocks = blockstodecode;
659 
660  while (blockstodecode--)
661  *decoded0++ = ape_decode_value_3860(ctx, &ctx->gb, &ctx->riceY);
662  while (blocks--)
663  *decoded1++ = ape_decode_value_3860(ctx, &ctx->gb, &ctx->riceX);
664 }
665 
666 static void entropy_decode_mono_3900(APEContext *ctx, int blockstodecode)
667 {
668  int32_t *decoded0 = ctx->decoded[0];
669 
670  while (blockstodecode--)
671  *decoded0++ = ape_decode_value_3900(ctx, &ctx->riceY);
672 }
673 
674 static void entropy_decode_stereo_3900(APEContext *ctx, int blockstodecode)
675 {
676  int32_t *decoded0 = ctx->decoded[0];
677  int32_t *decoded1 = ctx->decoded[1];
678  int blocks = blockstodecode;
679 
680  while (blockstodecode--)
681  *decoded0++ = ape_decode_value_3900(ctx, &ctx->riceY);
682  range_dec_normalize(ctx);
683  // because of some implementation peculiarities we need to backpedal here
684  ctx->ptr -= 1;
685  range_start_decoding(ctx);
686  while (blocks--)
687  *decoded1++ = ape_decode_value_3900(ctx, &ctx->riceX);
688 }
689 
690 static void entropy_decode_stereo_3930(APEContext *ctx, int blockstodecode)
691 {
692  int32_t *decoded0 = ctx->decoded[0];
693  int32_t *decoded1 = ctx->decoded[1];
694 
695  while (blockstodecode--) {
696  *decoded0++ = ape_decode_value_3900(ctx, &ctx->riceY);
697  *decoded1++ = ape_decode_value_3900(ctx, &ctx->riceX);
698  }
699 }
700 
701 static void entropy_decode_mono_3990(APEContext *ctx, int blockstodecode)
702 {
703  int32_t *decoded0 = ctx->decoded[0];
704 
705  while (blockstodecode--)
706  *decoded0++ = ape_decode_value_3990(ctx, &ctx->riceY);
707 }
708 
709 static void entropy_decode_stereo_3990(APEContext *ctx, int blockstodecode)
710 {
711  int32_t *decoded0 = ctx->decoded[0];
712  int32_t *decoded1 = ctx->decoded[1];
713 
714  while (blockstodecode--) {
715  *decoded0++ = ape_decode_value_3990(ctx, &ctx->riceY);
716  *decoded1++ = ape_decode_value_3990(ctx, &ctx->riceX);
717  }
718 }
719 
720 static int init_entropy_decoder(APEContext *ctx)
721 {
722  /* Read the CRC */
723  if (ctx->fileversion >= 3900) {
724  if (ctx->data_end - ctx->ptr < 6)
725  return AVERROR_INVALIDDATA;
726  ctx->CRC = bytestream_get_be32(&ctx->ptr);
727  } else {
728  ctx->CRC = get_bits_long(&ctx->gb, 32);
729  }
730 
731  /* Read the frame flags if they exist */
732  ctx->frameflags = 0;
733  if ((ctx->fileversion > 3820) && (ctx->CRC & 0x80000000)) {
734  ctx->CRC &= ~0x80000000;
735 
736  if (ctx->data_end - ctx->ptr < 6)
737  return AVERROR_INVALIDDATA;
738  ctx->frameflags = bytestream_get_be32(&ctx->ptr);
739  }
740 
741  /* Initialize the rice structs */
742  ctx->riceX.k = 10;
743  ctx->riceX.ksum = (1 << ctx->riceX.k) * 16;
744  ctx->riceY.k = 10;
745  ctx->riceY.ksum = (1 << ctx->riceY.k) * 16;
746 
747  if (ctx->fileversion >= 3900) {
748  /* The first 8 bits of input are ignored. */
749  ctx->ptr++;
750 
751  range_start_decoding(ctx);
752  }
753 
754  return 0;
755 }
756 
757 static const int32_t initial_coeffs_fast_3320[1] = {
758  375,
759 };
760 
761 static const int32_t initial_coeffs_a_3800[3] = {
762  64, 115, 64,
763 };
764 
765 static const int32_t initial_coeffs_b_3800[2] = {
766  740, 0
767 };
768 
769 static const int32_t initial_coeffs_3930[4] = {
770  360, 317, -109, 98
771 };
772 
773 static void init_predictor_decoder(APEContext *ctx)
774 {
775  APEPredictor *p = &ctx->predictor;
776 
777  /* Zero the history buffers */
778  memset(p->historybuffer, 0, PREDICTOR_SIZE * sizeof(*p->historybuffer));
779  p->buf = p->historybuffer;
780 
781  /* Initialize and zero the coefficients */
782  if (ctx->fileversion < 3930) {
783  if (ctx->compression_level == COMPRESSION_LEVEL_FAST) {
784  memcpy(p->coeffsA[0], initial_coeffs_fast_3320,
785  sizeof(initial_coeffs_fast_3320));
786  memcpy(p->coeffsA[1], initial_coeffs_fast_3320,
787  sizeof(initial_coeffs_fast_3320));
788  } else {
789  memcpy(p->coeffsA[0], initial_coeffs_a_3800,
790  sizeof(initial_coeffs_a_3800));
791  memcpy(p->coeffsA[1], initial_coeffs_a_3800,
792  sizeof(initial_coeffs_a_3800));
793  }
794  } else {
795  memcpy(p->coeffsA[0], initial_coeffs_3930, sizeof(initial_coeffs_3930));
796  memcpy(p->coeffsA[1], initial_coeffs_3930, sizeof(initial_coeffs_3930));
797  }
798  memset(p->coeffsB, 0, sizeof(p->coeffsB));
799  if (ctx->fileversion < 3930) {
800  memcpy(p->coeffsB[0], initial_coeffs_b_3800,
801  sizeof(initial_coeffs_b_3800));
802  memcpy(p->coeffsB[1], initial_coeffs_b_3800,
803  sizeof(initial_coeffs_b_3800));
804  }
805 
806  p->filterA[0] = p->filterA[1] = 0;
807  p->filterB[0] = p->filterB[1] = 0;
808  p->lastA[0] = p->lastA[1] = 0;
809 
810  p->sample_pos = 0;
811 }
812 
813 /** Get inverse sign of integer (-1 for positive, 1 for negative and 0 for zero) */
814 static inline int APESIGN(int32_t x) {
815  return (x < 0) - (x > 0);
816 }
817 
818 static av_always_inline int filter_fast_3320(APEPredictor *p,
819  const int decoded, const int filter,
820  const int delayA)
821 {
822  int32_t predictionA;
823 
824  p->buf[delayA] = p->lastA[filter];
825  if (p->sample_pos < 3) {
826  p->lastA[filter] = decoded;
827  p->filterA[filter] = decoded;
828  return decoded;
829  }
830 
831  predictionA = p->buf[delayA] * 2 - p->buf[delayA - 1];
832  p->lastA[filter] = decoded + (predictionA * p->coeffsA[filter][0] >> 9);
833 
834  if ((decoded ^ predictionA) > 0)
835  p->coeffsA[filter][0]++;
836  else
837  p->coeffsA[filter][0]--;
838 
839  p->filterA[filter] += (unsigned)p->lastA[filter];
840 
841  return p->filterA[filter];
842 }
843 
844 static av_always_inline int filter_3800(APEPredictor *p,
845  const int decoded, const int filter,
846  const int delayA, const int delayB,
847  const int start, const int shift)
848 {
849  int32_t predictionA, predictionB, sign;
850  int32_t d0, d1, d2, d3, d4;
851 
852  p->buf[delayA] = p->lastA[filter];
853  p->buf[delayB] = p->filterB[filter];
854  if (p->sample_pos < start) {
855  predictionA = decoded + p->filterA[filter];
856  p->lastA[filter] = decoded;
857  p->filterB[filter] = decoded;
858  p->filterA[filter] = predictionA;
859  return predictionA;
860  }
861  d2 = p->buf[delayA];
862  d1 = (p->buf[delayA] - (unsigned)p->buf[delayA - 1]) * 2;
863  d0 = p->buf[delayA] + ((p->buf[delayA - 2] - (unsigned)p->buf[delayA - 1]) * 8);
864  d3 = p->buf[delayB] * 2U - p->buf[delayB - 1];
865  d4 = p->buf[delayB];
866 
867  predictionA = d0 * p->coeffsA[filter][0] +
868  d1 * p->coeffsA[filter][1] +
869  d2 * p->coeffsA[filter][2];
870 
871  sign = APESIGN(decoded);
872  p->coeffsA[filter][0] += (((d0 >> 30) & 2) - 1) * sign;
873  p->coeffsA[filter][1] += (((d1 >> 28) & 8) - 4) * sign;
874  p->coeffsA[filter][2] += (((d2 >> 28) & 8) - 4) * sign;
875 
876  predictionB = d3 * p->coeffsB[filter][0] -
877  d4 * p->coeffsB[filter][1];
878  p->lastA[filter] = decoded + (predictionA >> 11);
879  sign = APESIGN(p->lastA[filter]);
880  p->coeffsB[filter][0] += (((d3 >> 29) & 4) - 2) * sign;
881  p->coeffsB[filter][1] -= (((d4 >> 30) & 2) - 1) * sign;
882 
883  p->filterB[filter] = p->lastA[filter] + (predictionB >> shift);
884  p->filterA[filter] = p->filterB[filter] + (unsigned)((int)(p->filterA[filter] * 31U) >> 5);
885 
886  return p->filterA[filter];
887 }
888 
889 static void long_filter_high_3800(int32_t *buffer, int order, int shift, int length)
890 {
891  int i, j;
892  int32_t dotprod, sign;
893  int32_t coeffs[256], delay[256];
894 
895  if (order >= length)
896  return;
897 
898  memset(coeffs, 0, order * sizeof(*coeffs));
899  for (i = 0; i < order; i++)
900  delay[i] = buffer[i];
901  for (i = order; i < length; i++) {
902  dotprod = 0;
903  sign = APESIGN(buffer[i]);
904  for (j = 0; j < order; j++) {
905  dotprod += delay[j] * (unsigned)coeffs[j];
906  coeffs[j] += ((delay[j] >> 31) | 1) * sign;
907  }
908  buffer[i] -= (unsigned)(dotprod >> shift);
909  for (j = 0; j < order - 1; j++)
910  delay[j] = delay[j + 1];
911  delay[order - 1] = buffer[i];
912  }
913 }
914 
915 static void long_filter_ehigh_3830(int32_t *buffer, int length)
916 {
917  int i, j;
918  int32_t dotprod, sign;
919  int32_t delay[8] = { 0 };
920  uint32_t coeffs[8] = { 0 };
921 
922  for (i = 0; i < length; i++) {
923  dotprod = 0;
924  sign = APESIGN(buffer[i]);
925  for (j = 7; j >= 0; j--) {
926  dotprod += delay[j] * coeffs[j];
927  coeffs[j] += ((delay[j] >> 31) | 1) * sign;
928  }
929  for (j = 7; j > 0; j--)
930  delay[j] = delay[j - 1];
931  delay[0] = buffer[i];
932  buffer[i] -= (unsigned)(dotprod >> 9);
933  }
934 }
935 
936 static void predictor_decode_stereo_3800(APEContext *ctx, int count)
937 {
938  APEPredictor *p = &ctx->predictor;
939  int32_t *decoded0 = ctx->decoded[0];
940  int32_t *decoded1 = ctx->decoded[1];
941  int start = 4, shift = 10;
942 
943  if (ctx->compression_level == COMPRESSION_LEVEL_HIGH) {
944  start = 16;
945  long_filter_high_3800(decoded0, 16, 9, count);
946  long_filter_high_3800(decoded1, 16, 9, count);
947  } else if (ctx->compression_level == COMPRESSION_LEVEL_EXTRA_HIGH) {
948  int order = 128, shift2 = 11;
949 
950  if (ctx->fileversion >= 3830) {
951  order <<= 1;
952  shift++;
953  shift2++;
954  long_filter_ehigh_3830(decoded0 + order, count - order);
955  long_filter_ehigh_3830(decoded1 + order, count - order);
956  }
957  start = order;
958  long_filter_high_3800(decoded0, order, shift2, count);
959  long_filter_high_3800(decoded1, order, shift2, count);
960  }
961 
962  while (count--) {
963  int X = *decoded0, Y = *decoded1;
964  if (ctx->compression_level == COMPRESSION_LEVEL_FAST) {
965  *decoded0 = filter_fast_3320(p, Y, 0, YDELAYA);
966  decoded0++;
967  *decoded1 = filter_fast_3320(p, X, 1, XDELAYA);
968  decoded1++;
969  } else {
970  *decoded0 = filter_3800(p, Y, 0, YDELAYA, YDELAYB,
971  start, shift);
972  decoded0++;
973  *decoded1 = filter_3800(p, X, 1, XDELAYA, XDELAYB,
974  start, shift);
975  decoded1++;
976  }
977 
978  /* Combined */
979  p->buf++;
980  p->sample_pos++;
981 
982  /* Have we filled the history buffer? */
983  if (p->buf == p->historybuffer + HISTORY_SIZE) {
984  memmove(p->historybuffer, p->buf,
985  PREDICTOR_SIZE * sizeof(*p->historybuffer));
986  p->buf = p->historybuffer;
987  }
988  }
989 }
990 
991 static void predictor_decode_mono_3800(APEContext *ctx, int count)
992 {
993  APEPredictor *p = &ctx->predictor;
994  int32_t *decoded0 = ctx->decoded[0];
995  int start = 4, shift = 10;
996 
997  if (ctx->compression_level == COMPRESSION_LEVEL_HIGH) {
998  start = 16;
999  long_filter_high_3800(decoded0, 16, 9, count);
1000  } else if (ctx->compression_level == COMPRESSION_LEVEL_EXTRA_HIGH) {
1001  int order = 128, shift2 = 11;
1002 
1003  if (ctx->fileversion >= 3830) {
1004  order <<= 1;
1005  shift++;
1006  shift2++;
1007  long_filter_ehigh_3830(decoded0 + order, count - order);
1008  }
1009  start = order;
1010  long_filter_high_3800(decoded0, order, shift2, count);
1011  }
1012 
1013  while (count--) {
1014  if (ctx->compression_level == COMPRESSION_LEVEL_FAST) {
1015  *decoded0 = filter_fast_3320(p, *decoded0, 0, YDELAYA);
1016  decoded0++;
1017  } else {
1018  *decoded0 = filter_3800(p, *decoded0, 0, YDELAYA, YDELAYB,
1019  start, shift);
1020  decoded0++;
1021  }
1022 
1023  /* Combined */
1024  p->buf++;
1025  p->sample_pos++;
1026 
1027  /* Have we filled the history buffer? */
1028  if (p->buf == p->historybuffer + HISTORY_SIZE) {
1029  memmove(p->historybuffer, p->buf,
1030  PREDICTOR_SIZE * sizeof(*p->historybuffer));
1031  p->buf = p->historybuffer;
1032  }
1033  }
1034 }
1035 
1036 static av_always_inline int predictor_update_3930(APEPredictor *p,
1037  const int decoded, const int filter,
1038  const int delayA)
1039 {
1040  int32_t predictionA, sign;
1041  uint32_t d0, d1, d2, d3;
1042 
1043  p->buf[delayA] = p->lastA[filter];
1044  d0 = p->buf[delayA ];
1045  d1 = p->buf[delayA ] - (unsigned)p->buf[delayA - 1];
1046  d2 = p->buf[delayA - 1] - (unsigned)p->buf[delayA - 2];
1047  d3 = p->buf[delayA - 2] - (unsigned)p->buf[delayA - 3];
1048 
1049  predictionA = d0 * p->coeffsA[filter][0] +
1050  d1 * p->coeffsA[filter][1] +
1051  d2 * p->coeffsA[filter][2] +
1052  d3 * p->coeffsA[filter][3];
1053 
1054  p->lastA[filter] = decoded + (predictionA >> 9);
1055  p->filterA[filter] = p->lastA[filter] + ((int)(p->filterA[filter] * 31U) >> 5);
1056 
1057  sign = APESIGN(decoded);
1058  p->coeffsA[filter][0] += (((int32_t)d0 < 0) * 2 - 1) * sign;
1059  p->coeffsA[filter][1] += (((int32_t)d1 < 0) * 2 - 1) * sign;
1060  p->coeffsA[filter][2] += (((int32_t)d2 < 0) * 2 - 1) * sign;
1061  p->coeffsA[filter][3] += (((int32_t)d3 < 0) * 2 - 1) * sign;
1062 
1063  return p->filterA[filter];
1064 }
1065 
1066 static void predictor_decode_stereo_3930(APEContext *ctx, int count)
1067 {
1068  APEPredictor *p = &ctx->predictor;
1069  int32_t *decoded0 = ctx->decoded[0];
1070  int32_t *decoded1 = ctx->decoded[1];
1071 
1072  ape_apply_filters(ctx, ctx->decoded[0], ctx->decoded[1], count);
1073 
1074  while (count--) {
1075  /* Predictor Y */
1076  int Y = *decoded1, X = *decoded0;
1077  *decoded0 = predictor_update_3930(p, Y, 0, YDELAYA);
1078  decoded0++;
1079  *decoded1 = predictor_update_3930(p, X, 1, XDELAYA);
1080  decoded1++;
1081 
1082  /* Combined */
1083  p->buf++;
1084 
1085  /* Have we filled the history buffer? */
1086  if (p->buf == p->historybuffer + HISTORY_SIZE) {
1087  memmove(p->historybuffer, p->buf,
1088  PREDICTOR_SIZE * sizeof(*p->historybuffer));
1089  p->buf = p->historybuffer;
1090  }
1091  }
1092 }
1093 
1094 static void predictor_decode_mono_3930(APEContext *ctx, int count)
1095 {
1096  APEPredictor *p = &ctx->predictor;
1097  int32_t *decoded0 = ctx->decoded[0];
1098 
1099  ape_apply_filters(ctx, ctx->decoded[0], NULL, count);
1100 
1101  while (count--) {
1102  *decoded0 = predictor_update_3930(p, *decoded0, 0, YDELAYA);
1103  decoded0++;
1104 
1105  p->buf++;
1106 
1107  /* Have we filled the history buffer? */
1108  if (p->buf == p->historybuffer + HISTORY_SIZE) {
1109  memmove(p->historybuffer, p->buf,
1110  PREDICTOR_SIZE * sizeof(*p->historybuffer));
1111  p->buf = p->historybuffer;
1112  }
1113  }
1114 }
1115 
1116 static av_always_inline int predictor_update_filter(APEPredictor *p,
1117  const int decoded, const int filter,
1118  const int delayA, const int delayB,
1119  const int adaptA, const int adaptB)
1120 {
1121  int32_t predictionA, predictionB, sign;
1122 
1123  p->buf[delayA] = p->lastA[filter];
1124  p->buf[adaptA] = APESIGN(p->buf[delayA]);
1125  p->buf[delayA - 1] = p->buf[delayA] - (unsigned)p->buf[delayA - 1];
1126  p->buf[adaptA - 1] = APESIGN(p->buf[delayA - 1]);
1127 
1128  predictionA = p->buf[delayA ] * p->coeffsA[filter][0] +
1129  p->buf[delayA - 1] * p->coeffsA[filter][1] +
1130  p->buf[delayA - 2] * p->coeffsA[filter][2] +
1131  p->buf[delayA - 3] * p->coeffsA[filter][3];
1132 
1133  /* Apply a scaled first-order filter compression */
1134  p->buf[delayB] = p->filterA[filter ^ 1] - ((int)(p->filterB[filter] * 31U) >> 5);
1135  p->buf[adaptB] = APESIGN(p->buf[delayB]);
1136  p->buf[delayB - 1] = p->buf[delayB] - (unsigned)p->buf[delayB - 1];
1137  p->buf[adaptB - 1] = APESIGN(p->buf[delayB - 1]);
1138  p->filterB[filter] = p->filterA[filter ^ 1];
1139 
1140  predictionB = p->buf[delayB ] * p->coeffsB[filter][0] +
1141  p->buf[delayB - 1] * p->coeffsB[filter][1] +
1142  p->buf[delayB - 2] * p->coeffsB[filter][2] +
1143  p->buf[delayB - 3] * p->coeffsB[filter][3] +
1144  p->buf[delayB - 4] * p->coeffsB[filter][4];
1145 
1146  p->lastA[filter] = decoded + ((int)((unsigned)predictionA + (predictionB >> 1)) >> 10);
1147  p->filterA[filter] = p->lastA[filter] + ((int)(p->filterA[filter] * 31U) >> 5);
1148 
1149  sign = APESIGN(decoded);
1150  p->coeffsA[filter][0] += p->buf[adaptA ] * sign;
1151  p->coeffsA[filter][1] += p->buf[adaptA - 1] * sign;
1152  p->coeffsA[filter][2] += p->buf[adaptA - 2] * sign;
1153  p->coeffsA[filter][3] += p->buf[adaptA - 3] * sign;
1154  p->coeffsB[filter][0] += p->buf[adaptB ] * sign;
1155  p->coeffsB[filter][1] += p->buf[adaptB - 1] * sign;
1156  p->coeffsB[filter][2] += p->buf[adaptB - 2] * sign;
1157  p->coeffsB[filter][3] += p->buf[adaptB - 3] * sign;
1158  p->coeffsB[filter][4] += p->buf[adaptB - 4] * sign;
1159 
1160  return p->filterA[filter];
1161 }
1162 
1163 static void predictor_decode_stereo_3950(APEContext *ctx, int count)
1164 {
1165  APEPredictor *p = &ctx->predictor;
1166  int32_t *decoded0 = ctx->decoded[0];
1167  int32_t *decoded1 = ctx->decoded[1];
1168 
1169  ape_apply_filters(ctx, ctx->decoded[0], ctx->decoded[1], count);
1170 
1171  while (count--) {
1172  /* Predictor Y */
1173  *decoded0 = predictor_update_filter(p, *decoded0, 0, YDELAYA, YDELAYB,
1174  YADAPTCOEFFSA, YADAPTCOEFFSB);
1175  decoded0++;
1176  *decoded1 = predictor_update_filter(p, *decoded1, 1, XDELAYA, XDELAYB,
1177  XADAPTCOEFFSA, XADAPTCOEFFSB);
1178  decoded1++;
1179 
1180  /* Combined */
1181  p->buf++;
1182 
1183  /* Have we filled the history buffer? */
1184  if (p->buf == p->historybuffer + HISTORY_SIZE) {
1185  memmove(p->historybuffer, p->buf,
1186  PREDICTOR_SIZE * sizeof(*p->historybuffer));
1187  p->buf = p->historybuffer;
1188  }
1189  }
1190 }
1191 
1192 static void predictor_decode_mono_3950(APEContext *ctx, int count)
1193 {
1194  APEPredictor *p = &ctx->predictor;
1195  int32_t *decoded0 = ctx->decoded[0];
1196  int32_t predictionA, currentA, A, sign;
1197 
1198  ape_apply_filters(ctx, ctx->decoded[0], NULL, count);
1199 
1200  currentA = p->lastA[0];
1201 
1202  while (count--) {
1203  A = *decoded0;
1204 
1205  p->buf[YDELAYA] = currentA;
1206  p->buf[YDELAYA - 1] = p->buf[YDELAYA] - (unsigned)p->buf[YDELAYA - 1];
1207 
1208  predictionA = p->buf[YDELAYA ] * p->coeffsA[0][0] +
1209  p->buf[YDELAYA - 1] * p->coeffsA[0][1] +
1210  p->buf[YDELAYA - 2] * p->coeffsA[0][2] +
1211  p->buf[YDELAYA - 3] * p->coeffsA[0][3];
1212 
1213  currentA = A + (unsigned)(predictionA >> 10);
1214 
1215  p->buf[YADAPTCOEFFSA] = APESIGN(p->buf[YDELAYA ]);
1216  p->buf[YADAPTCOEFFSA - 1] = APESIGN(p->buf[YDELAYA - 1]);
1217 
1218  sign = APESIGN(A);
1219  p->coeffsA[0][0] += p->buf[YADAPTCOEFFSA ] * sign;
1220  p->coeffsA[0][1] += p->buf[YADAPTCOEFFSA - 1] * sign;
1221  p->coeffsA[0][2] += p->buf[YADAPTCOEFFSA - 2] * sign;
1222  p->coeffsA[0][3] += p->buf[YADAPTCOEFFSA - 3] * sign;
1223 
1224  p->buf++;
1225 
1226  /* Have we filled the history buffer? */
1227  if (p->buf == p->historybuffer + HISTORY_SIZE) {
1228  memmove(p->historybuffer, p->buf,
1229  PREDICTOR_SIZE * sizeof(*p->historybuffer));
1230  p->buf = p->historybuffer;
1231  }
1232 
1233  p->filterA[0] = currentA + (unsigned)((int)(p->filterA[0] * 31U) >> 5);
1234  *(decoded0++) = p->filterA[0];
1235  }
1236 
1237  p->lastA[0] = currentA;
1238 }
1239 
1240 static void do_init_filter(APEFilter *f, int16_t *buf, int order)
1241 {
1242  f->coeffs = buf;
1243  f->historybuffer = buf + order;
1244  f->delay = f->historybuffer + order * 2;
1245  f->adaptcoeffs = f->historybuffer + order;
1246 
1247  memset(f->historybuffer, 0, (order * 2) * sizeof(*f->historybuffer));
1248  memset(f->coeffs, 0, order * sizeof(*f->coeffs));
1249  f->avg = 0;
1250 }
1251 
1252 static void init_filter(APEContext *ctx, APEFilter *f, int16_t *buf, int order)
1253 {
1254  do_init_filter(&f[0], buf, order);
1255  do_init_filter(&f[1], buf + order * 3 + HISTORY_SIZE, order);
1256 }
1257 
1258 static void do_apply_filter(APEContext *ctx, int version, APEFilter *f,
1259  int32_t *data, int count, int order, int fracbits)
1260 {
1261  int res;
1262  int absres;
1263 
1264  while (count--) {
1265  /* round fixedpoint scalar product */
1266  res = ctx->adsp.scalarproduct_and_madd_int16(f->coeffs,
1267  f->delay - order,
1268  f->adaptcoeffs - order,
1269  order, APESIGN(*data));
1270  res = (int)(res + (1U << (fracbits - 1))) >> fracbits;
1271  res += (unsigned)*data;
1272  *data++ = res;
1273 
1274  /* Update the output history */
1275  *f->delay++ = av_clip_int16(res);
1276 
1277  if (version < 3980) {
1278  /* Version ??? to < 3.98 files (untested) */
1279  f->adaptcoeffs[0] = (res == 0) ? 0 : ((res >> 28) & 8) - 4;
1280  f->adaptcoeffs[-4] >>= 1;
1281  f->adaptcoeffs[-8] >>= 1;
1282  } else {
1283  /* Version 3.98 and later files */
1284 
1285  /* Update the adaption coefficients */
1286  absres = res < 0 ? -(unsigned)res : res;
1287  if (absres)
1288  *f->adaptcoeffs = APESIGN(res) *
1289  (8 << ((absres > f->avg * 3LL) + (absres > (f->avg + f->avg / 3))));
1290  /* equivalent to the following code
1291  if (absres <= f->avg * 4 / 3)
1292  *f->adaptcoeffs = APESIGN(res) * 8;
1293  else if (absres <= f->avg * 3)
1294  *f->adaptcoeffs = APESIGN(res) * 16;
1295  else
1296  *f->adaptcoeffs = APESIGN(res) * 32;
1297  */
1298  else
1299  *f->adaptcoeffs = 0;
1300 
1301  f->avg += (int)(absres - (unsigned)f->avg) / 16;
1302 
1303  f->adaptcoeffs[-1] >>= 1;
1304  f->adaptcoeffs[-2] >>= 1;
1305  f->adaptcoeffs[-8] >>= 1;
1306  }
1307 
1308  f->adaptcoeffs++;
1309 
1310  /* Have we filled the history buffer? */
1311  if (f->delay == f->historybuffer + HISTORY_SIZE + (order * 2)) {
1312  memmove(f->historybuffer, f->delay - (order * 2),
1313  (order * 2) * sizeof(*f->historybuffer));
1314  f->delay = f->historybuffer + order * 2;
1315  f->adaptcoeffs = f->historybuffer + order;
1316  }
1317  }
1318 }
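/* Each stage is a sign-sign adaptive FIR: scalarproduct_and_madd_int16()
 * both computes the fixed-point prediction (rounded down by fracbits) and
 * updates the coefficients with the adaptation vector scaled by the sign of
 * the current sample; for 3.98+ files the newly stored adaptation value is
 * 8, 16 or 32 depending on how |res| compares with the running average
 * f->avg. */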
1319 
1320 static void apply_filter(APEContext *ctx, APEFilter *f,
1321  int32_t *data0, int32_t *data1,
1322  int count, int order, int fracbits)
1323 {
1324  do_apply_filter(ctx, ctx->fileversion, &f[0], data0, count, order, fracbits);
1325  if (data1)
1326  do_apply_filter(ctx, ctx->fileversion, &f[1], data1, count, order, fracbits);
1327 }
1328 
1329 static void ape_apply_filters(APEContext *ctx, int32_t *decoded0,
1330  int32_t *decoded1, int count)
1331 {
1332  int i;
1333 
1334  for (i = 0; i < APE_FILTER_LEVELS; i++) {
1335  if (!ape_filter_orders[ctx->fset][i])
1336  break;
1337  apply_filter(ctx, ctx->filters[i], decoded0, decoded1, count,
1338  ape_filter_orders[ctx->fset][i],
1339  ape_filter_fracbits[ctx->fset][i]);
1340  }
1341 }
1342 
1343 static int init_frame_decoder(APEContext *ctx)
1344 {
1345  int i, ret;
1346  if ((ret = init_entropy_decoder(ctx)) < 0)
1347  return ret;
1348  init_predictor_decoder(ctx);
1349 
1350  for (i = 0; i < APE_FILTER_LEVELS; i++) {
1351  if (!ape_filter_orders[ctx->fset][i])
1352  break;
1353  init_filter(ctx, ctx->filters[i], ctx->filterbuf[i],
1354  ape_filter_orders[ctx->fset][i]);
1355  }
1356  return 0;
1357 }
1358 
1359 static void ape_unpack_mono(APEContext *ctx, int count)
1360 {
1361  if (ctx->frameflags & APE_FRAMECODE_STEREO_SILENCE) {
1362  /* We are pure silence, so we're done. */
1363  av_log(ctx->avctx, AV_LOG_DEBUG, "pure silence mono\n");
1364  return;
1365  }
1366 
1367  ctx->entropy_decode_mono(ctx, count);
1368 
1369  /* Now apply the predictor decoding */
1370  ctx->predictor_decode_mono(ctx, count);
1371 
1372  /* Pseudo-stereo - just copy left channel to right channel */
1373  if (ctx->channels == 2) {
1374  memcpy(ctx->decoded[1], ctx->decoded[0], count * sizeof(*ctx->decoded[1]));
1375  }
1376 }
1377 
1378 static void ape_unpack_stereo(APEContext *ctx, int count)
1379 {
1380  unsigned left, right;
1381  int32_t *decoded0 = ctx->decoded[0];
1382  int32_t *decoded1 = ctx->decoded[1];
1383 
1384  if ((ctx->frameflags & APE_FRAMECODE_STEREO_SILENCE) == APE_FRAMECODE_STEREO_SILENCE) {
1385  /* We are pure silence, so we're done. */
1386  av_log(ctx->avctx, AV_LOG_DEBUG, "pure silence stereo\n");
1387  return;
1388  }
1389 
1390  ctx->entropy_decode_stereo(ctx, count);
1391 
1392  /* Now apply the predictor decoding */
1393  ctx->predictor_decode_stereo(ctx, count);
1394 
1395  /* Decorrelate and scale to output depth */
1396  while (count--) {
1397  left = *decoded1 - (unsigned)(*decoded0 / 2);
1398  right = left + *decoded0;
1399 
1400  *(decoded0++) = left;
1401  *(decoded1++) = right;
1402  }
1403 }
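/* Inverse of the encoder's mid/side-style transform: decoded0 carries the
 * channel difference X and decoded1 the pseudo-average Y, so
 * left = Y - X / 2 and right = left + X; e.g. X = 2, Y = 11 yields
 * left = 10, right = 12. */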
1404 
1405 static int ape_decode_frame(AVCodecContext *avctx, void *data,
1406  int *got_frame_ptr, AVPacket *avpkt)
1407 {
1408  AVFrame *frame = data;
1409  const uint8_t *buf = avpkt->data;
1410  APEContext *s = avctx->priv_data;
1411  uint8_t *sample8;
1412  int16_t *sample16;
1413  int32_t *sample24;
1414  int i, ch, ret;
1415  int blockstodecode;
1416  uint64_t decoded_buffer_size;
1417 
1418  /* this should never be negative, but bad things will happen if it is, so
1419  check it just to make sure. */
1420  av_assert0(s->samples >= 0);
1421 
1422  if(!s->samples){
1423  uint32_t nblocks, offset;
1424  int buf_size;
1425 
1426  if (!avpkt->size) {
1427  *got_frame_ptr = 0;
1428  return 0;
1429  }
1430  if (avpkt->size < 8) {
1431  av_log(avctx, AV_LOG_ERROR, "Packet is too small\n");
1432  return AVERROR_INVALIDDATA;
1433  }
1434  buf_size = avpkt->size & ~3;
1435  if (buf_size != avpkt->size) {
1436  av_log(avctx, AV_LOG_WARNING, "packet size is not a multiple of 4. "
1437  "extra bytes at the end will be skipped.\n");
1438  }
1439  if (s->fileversion < 3950) // previous versions overread two bytes
1440  buf_size += 2;
1441  av_fast_padded_malloc(&s->data, &s->data_size, buf_size);
1442  if (!s->data)
1443  return AVERROR(ENOMEM);
1444  s->bdsp.bswap_buf((uint32_t *) s->data, (const uint32_t *) buf,
1445  buf_size >> 2);
1446  memset(s->data + (buf_size & ~3), 0, buf_size & 3);
1447  s->ptr = s->data;
1448  s->data_end = s->data + buf_size;
1449 
1450  nblocks = bytestream_get_be32(&s->ptr);
1451  offset = bytestream_get_be32(&s->ptr);
1452  if (s->fileversion >= 3900) {
1453  if (offset > 3) {
1454  av_log(avctx, AV_LOG_ERROR, "Incorrect offset passed\n");
1455  av_freep(&s->data);
1456  s->data_size = 0;
1457  return AVERROR_INVALIDDATA;
1458  }
1459  if (s->data_end - s->ptr < offset) {
1460  av_log(avctx, AV_LOG_ERROR, "Packet is too small\n");
1461  return AVERROR_INVALIDDATA;
1462  }
1463  s->ptr += offset;
1464  } else {
1465  if ((ret = init_get_bits8(&s->gb, s->ptr, s->data_end - s->ptr)) < 0)
1466  return ret;
1467  if (s->fileversion > 3800)
1468  skip_bits_long(&s->gb, offset * 8);
1469  else
1470  skip_bits_long(&s->gb, offset);
1471  }
1472 
1473  if (!nblocks || nblocks > INT_MAX / 2 / sizeof(*s->decoded_buffer) - 8) {
1474  av_log(avctx, AV_LOG_ERROR, "Invalid sample count: %"PRIu32".\n",
1475  nblocks);
1476  return AVERROR_INVALIDDATA;
1477  }
1478 
1479  /* Initialize the frame decoder */
1480  if (init_frame_decoder(s) < 0) {
1481  av_log(avctx, AV_LOG_ERROR, "Error reading frame header\n");
1482  return AVERROR_INVALIDDATA;
1483  }
1484  s->samples = nblocks;
1485  }
1486 
1487  if (!s->data) {
1488  *got_frame_ptr = 0;
1489  return avpkt->size;
1490  }
1491 
1492  blockstodecode = FFMIN(s->blocks_per_loop, s->samples);
1493  // for old files coefficients were not interleaved,
1494  // so we need to decode all of them at once
1495  if (s->fileversion < 3930)
1496  blockstodecode = s->samples;
1497 
1498  /* reallocate decoded sample buffer if needed */
1499  decoded_buffer_size = 2LL * FFALIGN(blockstodecode, 8) * sizeof(*s->decoded_buffer);
1500  av_assert0(decoded_buffer_size <= INT_MAX);
1501  av_fast_malloc(&s->decoded_buffer, &s->decoded_size, decoded_buffer_size);
1502  if (!s->decoded_buffer)
1503  return AVERROR(ENOMEM);
1504  memset(s->decoded_buffer, 0, decoded_buffer_size);
1505  s->decoded[0] = s->decoded_buffer;
1506  s->decoded[1] = s->decoded_buffer + FFALIGN(blockstodecode, 8);
1507 
1508  /* get output buffer */
1509  frame->nb_samples = blockstodecode;
1510  if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
1511  return ret;
1512 
1513  s->error=0;
1514 
1515  if ((s->channels == 1) || (s->frameflags & APE_FRAMECODE_PSEUDO_STEREO))
1516  ape_unpack_mono(s, blockstodecode);
1517  else
1518  ape_unpack_stereo(s, blockstodecode);
1519  emms_c();
1520 
1521  if (s->error) {
1522  s->samples=0;
1523  av_log(avctx, AV_LOG_ERROR, "Error decoding frame\n");
1524  return AVERROR_INVALIDDATA;
1525  }
1526 
1527  switch (s->bps) {
1528  case 8:
1529  for (ch = 0; ch < s->channels; ch++) {
1530  sample8 = (uint8_t *)frame->data[ch];
1531  for (i = 0; i < blockstodecode; i++)
1532  *sample8++ = (s->decoded[ch][i] + 0x80U) & 0xff;
1533  }
1534  break;
1535  case 16:
1536  for (ch = 0; ch < s->channels; ch++) {
1537  sample16 = (int16_t *)frame->data[ch];
1538  for (i = 0; i < blockstodecode; i++)
1539  *sample16++ = s->decoded[ch][i];
1540  }
1541  break;
1542  case 24:
1543  for (ch = 0; ch < s->channels; ch++) {
1544  sample24 = (int32_t *)frame->data[ch];
1545  for (i = 0; i < blockstodecode; i++)
1546  *sample24++ = s->decoded[ch][i] * 256U;
1547  }
1548  break;
1549  }
1550 
1551  s->samples -= blockstodecode;
1552 
1553  *got_frame_ptr = 1;
1554 
1555  return !s->samples ? avpkt->size : 0;
1556 }
1557 
1558 static void ape_flush(AVCodecContext *avctx)
1559 {
1560  APEContext *s = avctx->priv_data;
1561  s->samples= 0;
1562 }
1563 
1564 #define OFFSET(x) offsetof(APEContext, x)
1565 #define PAR (AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_AUDIO_PARAM)
1566 static const AVOption options[] = {
1567  { "max_samples", "maximum number of samples decoded per call", OFFSET(blocks_per_loop), AV_OPT_TYPE_INT, { .i64 = 4608 }, 1, INT_MAX, PAR, "max_samples" },
1568  { "all", "no maximum. decode all samples for each packet at once", 0, AV_OPT_TYPE_CONST, { .i64 = INT_MAX }, INT_MIN, INT_MAX, PAR, "max_samples" },
1569  { NULL},
1570 };
1571 
1572 static const AVClass ape_decoder_class = {
1573  .class_name = "APE decoder",
1574  .item_name = av_default_item_name,
1575  .option = options,
1576  .version = LIBAVUTIL_VERSION_INT,
1577 };
1578 
1579 AVCodec ff_ape_decoder = {
1580  .name = "ape",
1581  .long_name = NULL_IF_CONFIG_SMALL("Monkey's Audio"),
1582  .type = AVMEDIA_TYPE_AUDIO,
1583  .id = AV_CODEC_ID_APE,
1584  .priv_data_size = sizeof(APEContext),
1585  .init = ape_decode_init,
1586  .close = ape_decode_close,
1587  .decode = ape_decode_frame,
1588  .capabilities = AV_CODEC_CAP_SUBFRAMES | AV_CODEC_CAP_DELAY |
1589  AV_CODEC_CAP_DR1,
1590  .flush = ape_flush,
1591  .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_U8P,
1592  AV_SAMPLE_FMT_S16P,
1593  AV_SAMPLE_FMT_S32P,
1594  AV_SAMPLE_FMT_NONE },
1595  .priv_class = &ape_decoder_class,
1596 };
APEContext::avctx
AVCodecContext * avctx
Definition: apedec.c:138
APEContext::riceX
APERice riceX
rice code parameters for the second channel
Definition: apedec.c:162
entropy_decode_stereo_3860
static void entropy_decode_stereo_3860(APEContext *ctx, int blockstodecode)
Definition: apedec.c:654
YADAPTCOEFFSB
#define YADAPTCOEFFSB
Definition: apedec.c:60
AVCodec
AVCodec.
Definition: avcodec.h:3481
bswapdsp.h
skip_bits_long
static void skip_bits_long(GetBitContext *s, int n)
Skips the specified number of bits.
Definition: get_bits.h:291
XDELAYA
#define XDELAYA
Definition: apedec.c:55
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
init_frame_decoder
static int init_frame_decoder(APEContext *ctx)
Definition: apedec.c:1343
APEContext::data
uint8_t * data
current frame data
Definition: apedec.c:167
range_start_decoding
static void range_start_decoding(APEContext *ctx)
Start the decoder.
Definition: apedec.c:318
apply_filter
static void apply_filter(APEContext *ctx, APEFilter *f, int32_t *data0, int32_t *data1, int count, int order, int fracbits)
Definition: apedec.c:1320
PREDICTOR_SIZE
#define PREDICTOR_SIZE
Total size of all predictor histories.
Definition: apedec.c:51
init
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:849
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
AV_CODEC_ID_APE
@ AV_CODEC_ID_APE
Definition: avcodec.h:596
AVCodecContext::channel_layout
uint64_t channel_layout
Audio channel layout.
Definition: avcodec.h:2276
out
FILE * out
Definition: movenc.c:54
APEContext::filterbuf
int16_t * filterbuf[APE_FILTER_LEVELS]
filter memory
Definition: apedec.c:159
APEPredictor::coeffsB
int32_t coeffsB[2][5]
adaption coefficients
Definition: apedec.c:129
n
int n
Definition: avisynth_c.h:760
sample_fmts
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:686
APE_FILTER_LEVELS
#define APE_FILTER_LEVELS
Definition: apedec.c:76
APERice
Definition: apedec.c:107
APERangecoder::low
uint32_t low
low end of interval
Definition: apedec.c:113
predictor_update_filter
static av_always_inline int predictor_update_filter(APEPredictor *p, const int decoded, const int filter, const int delayA, const int delayB, const int adaptA, const int adaptB)
Definition: apedec.c:1116
AV_CH_LAYOUT_MONO
#define AV_CH_LAYOUT_MONO
Definition: channel_layout.h:85
get_bits_long
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
Definition: get_bits.h:546
ch
uint8_t pi<< 24) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_U8,(uint64_t)((*(const uint8_t *) pi - 0x80U))<< 56) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi - 0x80) *(1.0f/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi - 0x80) *(1.0/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16,(*(const int16_t *) pi >>8)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S16,(uint64_t)(*(const int16_t *) pi)<< 48) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, *(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, *(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32,(*(const int32_t *) pi >>24)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S32,(uint64_t)(*(const int32_t *) pi)<< 32) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, *(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, *(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S64,(*(const int64_t *) pi >>56)+0x80) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S64, *(const int64_t *) pi *(1.0f/(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S64, *(const int64_t *) pi *(1.0/(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_FLT, llrintf(*(const float *) pi *(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_DBL, llrint(*(const double *) pi *(INT64_C(1)<< 63))) #define FMT_PAIR_FUNC(out, in) static conv_func_type *const fmt_pair_to_conv_functions[AV_SAMPLE_FMT_NB *AV_SAMPLE_FMT_NB]={ FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLT), 
FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S64), };static void cpy1(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, len);} static void cpy2(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 2 *len);} static void cpy4(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 4 *len);} static void cpy8(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 8 *len);} AudioConvert *swri_audio_convert_alloc(enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, const int *ch_map, int flags) { AudioConvert *ctx;conv_func_type *f=fmt_pair_to_conv_functions[av_get_packed_sample_fmt(out_fmt)+AV_SAMPLE_FMT_NB *av_get_packed_sample_fmt(in_fmt)];if(!f) return NULL;ctx=av_mallocz(sizeof(*ctx));if(!ctx) return NULL;if(channels==1){ in_fmt=av_get_planar_sample_fmt(in_fmt);out_fmt=av_get_planar_sample_fmt(out_fmt);} ctx->channels=channels;ctx->conv_f=f;ctx->ch_map=ch_map;if(in_fmt==AV_SAMPLE_FMT_U8||in_fmt==AV_SAMPLE_FMT_U8P) memset(ctx->silence, 0x80, sizeof(ctx->silence));if(out_fmt==in_fmt &&!ch_map) { switch(av_get_bytes_per_sample(in_fmt)){ case 1:ctx->simd_f=cpy1;break;case 2:ctx->simd_f=cpy2;break;case 4:ctx->simd_f=cpy4;break;case 8:ctx->simd_f=cpy8;break;} } if(HAVE_X86ASM &&1) swri_audio_convert_init_x86(ctx, out_fmt, in_fmt, channels);if(ARCH_ARM) swri_audio_convert_init_arm(ctx, out_fmt, in_fmt, channels);if(ARCH_AARCH64) swri_audio_convert_init_aarch64(ctx, out_fmt, in_fmt, channels);return ctx;} void swri_audio_convert_free(AudioConvert **ctx) { av_freep(ctx);} int swri_audio_convert(AudioConvert *ctx, AudioData *out, AudioData *in, int len) { int ch;int off=0;const int os=(out->planar ? 1 :out->ch_count) *out->bps;unsigned misaligned=0;av_assert0(ctx->channels==out->ch_count);if(ctx->in_simd_align_mask) { int planes=in->planar ? in->ch_count :1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) in->ch[ch];misaligned|=m &ctx->in_simd_align_mask;} if(ctx->out_simd_align_mask) { int planes=out->planar ? out->ch_count :1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) out->ch[ch];misaligned|=m &ctx->out_simd_align_mask;} if(ctx->simd_f &&!ctx->ch_map &&!misaligned){ off=len &~15;av_assert1(off >=0);av_assert1(off<=len);av_assert2(ctx->channels==SWR_CH_MAX||!in->ch[ctx->channels]);if(off >0){ if(out->planar==in->planar){ int planes=out->planar ? out->ch_count :1;for(ch=0;ch< planes;ch++){ ctx->simd_f(out-> ch ch
Definition: audioconvert.c:56
count
void INT64 INT64 count
Definition: avisynth_c.h:767
init_entropy_decoder
static int init_entropy_decoder(APEContext *ctx)
Definition: apedec.c:720
counts_diff_3980
static const uint16_t counts_diff_3980[21]
Probability ranges for symbols in Monkey's Audio version 3.98.
Definition: apedec.c:420
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:295
ape_decoder_class
static const AVClass ape_decoder_class
Definition: apedec.c:1572
internal.h
AVPacket::data
uint8_t * data
Definition: avcodec.h:1477
entropy_decode_stereo_3930
static void entropy_decode_stereo_3930(APEContext *ctx, int blockstodecode)
Definition: apedec.c:690
AVOption
AVOption.
Definition: opt.h:246
predictor_decode_mono_3930
static void predictor_decode_mono_3930(APEContext *ctx, int count)
Definition: apedec.c:1094
APEContext::filters
APEFilter filters[APE_FILTER_LEVELS][2]
filters used for reconstruction
Definition: apedec.c:164
long_filter_ehigh_3830
static void long_filter_ehigh_3830(int32_t *buffer, int length)
Definition: apedec.c:915
AV_SAMPLE_FMT_S32P
@ AV_SAMPLE_FMT_S32P
signed 32 bits, planar
Definition: samplefmt.h:68
update_rice
static void update_rice(APERice *rice, unsigned int x)
Definition: apedec.c:456
ff_ape_decoder
AVCodec ff_ape_decoder
Definition: apedec.c:1579
data
const char data[16]
Definition: mxf.c:91
APEContext::CRC
uint32_t CRC
frame CRC
Definition: apedec.c:150
XADAPTCOEFFSA
#define XADAPTCOEFFSA
Definition: apedec.c:59
entropy_decode_mono_3990
static void entropy_decode_mono_3990(APEContext *ctx, int blockstodecode)
Definition: apedec.c:701
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:192
base
uint8_t base
Definition: vp3data.h:202
filter
filter_frame(): called when a frame is pushed to the filter's input; see filter_design.txt for the full contract.
Definition: filter_design.txt:228
ape_flush
static void ape_flush(AVCodecContext *avctx)
Definition: apedec.c:1558
APEContext::predictor_decode_mono
void(* predictor_decode_mono)(struct APEContext *ctx, int count)
Definition: apedec.c:176
ape_filter_fracbits
static const uint8_t ape_filter_fracbits[5][APE_FILTER_LEVELS]
Filter fraction bits depending on compression level.
Definition: apedec.c:88
COMPRESSION_LEVEL_HIGH
@ COMPRESSION_LEVEL_HIGH
Definition: apedec.c:70
APEContext::compression_level
int compression_level
compression levels
Definition: apedec.c:146
APEPredictor
Filter histories.
Definition: apedec.c:120
APEContext::entropy_decode_stereo
void(* entropy_decode_stereo)(struct APEContext *ctx, int blockstodecode)
Definition: apedec.c:175
range_decode_bits
static int range_decode_bits(APEContext *ctx, int n)
Decode n bits (n <= 16) without modelling.
Definition: apedec.c:380
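As a rough illustration of how raw, un-modelled bits can be pulled out of the range coder, the sketch below combines range_decode_culshift() and range_decode_update(). It is a simplified reading of the interface described above, not a copy of the implementation, and the helper name is hypothetical.

/* Sketch: decode n raw bits (n <= 16) by treating all 2^n values as
 * equally likely, then removing the decoded interval of width 1. */
static int decode_raw_bits_sketch(APEContext *ctx, int n)
{
    int value = range_decode_culshift(ctx, n); /* cumulative value in [0, 2^n) */
    range_decode_update(ctx, 1, value);        /* drop the interval at 'value' */
    return value;
}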
YADAPTCOEFFSA
#define YADAPTCOEFFSA
Definition: apedec.c:58
ff_llauddsp_init
av_cold void ff_llauddsp_init(LLAudDSPContext *c)
Definition: lossless_audiodsp.c:56
A
#define A(x)
Definition: vp56_arith.h:28
predictor_decode_stereo_3930
static void predictor_decode_stereo_3930(APEContext *ctx, int count)
Definition: apedec.c:1066
return
Rules for flushing buffered frames and for request_frame(); see filter_design.txt.
Definition: filter_design.txt:264
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:379
predictor_decode_mono_3800
static void predictor_decode_mono_3800(APEContext *ctx, int count)
Definition: apedec.c:991
ape_decode_init
static av_cold int ape_decode_init(AVCodecContext *avctx)
Definition: apedec.c:215
U
#define U(x)
Definition: vp56_arith.h:37
ape_unpack_mono
static void ape_unpack_mono(APEContext *ctx, int count)
Definition: apedec.c:1359
start
void INT64 start
Definition: avisynth_c.h:767
APEContext::fileversion
int fileversion
codec version, very important in decoding process
Definition: apedec.c:145
GetBitContext
Definition: get_bits.h:61
ape_decode_value_3860
static int ape_decode_value_3860(APEContext *ctx, GetBitContext *gb, APERice *rice)
Definition: apedec.c:479
filter_3800
static av_always_inline int filter_3800(APEPredictor *p, const int decoded, const int filter, const int delayA, const int delayB, const int start, const int shift)
Definition: apedec.c:844
predictor_decode_stereo_3800
static void predictor_decode_stereo_3800(APEContext *ctx, int count)
Definition: apedec.c:936
APEPredictor::filterA
int32_t filterA[2]
Definition: apedec.c:125
AV_CH_LAYOUT_STEREO
#define AV_CH_LAYOUT_STEREO
Definition: channel_layout.h:86
options
static const AVOption options[]
Definition: apedec.c:1566
YDELAYB
#define YDELAYB
Definition: apedec.c:54
avassert.h
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
buf
void * buf
Definition: avisynth_c.h:766
av_cold
#define av_cold
Definition: attributes.h:84
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:677
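A minimal usage sketch (the buffer and the 16-bit field width are illustrative, not taken from apedec.c): initialize the reader over a byte buffer, check the return value, then read fixed-width fields with get_bits().

static int read_header_example(const uint8_t *buf, int size)
{
    GetBitContext gb;
    int ret = init_get_bits8(&gb, buf, size);  /* size is in bytes */
    if (ret < 0)
        return ret;                            /* propagate AVERROR codes */
    return get_bits(&gb, 16);                  /* read an illustrative 16-bit field */
}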
APEContext::rc
APERangecoder rc
rangecoder used to decode actual values
Definition: apedec.c:161
APEContext::samples
int samples
samples left to decode in current frame
Definition: apedec.c:142
APEContext::ptr
const uint8_t * ptr
current position in frame data
Definition: apedec.c:170
decode
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
Definition: decode_audio.c:42
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:1667
APEFilter::historybuffer
int16_t * historybuffer
filter memory
Definition: apedec.c:101
s
#define s(width, name)
Definition: cbs_vp9.c:257
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
MODEL_ELEMENTS
#define MODEL_ELEMENTS
Definition: apedec.c:388
do_init_filter
static void do_init_filter(APEFilter *f, int16_t *buf, int order)
Definition: apedec.c:1240
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
ctx
AVFormatContext * ctx
Definition: movenc.c:48
long_filter_high_3800
static void long_filter_high_3800(int32_t *buffer, int order, int shift, int length)
Definition: apedec.c:889
APEContext::decoded_buffer
int32_t * decoded_buffer
Definition: apedec.c:154
APE_FRAMECODE_STEREO_SILENCE
#define APE_FRAMECODE_STEREO_SILENCE
Definition: apedec.c:45
get_bits.h
AV_RL16
Read an unsigned 16-bit little-endian value (byte I/O macro).
Definition: bytestream.h:90
APERangecoder::buffer
unsigned int buffer
buffer for input/output
Definition: apedec.c:116
APERangecoder
Definition: apedec.c:112
do_apply_filter
static void do_apply_filter(APEContext *ctx, int version, APEFilter *f, int32_t *data, int count, int order, int fracbits)
Definition: apedec.c:1258
LLAudDSPContext
Definition: lossless_audiodsp.h:28
f
#define f(width, name)
Definition: cbs_vp9.c:255
entropy_decode_stereo_0000
static void entropy_decode_stereo_0000(APEContext *ctx, int blockstodecode)
Definition: apedec.c:638
version
int version
Definition: avisynth_c.h:858
APEContext::fset
int fset
which filter set to use (calculated from compression level)
Definition: apedec.c:147
int32_t
int32_t
Definition: audio_convert.c:194
COMPRESSION_LEVEL_FAST
@ COMPRESSION_LEVEL_FAST
Definition: apedec.c:68
if
if(ret)
Definition: filter_design.txt:179
OFFSET
#define OFFSET(x)
Definition: apedec.c:1564
ape_decode_value_3900
static int ape_decode_value_3900(APEContext *ctx, APERice *rice)
Definition: apedec.c:511
APEPredictor::historybuffer
int32_t historybuffer[HISTORY_SIZE+PREDICTOR_SIZE]
Definition: apedec.c:130
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
APEContext::frameflags
int frameflags
frame flags
Definition: apedec.c:151
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:67
ff_bswapdsp_init
av_cold void ff_bswapdsp_init(BswapDSPContext *c)
Definition: bswapdsp.c:49
flush
static void flush(AVCodecContext *avctx)
Definition: aacdec_template.c:500
NULL
#define NULL
Definition: coverity.c:32
APEPredictor::sample_pos
unsigned int sample_pos
Definition: apedec.c:132
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
range_decode_culshift
static int range_decode_culshift(APEContext *ctx, int shift)
Decode value with given size in bits.
Definition: apedec.c:359
entropy_decode_stereo_3900
static void entropy_decode_stereo_3900(APEContext *ctx, int blockstodecode)
Definition: apedec.c:674
entropy_decode_mono_3900
static void entropy_decode_mono_3900(APEContext *ctx, int blockstodecode)
Definition: apedec.c:666
counts_3970
static const uint16_t counts_3970[22]
Fixed probabilities for symbols in Monkey's Audio version 3.97.
Definition: apedec.c:393
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:191
predictor_update_3930
static av_always_inline int predictor_update_3930(APEPredictor *p, const int decoded, const int filter, const int delayA)
Definition: apedec.c:1036
init_predictor_decoder
static void init_predictor_decoder(APEContext *ctx)
Definition: apedec.c:773
APEPredictor::coeffsA
int32_t coeffsA[2][4]
adaption coefficients
Definition: apedec.c:128
COMPRESSION_LEVEL_EXTRA_HIGH
@ COMPRESSION_LEVEL_EXTRA_HIGH
Definition: apedec.c:71
APEContext
Decoder context.
Definition: apedec.c:136
range_decode_culfreq
static int range_decode_culfreq(APEContext *ctx, int tot_f)
Calculate cumulative frequency for next symbol.
Definition: apedec.c:347
get_unary
static int get_unary(GetBitContext *gb, int stop, int len)
Get unary code of limited length.
Definition: unary.h:46
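The following sketch shows one plausible shape for limited-length unary decoding, assuming get_bits1() from get_bits.h: bits are consumed until the stop bit appears or len bits have been read, and the count of preceding bits is the decoded value. The function name is hypothetical.

static int get_unary_sketch(GetBitContext *gb, int stop, int len)
{
    int i;
    for (i = 0; i < len && get_bits1(gb) != stop; i++)
        ;                                      /* count bits until 'stop' or 'len' */
    return i;
}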
for
for(j=16;j >0;--j)
Definition: h264pred_template.c:469
ff_get_buffer
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1965
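A hedged usage sketch for an audio decoder (the function and variable names here are illustrative): set frame->nb_samples before asking for storage, and propagate any error from the allocator.

static int alloc_output_example(AVCodecContext *avctx, AVFrame *frame,
                                int nb_samples)
{
    int ret;
    frame->nb_samples = nb_samples;            /* tell the allocator how much audio */
    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
        return ret;                            /* allocation failed */
    return 0;
}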
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: avcodec.h:981
AVPacket::size
int size
Definition: avcodec.h:1478
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:188
AV_SAMPLE_FMT_U8P
@ AV_SAMPLE_FMT_U8P
unsigned 8 bits, planar
Definition: samplefmt.h:66
APE_FRAMECODE_PSEUDO_STEREO
#define APE_FRAMECODE_PSEUDO_STEREO
Definition: apedec.c:46
APEContext::entropy_decode_mono
void(* entropy_decode_mono)(struct APEContext *ctx, int blockstodecode)
Definition: apedec.c:174
AVCodecContext::sample_fmt
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:2233
AV_SAMPLE_FMT_NONE
@ AV_SAMPLE_FMT_NONE
Definition: samplefmt.h:59
APERice::ksum
uint32_t ksum
Definition: apedec.c:109
APEFilter::coeffs
int16_t * coeffs
actual coefficients used in filtering
Definition: apedec.c:99
PAR
#define PAR
Definition: apedec.c:1565
APEFilter::delay
int16_t * delay
filtered values
Definition: apedec.c:102
APERangecoder::range
uint32_t range
length of interval
Definition: apedec.c:114
FFMIN
#define FFMIN(a, b)
Definition: common.h:96
initial_coeffs_a_3800
static const int32_t initial_coeffs_a_3800[3]
Definition: apedec.c:761
offset
It's the only field you need to keep, assuming you have a context; there is some magic around it you don't need to care about, just let it be.
Definition: writing_filters.txt:86
APEContext::predictor_decode_stereo
void(* predictor_decode_stereo)(struct APEContext *ctx, int count)
Definition: apedec.c:177
init_filter
static void init_filter(APEContext *ctx, APEFilter *f, int16_t *buf, int order)
Definition: apedec.c:1252
APEContext::error
int error
Definition: apedec.c:172
ape_decode_value_3990
static int ape_decode_value_3990(APEContext *ctx, APERice *rice)
Definition: apedec.c:545
unary.h
shift2
static const int shift2[6]
Definition: dxa.c:51
AV_SAMPLE_FMT_S16P
@ AV_SAMPLE_FMT_S16P
signed 16 bits, planar
Definition: samplefmt.h:67
Y
#define Y
Definition: boxblur.h:38
decode_array_0000
static void decode_array_0000(APEContext *ctx, GetBitContext *gb, int32_t *out, APERice *rice, int blockstodecode)
Definition: apedec.c:588
range_get_symbol
static int range_get_symbol(APEContext *ctx, const uint16_t counts[], const uint16_t counts_diff[])
Decode symbol.
Definition: apedec.c:432
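As a sketch of how a symbol might be decoded against the counts[]/counts_diff[] tables listed elsewhere on this page (cumulative frequencies and per-symbol frequencies, assuming a 16-bit total), one plausible shape is shown below; the helper name is hypothetical and details of the real routine may differ.

/* Sketch: map a 16-bit cumulative frequency back to a symbol index,
 * then remove that symbol's interval from the range coder state. */
static int get_symbol_sketch(APEContext *ctx,
                             const uint16_t counts[],
                             const uint16_t counts_diff[])
{
    int symbol;
    int cf = range_decode_culshift(ctx, 16);   /* cumulative frequency of next symbol */

    for (symbol = 0; counts[symbol + 1] <= cf; symbol++)
        ;                                      /* linear search over the model */

    range_decode_update(ctx, counts_diff[symbol], counts[symbol]);
    return symbol;
}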
AVCodecContext::channels
int channels
number of audio channels
Definition: avcodec.h:2226
ape_unpack_stereo
static void ape_unpack_stereo(APEContext *ctx, int count)
Definition: apedec.c:1378
AVCodecContext::bits_per_coded_sample
int bits_per_coded_sample
bits per sample/pixel from the demuxer (needed for huffyuv).
Definition: avcodec.h:2789
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:259
APEPredictor::buf
int32_t * buf
Definition: apedec.c:121
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:1666
range_decode_update
static void range_decode_update(APEContext *ctx, int sy_f, int lt_f)
Update decoding state.
Definition: apedec.c:373
APEContext::gb
GetBitContext gb
Definition: apedec.c:165
BOTTOM_VALUE
#define BOTTOM_VALUE
Definition: apedec.c:315
AVSampleFormat
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:58
MIN_CACHE_BITS
#define MIN_CACHE_BITS
Definition: get_bits.h:128
range_dec_normalize
static void range_dec_normalize(APEContext *ctx)
Perform normalization.
Definition: apedec.c:326
initial_coeffs_fast_3320
static const int32_t initial_coeffs_fast_3320[1]
Definition: apedec.c:757
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour as av_fast_malloc, but the buffer has an additional AV_INPUT_BUFFER_PADDING_SIZE bytes at the end, which will always be 0.
Definition: utils.c:70
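A minimal usage sketch (names are illustrative): grow a reusable packet buffer only when needed; the zeroed padding lets bitstream readers over-read safely.

static int ensure_buffer_example(uint8_t **buf, unsigned int *buf_size,
                                 size_t needed)
{
    av_fast_padded_malloc(buf, buf_size, needed); /* reallocates only if too small */
    if (!*buf)
        return AVERROR(ENOMEM);                   /* *buf is NULL on failure */
    return 0;
}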
COMPRESSION_LEVEL_INSANE
@ COMPRESSION_LEVEL_INSANE
Definition: apedec.c:72
av_always_inline
#define av_always_inline
Definition: attributes.h:43
uint8_t
uint8_t
Definition: audio_convert.c:194
entropy_decode_stereo_3990
static void entropy_decode_stereo_3990(APEContext *ctx, int blockstodecode)
Definition: apedec.c:709
AVCodec::name
const char * name
Name of the codec implementation.
Definition: avcodec.h:3488
APERangecoder::help
uint32_t help
bytes_to_follow resp. intermediate value
Definition: apedec.c:115
APECompressionLevel
APECompressionLevel
Possible compression levels.
Definition: apedec.c:67
YDELAYA
#define YDELAYA
Definition: apedec.c:53
entropy_decode_mono_3860
static void entropy_decode_mono_3860(APEContext *ctx, int blockstodecode)
Definition: apedec.c:646
ape_decode_close
static av_cold int ape_decode_close(AVCodecContext *avctx)
Definition: apedec.c:200
avcodec.h
APEContext::bdsp
BswapDSPContext bdsp
Definition: apedec.c:139
predictor_decode_stereo_3950
static void predictor_decode_stereo_3950(APEContext *ctx, int count)
Definition: apedec.c:1163
ret
ret
Definition: filter_design.txt:187
APEContext::adsp
LLAudDSPContext adsp
Definition: apedec.c:140
predictor_decode_mono_3950
static void predictor_decode_mono_3950(APEContext *ctx, int count)
Definition: apedec.c:1192
XDELAYB
#define XDELAYB
Definition: apedec.c:56
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass is associated.
Definition: log.h:72
frame
Frame buffering and request_frame() rules for filters; see filter_design.txt.
Definition: filter_design.txt:264
get_rice_ook
static int get_rice_ook(GetBitContext *gb, int k)
Definition: apedec.c:467
APEContext::data_size
int data_size
frame data allocated size
Definition: apedec.c:169
APEContext::predictor
APEPredictor predictor
predictor used for final reconstruction
Definition: apedec.c:152
lossless_audiodsp.h
APEFilter
Filters applied to the decoded data.
Definition: apedec.c:98
left
Snow bitstream and decoding notes; for motion vector prediction the median of the scaled left, top and top-right vectors is used (see snow.txt).
Definition: snow.txt:386
APEPredictor::lastA
int32_t lastA[2]
Definition: apedec.c:123
AVCodecContext
main external API structure.
Definition: avcodec.h:1565
counts_3980
static const uint16_t counts_3980[22]
Fixed probabilities for symbols in Monkey's Audio version 3.98.
Definition: apedec.c:411
channel_layout.h
HISTORY_SIZE
#define HISTORY_SIZE
Definition: apedec.c:48
COMPRESSION_LEVEL_NORMAL
@ COMPRESSION_LEVEL_NORMAL
Definition: apedec.c:69
counts_diff_3970
static const uint16_t counts_diff_3970[21]
Probability ranges for symbols in Monkey's Audio version 3.97.
Definition: apedec.c:402
buffer
The frame and frame reference mechanism is intended to avoid, as much as possible, expensive copies of that data while still allowing the filters to produce correct results. The data is stored in buffers represented by AVFrame structures; several references can point to the same frame buffer.
Definition: filter_design.txt:49
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:223
initial_coeffs_b_3800
static const int32_t initial_coeffs_b_3800[2]
Definition: apedec.c:765
APEContext::channels
int channels
Definition: apedec.c:141
APEContext::decoded
int32_t * decoded[MAX_CHANNELS]
decoded data for each channel
Definition: apedec.c:156
AV_CODEC_CAP_DELAY
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and correct output.
Definition: avcodec.h:1006
APESIGN
static int APESIGN(int32_t x)
Get inverse sign of integer (-1 for positive, 1 for negative and 0 for zero)
Definition: apedec.c:814
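A branchless sketch matching this description (the helper name is hypothetical):

/* -1 for positive input, +1 for negative input, 0 for zero. */
static inline int ape_sign_sketch(int32_t x)
{
    return (x < 0) - (x > 0);
}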
APEContext::riceY
APERice riceY
rice code parameters for the first channel
Definition: apedec.c:163
APEContext::data_end
uint8_t * data_end
frame data end
Definition: apedec.c:168
XADAPTCOEFFSB
#define XADAPTCOEFFSB
Definition: apedec.c:61
shift
static int shift(int a, int b)
Definition: sonic.c:82
FF_ALLOC_OR_GOTO
#define FF_ALLOC_OR_GOTO(ctx, p, size, label)
Definition: internal.h:140
ape_filter_orders
static const uint16_t ape_filter_orders[5][APE_FILTER_LEVELS]
Filter orders depending on compression level.
Definition: apedec.c:79
initial_coeffs_3930
static const int32_t initial_coeffs_3930[4]
Definition: apedec.c:769
MAX_CHANNELS
#define MAX_CHANNELS
Definition: apedec.c:41
AV_CODEC_CAP_SUBFRAMES
#define AV_CODEC_CAP_SUBFRAMES
Codec can output multiple frames per AVPacket. Normally demuxers return one frame at a time, ...
Definition: avcodec.h:1024
overflow
Undefined behavior: in the C language, some operations are undefined, like signed integer overflow.
Definition: undefined.txt:3
ape_decode_frame
static int ape_decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr, AVPacket *avpkt)
Definition: apedec.c:1405
APEContext::bps
int bps
Definition: apedec.c:143
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:39
APEPredictor::filterB
int32_t filterB[2]
Definition: apedec.c:126
APEContext::blocks_per_loop
int blocks_per_loop
maximum number of samples to decode for each call
Definition: apedec.c:157
filter_fast_3320
static av_always_inline int filter_fast_3320(APEPredictor *p, const int decoded, const int filter, const int delayA)
Definition: apedec.c:818
APEContext::flags
int flags
global decoder flags
Definition: apedec.c:148
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:48
AVPacket
This structure stores compressed data.
Definition: avcodec.h:1454
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:1592
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
av_fast_malloc
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:500
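A short sketch of the reuse semantics (sizes are arbitrary): the pointer and size variable persist across calls, so a call only reallocates when min_size exceeds the current allocation.

static void fast_malloc_example(void)
{
    void *buf = NULL;
    unsigned int size = 0;

    av_fast_malloc(&buf, &size, 1024);  /* first call allocates >= 1024 bytes */
    av_fast_malloc(&buf, &size, 512);   /* no-op: existing buffer is large enough */
    av_fast_malloc(&buf, &size, 4096);  /* grows the buffer */
    av_free(buf);                       /* caller still owns and frees the buffer */
}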
bytestream.h
APEFilter::adaptcoeffs
int16_t * adaptcoeffs
adaptive filter coefficients used for correcting of actual filter coefficients
Definition: apedec.c:100
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
length
const char int length
Definition: avisynth_c.h:860
APEContext::decoded_size
int decoded_size
Definition: apedec.c:155
BswapDSPContext
Definition: bswapdsp.h:24
ape_apply_filters
static void ape_apply_filters(APEContext *ctx, int32_t *decoded0, int32_t *decoded1, int count)
Definition: apedec.c:1329
APEFilter::avg
uint32_t avg
Definition: apedec.c:104
int
int
Definition: ffmpeg_filter.c:191
entropy_decode_mono_0000
static void entropy_decode_mono_0000(APEContext *ctx, int blockstodecode)
Definition: apedec.c:632
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Definition: opt.h:232
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
EXTRA_BITS
#define EXTRA_BITS
Definition: apedec.c:314
APERice::k
uint32_t k
Definition: apedec.c:108