FFmpeg
adpcm.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2001-2003 The FFmpeg project
3  *
4  * first version by Francois Revol (revol@free.fr)
5  * fringe ADPCM codecs (e.g., DK3, DK4, Westwood)
6  * by Mike Melanson (melanson@pcisys.net)
7  * CD-ROM XA ADPCM codec by BERO
8  * EA ADPCM decoder by Robin Kay (komadori@myrealbox.com)
9  * EA ADPCM R1/R2/R3 decoder by Peter Ross (pross@xvid.org)
10  * EA IMA EACS decoder by Peter Ross (pross@xvid.org)
11  * EA IMA SEAD decoder by Peter Ross (pross@xvid.org)
12  * EA ADPCM XAS decoder by Peter Ross (pross@xvid.org)
13  * MAXIS EA ADPCM decoder by Robert Marston (rmarston@gmail.com)
14  * THP ADPCM decoder by Marco Gerards (mgerards@xs4all.nl)
15  * Argonaut Games ADPCM decoder by Zane van Iperen (zane@zanevaniperen.com)
16  * Simon & Schuster Interactive ADPCM decoder by Zane van Iperen (zane@zanevaniperen.com)
17  * Ubisoft ADPCM decoder by Zane van Iperen (zane@zanevaniperen.com)
18  * High Voltage Software ALP decoder by Zane van Iperen (zane@zanevaniperen.com)
19  * Cunning Developments decoder by Zane van Iperen (zane@zanevaniperen.com)
20  *
21  * This file is part of FFmpeg.
22  *
23  * FFmpeg is free software; you can redistribute it and/or
24  * modify it under the terms of the GNU Lesser General Public
25  * License as published by the Free Software Foundation; either
26  * version 2.1 of the License, or (at your option) any later version.
27  *
28  * FFmpeg is distributed in the hope that it will be useful,
29  * but WITHOUT ANY WARRANTY; without even the implied warranty of
30  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
31  * Lesser General Public License for more details.
32  *
33  * You should have received a copy of the GNU Lesser General Public
34  * License along with FFmpeg; if not, write to the Free Software
35  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
36  */
37 #include "avcodec.h"
38 #include "get_bits.h"
39 #include "bytestream.h"
40 #include "adpcm.h"
41 #include "adpcm_data.h"
42 #include "internal.h"
43 
44 /**
45  * @file
46  * ADPCM decoders
47  * Features and limitations:
48  *
49  * Reference documents:
50  * http://wiki.multimedia.cx/index.php?title=Category:ADPCM_Audio_Codecs
51  * http://www.pcisys.net/~melanson/codecs/simpleaudio.html [dead]
52  * http://www.geocities.com/SiliconValley/8682/aud3.txt [dead]
53  * http://openquicktime.sourceforge.net/
54  * XAnim sources (xa_codec.c) http://xanim.polter.net/
55  * http://www.cs.ucla.edu/~leec/mediabench/applications.html [dead]
56  * SoX source code http://sox.sourceforge.net/
57  *
58  * CD-ROM XA:
59  * http://ku-www.ss.titech.ac.jp/~yatsushi/xaadpcm.html [dead]
60  * vagpack & depack http://homepages.compuserve.de/bITmASTER32/psx-index.html [dead]
61  * readstr http://www.geocities.co.jp/Playtown/2004/
62  */
63 
64 #define CASE_0(codec_id, ...)
65 #define CASE_1(codec_id, ...) \
66  case codec_id: \
67  { __VA_ARGS__ } \
68  break;
69 #define CASE_2(enabled, codec_id, ...) \
70  CASE_ ## enabled(codec_id, __VA_ARGS__)
71 #define CASE_3(config, codec_id, ...) \
72  CASE_2(config, codec_id, __VA_ARGS__)
73 #define CASE(codec, ...) \
74  CASE_3(CONFIG_ ## codec ## _DECODER, AV_CODEC_ID_ ## codec, __VA_ARGS__)
75 
76 /* These are for CD-ROM XA ADPCM */
77 static const int8_t xa_adpcm_table[5][2] = {
78  { 0, 0 },
79  { 60, 0 },
80  { 115, -52 },
81  { 98, -55 },
82  { 122, -60 }
83 };
84 
85 static const int16_t afc_coeffs[2][16] = {
86  { 0, 2048, 0, 1024, 4096, 3584, 3072, 4608, 4200, 4800, 5120, 2048, 1024, -1024, -1024, -2048 },
87  { 0, 0, 2048, 1024, -2048, -1536, -1024, -2560, -2248, -2300, -3072, -2048, -1024, 1024, 0, 0 }
88 };
89 
90 static const int16_t ea_adpcm_table[] = {
91  0, 240, 460, 392,
92  0, 0, -208, -220,
93  0, 1, 3, 4,
94  7, 8, 10, 11,
95  0, -1, -3, -4
96 };
97 
98 /*
99  * Dumped from the binaries:
100  * - FantasticJourney.exe - 0x794D2, DGROUP:0x47A4D2
101  * - BigRaceUSA.exe - 0x9B8AA, DGROUP:0x49C4AA
102  * - Timeshock!.exe - 0x8506A, DGROUP:0x485C6A
103  */
104 static const int8_t ima_cunning_index_table[9] = {
105  -1, -1, -1, -1, 1, 2, 3, 4, -1
106 };
107 
108 /*
109  * Dumped from the binaries:
110  * - FantasticJourney.exe - 0x79458, DGROUP:0x47A458
111  * - BigRaceUSA.exe - 0x9B830, DGROUP:0x49C430
112  * - Timeshock!.exe - 0x84FF0, DGROUP:0x485BF0
113  */
114 static const int16_t ima_cunning_step_table[61] = {
115  1, 1, 1, 1, 2, 2, 3, 3, 4, 5,
116  6, 7, 8, 10, 12, 14, 16, 20, 24, 28,
117  32, 40, 48, 56, 64, 80, 96, 112, 128, 160,
118  192, 224, 256, 320, 384, 448, 512, 640, 768, 896,
119  1024, 1280, 1536, 1792, 2048, 2560, 3072, 3584, 4096, 5120,
120  6144, 7168, 8192, 10240, 12288, 14336, 16384, 20480, 24576, 28672, 0
121 };
122 
123 static const int8_t adpcm_index_table2[4] = {
124  -1, 2,
125  -1, 2,
126 };
127 
128 static const int8_t adpcm_index_table3[8] = {
129  -1, -1, 1, 2,
130  -1, -1, 1, 2,
131 };
132 
133 static const int8_t adpcm_index_table5[32] = {
134  -1, -1, -1, -1, -1, -1, -1, -1, 1, 2, 4, 6, 8, 10, 13, 16,
135  -1, -1, -1, -1, -1, -1, -1, -1, 1, 2, 4, 6, 8, 10, 13, 16,
136 };
137 
138 static const int8_t * const adpcm_index_tables[4] = {
139  &adpcm_index_table2[0],
140  &adpcm_index_table3[0],
142  &adpcm_index_table5[0],
143 };
144 
145 static const int16_t mtaf_stepsize[32][16] = {
146  { 1, 5, 9, 13, 16, 20, 24, 28,
147  -1, -5, -9, -13, -16, -20, -24, -28, },
148  { 2, 6, 11, 15, 20, 24, 29, 33,
149  -2, -6, -11, -15, -20, -24, -29, -33, },
150  { 2, 7, 13, 18, 23, 28, 34, 39,
151  -2, -7, -13, -18, -23, -28, -34, -39, },
152  { 3, 9, 15, 21, 28, 34, 40, 46,
153  -3, -9, -15, -21, -28, -34, -40, -46, },
154  { 3, 11, 18, 26, 33, 41, 48, 56,
155  -3, -11, -18, -26, -33, -41, -48, -56, },
156  { 4, 13, 22, 31, 40, 49, 58, 67,
157  -4, -13, -22, -31, -40, -49, -58, -67, },
158  { 5, 16, 26, 37, 48, 59, 69, 80,
159  -5, -16, -26, -37, -48, -59, -69, -80, },
160  { 6, 19, 31, 44, 57, 70, 82, 95,
161  -6, -19, -31, -44, -57, -70, -82, -95, },
162  { 7, 22, 38, 53, 68, 83, 99, 114,
163  -7, -22, -38, -53, -68, -83, -99, -114, },
164  { 9, 27, 45, 63, 81, 99, 117, 135,
165  -9, -27, -45, -63, -81, -99, -117, -135, },
166  { 10, 32, 53, 75, 96, 118, 139, 161,
167  -10, -32, -53, -75, -96, -118, -139, -161, },
168  { 12, 38, 64, 90, 115, 141, 167, 193,
169  -12, -38, -64, -90, -115, -141, -167, -193, },
170  { 15, 45, 76, 106, 137, 167, 198, 228,
171  -15, -45, -76, -106, -137, -167, -198, -228, },
172  { 18, 54, 91, 127, 164, 200, 237, 273,
173  -18, -54, -91, -127, -164, -200, -237, -273, },
174  { 21, 65, 108, 152, 195, 239, 282, 326,
175  -21, -65, -108, -152, -195, -239, -282, -326, },
176  { 25, 77, 129, 181, 232, 284, 336, 388,
177  -25, -77, -129, -181, -232, -284, -336, -388, },
178  { 30, 92, 153, 215, 276, 338, 399, 461,
179  -30, -92, -153, -215, -276, -338, -399, -461, },
180  { 36, 109, 183, 256, 329, 402, 476, 549,
181  -36, -109, -183, -256, -329, -402, -476, -549, },
182  { 43, 130, 218, 305, 392, 479, 567, 654,
183  -43, -130, -218, -305, -392, -479, -567, -654, },
184  { 52, 156, 260, 364, 468, 572, 676, 780,
185  -52, -156, -260, -364, -468, -572, -676, -780, },
186  { 62, 186, 310, 434, 558, 682, 806, 930,
187  -62, -186, -310, -434, -558, -682, -806, -930, },
188  { 73, 221, 368, 516, 663, 811, 958, 1106,
189  -73, -221, -368, -516, -663, -811, -958, -1106, },
190  { 87, 263, 439, 615, 790, 966, 1142, 1318,
191  -87, -263, -439, -615, -790, -966, -1142, -1318, },
192  { 104, 314, 523, 733, 942, 1152, 1361, 1571,
193  -104, -314, -523, -733, -942, -1152, -1361, -1571, },
194  { 124, 374, 623, 873, 1122, 1372, 1621, 1871,
195  -124, -374, -623, -873, -1122, -1372, -1621, -1871, },
196  { 148, 445, 743, 1040, 1337, 1634, 1932, 2229,
197  -148, -445, -743, -1040, -1337, -1634, -1932, -2229, },
198  { 177, 531, 885, 1239, 1593, 1947, 2301, 2655,
199  -177, -531, -885, -1239, -1593, -1947, -2301, -2655, },
200  { 210, 632, 1053, 1475, 1896, 2318, 2739, 3161,
201  -210, -632, -1053, -1475, -1896, -2318, -2739, -3161, },
202  { 251, 753, 1255, 1757, 2260, 2762, 3264, 3766,
203  -251, -753, -1255, -1757, -2260, -2762, -3264, -3766, },
204  { 299, 897, 1495, 2093, 2692, 3290, 3888, 4486,
205  -299, -897, -1495, -2093, -2692, -3290, -3888, -4486, },
206  { 356, 1068, 1781, 2493, 3206, 3918, 4631, 5343,
207  -356, -1068, -1781, -2493, -3206, -3918, -4631, -5343, },
208  { 424, 1273, 2121, 2970, 3819, 4668, 5516, 6365,
209  -424, -1273, -2121, -2970, -3819, -4668, -5516, -6365, },
210 };
211 
212 static const int16_t oki_step_table[49] = {
213  16, 17, 19, 21, 23, 25, 28, 31, 34, 37,
214  41, 45, 50, 55, 60, 66, 73, 80, 88, 97,
215  107, 118, 130, 143, 157, 173, 190, 209, 230, 253,
216  279, 307, 337, 371, 408, 449, 494, 544, 598, 658,
217  724, 796, 876, 963, 1060, 1166, 1282, 1411, 1552
218 };
219 
220 // padded to zero where table size is less then 16
221 static const int8_t swf_index_tables[4][16] = {
222  /*2*/ { -1, 2 },
223  /*3*/ { -1, -1, 2, 4 },
224  /*4*/ { -1, -1, -1, -1, 2, 4, 6, 8 },
225  /*5*/ { -1, -1, -1, -1, -1, -1, -1, -1, 1, 2, 4, 6, 8, 10, 13, 16 }
226 };
227 
228 static const int8_t zork_index_table[8] = {
229  -1, -1, -1, 1, 4, 7, 10, 12,
230 };
231 
232 static const int8_t mtf_index_table[16] = {
233  8, 6, 4, 2, -1, -1, -1, -1,
234  -1, -1, -1, -1, 2, 4, 6, 8,
235 };
236 
237 /* end of tables */
238 
239 typedef struct ADPCMDecodeContext {
241  int vqa_version; /**< VQA version. Used for ADPCM_IMA_WS */
242  int has_status; /**< Status flag. Reset to 0 after a flush. */
244 
245 static void adpcm_flush(AVCodecContext *avctx);
246 
248 {
249  ADPCMDecodeContext *c = avctx->priv_data;
250  unsigned int min_channels = 1;
251  unsigned int max_channels = 2;
252 
253  adpcm_flush(avctx);
254 
255  switch(avctx->codec->id) {
257  max_channels = 1;
258  break;
261  min_channels = 2;
262  break;
269  max_channels = 6;
270  break;
272  min_channels = 2;
273  max_channels = 8;
274  if (avctx->channels & 1) {
275  avpriv_request_sample(avctx, "channel count %d", avctx->channels);
276  return AVERROR_PATCHWELCOME;
277  }
278  break;
280  max_channels = 8;
281  if (avctx->channels <= 0 || avctx->block_align % (16 * avctx->channels))
282  return AVERROR_INVALIDDATA;
283  break;
287  max_channels = 14;
288  break;
289  }
290  if (avctx->channels < min_channels || avctx->channels > max_channels) {
291  av_log(avctx, AV_LOG_ERROR, "Invalid number of channels\n");
292  return AVERROR(EINVAL);
293  }
294 
295  switch(avctx->codec->id) {
297  if (avctx->bits_per_coded_sample < 2 || avctx->bits_per_coded_sample > 5)
298  return AVERROR_INVALIDDATA;
299  break;
301  if (avctx->bits_per_coded_sample != 4 || avctx->block_align != 17 * avctx->channels)
302  return AVERROR_INVALIDDATA;
303  break;
305  if (avctx->bits_per_coded_sample != 8)
306  return AVERROR_INVALIDDATA;
307  break;
308  default:
309  break;
310  }
311 
312  switch (avctx->codec->id) {
333  break;
335  avctx->sample_fmt = c->vqa_version == 3 ? AV_SAMPLE_FMT_S16P :
337  break;
339  avctx->sample_fmt = avctx->channels > 2 ? AV_SAMPLE_FMT_S16P :
341  break;
342  default:
343  avctx->sample_fmt = AV_SAMPLE_FMT_S16;
344  }
345  return 0;
346 }
347 
348 static inline int16_t adpcm_agm_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
349 {
350  int delta, pred, step, add;
351 
352  pred = c->predictor;
353  delta = nibble & 7;
354  step = c->step;
355  add = (delta * 2 + 1) * step;
356  if (add < 0)
357  add = add + 7;
358 
359  if ((nibble & 8) == 0)
360  pred = av_clip(pred + (add >> 3), -32767, 32767);
361  else
362  pred = av_clip(pred - (add >> 3), -32767, 32767);
363 
364  switch (delta) {
365  case 7:
366  step *= 0x99;
367  break;
368  case 6:
369  c->step = av_clip(c->step * 2, 127, 24576);
370  c->predictor = pred;
371  return pred;
372  case 5:
373  step *= 0x66;
374  break;
375  case 4:
376  step *= 0x4d;
377  break;
378  default:
379  step *= 0x39;
380  break;
381  }
382 
383  if (step < 0)
384  step += 0x3f;
385 
386  c->step = step >> 6;
387  c->step = av_clip(c->step, 127, 24576);
388  c->predictor = pred;
389  return pred;
390 }
391 
392 static inline int16_t adpcm_ima_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int shift)
393 {
394  int step_index;
395  int predictor;
396  int sign, delta, diff, step;
397 
398  step = ff_adpcm_step_table[c->step_index];
399  step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
400  step_index = av_clip(step_index, 0, 88);
401 
402  sign = nibble & 8;
403  delta = nibble & 7;
404  /* perform direct multiplication instead of series of jumps proposed by
405  * the reference ADPCM implementation since modern CPUs can do the mults
406  * quickly enough */
407  diff = ((2 * delta + 1) * step) >> shift;
408  predictor = c->predictor;
409  if (sign) predictor -= diff;
410  else predictor += diff;
411 
412  c->predictor = av_clip_int16(predictor);
413  c->step_index = step_index;
414 
415  return (int16_t)c->predictor;
416 }
417 
418 static inline int16_t adpcm_ima_alp_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int shift)
419 {
420  int step_index;
421  int predictor;
422  int sign, delta, diff, step;
423 
424  step = ff_adpcm_step_table[c->step_index];
425  step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
426  step_index = av_clip(step_index, 0, 88);
427 
428  sign = nibble & 8;
429  delta = nibble & 7;
430  diff = (delta * step) >> shift;
431  predictor = c->predictor;
432  if (sign) predictor -= diff;
433  else predictor += diff;
434 
435  c->predictor = av_clip_int16(predictor);
436  c->step_index = step_index;
437 
438  return (int16_t)c->predictor;
439 }
440 
441 static inline int16_t adpcm_ima_mtf_expand_nibble(ADPCMChannelStatus *c, int nibble)
442 {
443  int step_index, step, delta, predictor;
444 
445  step = ff_adpcm_step_table[c->step_index];
446 
447  delta = step * (2 * nibble - 15);
448  predictor = c->predictor + delta;
449 
450  step_index = c->step_index + mtf_index_table[(unsigned)nibble];
451  c->predictor = av_clip_int16(predictor >> 4);
452  c->step_index = av_clip(step_index, 0, 88);
453 
454  return (int16_t)c->predictor;
455 }
456 
457 static inline int16_t adpcm_ima_cunning_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
458 {
459  int step_index;
460  int predictor;
461  int step;
462 
463  nibble = sign_extend(nibble & 0xF, 4);
464 
465  step = ima_cunning_step_table[c->step_index];
466  step_index = c->step_index + ima_cunning_index_table[abs(nibble)];
467  step_index = av_clip(step_index, 0, 60);
468 
469  predictor = c->predictor + step * nibble;
470 
471  c->predictor = av_clip_int16(predictor);
472  c->step_index = step_index;
473 
474  return c->predictor;
475 }
476 
478 {
479  int nibble, step_index, predictor, sign, delta, diff, step, shift;
480 
481  shift = bps - 1;
482  nibble = get_bits_le(gb, bps),
483  step = ff_adpcm_step_table[c->step_index];
484  step_index = c->step_index + adpcm_index_tables[bps - 2][nibble];
485  step_index = av_clip(step_index, 0, 88);
486 
487  sign = nibble & (1 << shift);
488  delta = av_mod_uintp2(nibble, shift);
489  diff = ((2 * delta + 1) * step) >> shift;
490  predictor = c->predictor;
491  if (sign) predictor -= diff;
492  else predictor += diff;
493 
494  c->predictor = av_clip_int16(predictor);
495  c->step_index = step_index;
496 
497  return (int16_t)c->predictor;
498 }
499 
500 static inline int adpcm_ima_qt_expand_nibble(ADPCMChannelStatus *c, int nibble)
501 {
502  int step_index;
503  int predictor;
504  int diff, step;
505 
506  step = ff_adpcm_step_table[c->step_index];
507  step_index = c->step_index + ff_adpcm_index_table[nibble];
508  step_index = av_clip(step_index, 0, 88);
509 
510  diff = step >> 3;
511  if (nibble & 4) diff += step;
512  if (nibble & 2) diff += step >> 1;
513  if (nibble & 1) diff += step >> 2;
514 
515  if (nibble & 8)
516  predictor = c->predictor - diff;
517  else
518  predictor = c->predictor + diff;
519 
520  c->predictor = av_clip_int16(predictor);
521  c->step_index = step_index;
522 
523  return c->predictor;
524 }
525 
526 static inline int16_t adpcm_ms_expand_nibble(ADPCMChannelStatus *c, int nibble)
527 {
528  int predictor;
529 
530  predictor = (((c->sample1) * (c->coeff1)) + ((c->sample2) * (c->coeff2))) / 64;
531  predictor += ((nibble & 0x08)?(nibble - 0x10):(nibble)) * c->idelta;
532 
533  c->sample2 = c->sample1;
534  c->sample1 = av_clip_int16(predictor);
535  c->idelta = (ff_adpcm_AdaptationTable[(int)nibble] * c->idelta) >> 8;
536  if (c->idelta < 16) c->idelta = 16;
537  if (c->idelta > INT_MAX/768) {
538  av_log(NULL, AV_LOG_WARNING, "idelta overflow\n");
539  c->idelta = INT_MAX/768;
540  }
541 
542  return c->sample1;
543 }
544 
545 static inline int16_t adpcm_ima_oki_expand_nibble(ADPCMChannelStatus *c, int nibble)
546 {
547  int step_index, predictor, sign, delta, diff, step;
548 
549  step = oki_step_table[c->step_index];
550  step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
551  step_index = av_clip(step_index, 0, 48);
552 
553  sign = nibble & 8;
554  delta = nibble & 7;
555  diff = ((2 * delta + 1) * step) >> 3;
556  predictor = c->predictor;
557  if (sign) predictor -= diff;
558  else predictor += diff;
559 
560  c->predictor = av_clip_intp2(predictor, 11);
561  c->step_index = step_index;
562 
563  return c->predictor * 16;
564 }
565 
566 static inline int16_t adpcm_ct_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
567 {
568  int sign, delta, diff;
569  int new_step;
570 
571  sign = nibble & 8;
572  delta = nibble & 7;
573  /* perform direct multiplication instead of series of jumps proposed by
574  * the reference ADPCM implementation since modern CPUs can do the mults
575  * quickly enough */
576  diff = ((2 * delta + 1) * c->step) >> 3;
577  /* predictor update is not so trivial: predictor is multiplied on 254/256 before updating */
578  c->predictor = ((c->predictor * 254) >> 8) + (sign ? -diff : diff);
579  c->predictor = av_clip_int16(c->predictor);
580  /* calculate new step and clamp it to range 511..32767 */
581  new_step = (ff_adpcm_AdaptationTable[nibble & 7] * c->step) >> 8;
582  c->step = av_clip(new_step, 511, 32767);
583 
584  return (int16_t)c->predictor;
585 }
586 
587 static inline int16_t adpcm_sbpro_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int size, int shift)
588 {
589  int sign, delta, diff;
590 
591  sign = nibble & (1<<(size-1));
592  delta = nibble & ((1<<(size-1))-1);
593  diff = delta << (7 + c->step + shift);
594 
595  /* clamp result */
596  c->predictor = av_clip(c->predictor + (sign ? -diff : diff), -16384,16256);
597 
598  /* calculate new step */
599  if (delta >= (2*size - 3) && c->step < 3)
600  c->step++;
601  else if (delta == 0 && c->step > 0)
602  c->step--;
603 
604  return (int16_t) c->predictor;
605 }
606 
607 static inline int16_t adpcm_yamaha_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
608 {
609  if(!c->step) {
610  c->predictor = 0;
611  c->step = 127;
612  }
613 
614  c->predictor += (c->step * ff_adpcm_yamaha_difflookup[nibble]) / 8;
615  c->predictor = av_clip_int16(c->predictor);
616  c->step = (c->step * ff_adpcm_yamaha_indexscale[nibble]) >> 8;
617  c->step = av_clip(c->step, 127, 24576);
618  return c->predictor;
619 }
620 
621 static inline int16_t adpcm_mtaf_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
622 {
623  c->predictor += mtaf_stepsize[c->step][nibble];
624  c->predictor = av_clip_int16(c->predictor);
625  c->step += ff_adpcm_index_table[nibble];
626  c->step = av_clip_uintp2(c->step, 5);
627  return c->predictor;
628 }
629 
630 static inline int16_t adpcm_zork_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
631 {
632  int16_t index = c->step_index;
633  uint32_t lookup_sample = ff_adpcm_step_table[index];
634  int32_t sample = 0;
635 
636  if (nibble & 0x40)
637  sample += lookup_sample;
638  if (nibble & 0x20)
639  sample += lookup_sample >> 1;
640  if (nibble & 0x10)
641  sample += lookup_sample >> 2;
642  if (nibble & 0x08)
643  sample += lookup_sample >> 3;
644  if (nibble & 0x04)
645  sample += lookup_sample >> 4;
646  if (nibble & 0x02)
647  sample += lookup_sample >> 5;
648  if (nibble & 0x01)
649  sample += lookup_sample >> 6;
650  if (nibble & 0x80)
651  sample = -sample;
652 
653  sample += c->predictor;
655 
656  index += zork_index_table[(nibble >> 4) & 7];
657  index = av_clip(index, 0, 88);
658 
659  c->predictor = sample;
660  c->step_index = index;
661 
662  return sample;
663 }
664 
665 static int xa_decode(AVCodecContext *avctx, int16_t *out0, int16_t *out1,
666  const uint8_t *in, ADPCMChannelStatus *left,
667  ADPCMChannelStatus *right, int channels, int sample_offset)
668 {
669  int i, j;
670  int shift,filter,f0,f1;
671  int s_1,s_2;
672  int d,s,t;
673 
674  out0 += sample_offset;
675  if (channels == 1)
676  out1 = out0 + 28;
677  else
678  out1 += sample_offset;
679 
680  for(i=0;i<4;i++) {
681  shift = 12 - (in[4+i*2] & 15);
682  filter = in[4+i*2] >> 4;
684  avpriv_request_sample(avctx, "unknown XA-ADPCM filter %d", filter);
685  filter=0;
686  }
687  if (shift < 0) {
688  avpriv_request_sample(avctx, "unknown XA-ADPCM shift %d", shift);
689  shift = 0;
690  }
691  f0 = xa_adpcm_table[filter][0];
692  f1 = xa_adpcm_table[filter][1];
693 
694  s_1 = left->sample1;
695  s_2 = left->sample2;
696 
697  for(j=0;j<28;j++) {
698  d = in[16+i+j*4];
699 
700  t = sign_extend(d, 4);
701  s = t*(1<<shift) + ((s_1*f0 + s_2*f1+32)>>6);
702  s_2 = s_1;
703  s_1 = av_clip_int16(s);
704  out0[j] = s_1;
705  }
706 
707  if (channels == 2) {
708  left->sample1 = s_1;
709  left->sample2 = s_2;
710  s_1 = right->sample1;
711  s_2 = right->sample2;
712  }
713 
714  shift = 12 - (in[5+i*2] & 15);
715  filter = in[5+i*2] >> 4;
716  if (filter >= FF_ARRAY_ELEMS(xa_adpcm_table) || shift < 0) {
717  avpriv_request_sample(avctx, "unknown XA-ADPCM filter %d", filter);
718  filter=0;
719  }
720  if (shift < 0) {
721  avpriv_request_sample(avctx, "unknown XA-ADPCM shift %d", shift);
722  shift = 0;
723  }
724 
725  f0 = xa_adpcm_table[filter][0];
726  f1 = xa_adpcm_table[filter][1];
727 
728  for(j=0;j<28;j++) {
729  d = in[16+i+j*4];
730 
731  t = sign_extend(d >> 4, 4);
732  s = t*(1<<shift) + ((s_1*f0 + s_2*f1+32)>>6);
733  s_2 = s_1;
734  s_1 = av_clip_int16(s);
735  out1[j] = s_1;
736  }
737 
738  if (channels == 2) {
739  right->sample1 = s_1;
740  right->sample2 = s_2;
741  } else {
742  left->sample1 = s_1;
743  left->sample2 = s_2;
744  }
745 
746  out0 += 28 * (3 - channels);
747  out1 += 28 * (3 - channels);
748  }
749 
750  return 0;
751 }
752 
753 static void adpcm_swf_decode(AVCodecContext *avctx, const uint8_t *buf, int buf_size, int16_t *samples)
754 {
755  ADPCMDecodeContext *c = avctx->priv_data;
756  GetBitContext gb;
757  const int8_t *table;
758  int k0, signmask, nb_bits, count;
759  int size = buf_size*8;
760  int i;
761 
762  init_get_bits(&gb, buf, size);
763 
764  //read bits & initial values
765  nb_bits = get_bits(&gb, 2)+2;
766  table = swf_index_tables[nb_bits-2];
767  k0 = 1 << (nb_bits-2);
768  signmask = 1 << (nb_bits-1);
769 
770  while (get_bits_count(&gb) <= size - 22*avctx->channels) {
771  for (i = 0; i < avctx->channels; i++) {
772  *samples++ = c->status[i].predictor = get_sbits(&gb, 16);
773  c->status[i].step_index = get_bits(&gb, 6);
774  }
775 
776  for (count = 0; get_bits_count(&gb) <= size - nb_bits*avctx->channels && count < 4095; count++) {
777  int i;
778 
779  for (i = 0; i < avctx->channels; i++) {
780  // similar to IMA adpcm
781  int delta = get_bits(&gb, nb_bits);
782  int step = ff_adpcm_step_table[c->status[i].step_index];
783  int vpdiff = 0; // vpdiff = (delta+0.5)*step/4
784  int k = k0;
785 
786  do {
787  if (delta & k)
788  vpdiff += step;
789  step >>= 1;
790  k >>= 1;
791  } while(k);
792  vpdiff += step;
793 
794  if (delta & signmask)
795  c->status[i].predictor -= vpdiff;
796  else
797  c->status[i].predictor += vpdiff;
798 
799  c->status[i].step_index += table[delta & (~signmask)];
800 
801  c->status[i].step_index = av_clip(c->status[i].step_index, 0, 88);
802  c->status[i].predictor = av_clip_int16(c->status[i].predictor);
803 
804  *samples++ = c->status[i].predictor;
805  }
806  }
807  }
808 }
809 
810 int16_t ff_adpcm_argo_expand_nibble(ADPCMChannelStatus *cs, int nibble, int shift, int flag)
811 {
812  int sample = sign_extend(nibble, 4) * (1 << shift);
813 
814  if (flag)
815  sample += (8 * cs->sample1) - (4 * cs->sample2);
816  else
817  sample += 4 * cs->sample1;
818 
819  sample = av_clip_int16(sample >> 2);
820 
821  cs->sample2 = cs->sample1;
822  cs->sample1 = sample;
823 
824  return sample;
825 }
826 
827 /**
828  * Get the number of samples (per channel) that will be decoded from the packet.
829  * In one case, this is actually the maximum number of samples possible to
830  * decode with the given buf_size.
831  *
832  * @param[out] coded_samples set to the number of samples as coded in the
833  * packet, or 0 if the codec does not encode the
834  * number of samples in each frame.
835  * @param[out] approx_nb_samples set to non-zero if the number of samples
836  * returned is an approximation.
837  */
839  int buf_size, int *coded_samples, int *approx_nb_samples)
840 {
841  ADPCMDecodeContext *s = avctx->priv_data;
842  int nb_samples = 0;
843  int ch = avctx->channels;
844  int has_coded_samples = 0;
845  int header_size;
846 
847  *coded_samples = 0;
848  *approx_nb_samples = 0;
849 
850  if(ch <= 0)
851  return 0;
852 
853  switch (avctx->codec->id) {
854  /* constant, only check buf_size */
856  if (buf_size < 76 * ch)
857  return 0;
858  nb_samples = 128;
859  break;
861  if (buf_size < 34 * ch)
862  return 0;
863  nb_samples = 64;
864  break;
865  /* simple 4-bit adpcm */
878  nb_samples = buf_size * 2 / ch;
879  break;
880  }
881  if (nb_samples)
882  return nb_samples;
883 
884  /* simple 4-bit adpcm, with header */
885  header_size = 0;
886  switch (avctx->codec->id) {
892  case AV_CODEC_ID_ADPCM_IMA_ISS: header_size = 4 * ch; break;
893  case AV_CODEC_ID_ADPCM_IMA_SMJPEG: header_size = 4 * ch; break;
894  }
895  if (header_size > 0)
896  return (buf_size - header_size) * 2 / ch;
897 
898  /* more complex formats */
899  switch (avctx->codec->id) {
901  bytestream2_skip(gb, 4);
902  has_coded_samples = 1;
903  *coded_samples = bytestream2_get_le32u(gb);
904  nb_samples = FFMIN((buf_size - 8) * 2, *coded_samples);
905  bytestream2_seek(gb, -8, SEEK_CUR);
906  break;
908  has_coded_samples = 1;
909  *coded_samples = bytestream2_get_le32(gb);
910  *coded_samples -= *coded_samples % 28;
911  nb_samples = (buf_size - 12) / 30 * 28;
912  break;
914  has_coded_samples = 1;
915  *coded_samples = bytestream2_get_le32(gb);
916  nb_samples = (buf_size - (4 + 8 * ch)) * 2 / ch;
917  break;
919  nb_samples = (buf_size - ch) / ch * 2;
920  break;
924  /* maximum number of samples */
925  /* has internal offsets and a per-frame switch to signal raw 16-bit */
926  has_coded_samples = 1;
927  switch (avctx->codec->id) {
929  header_size = 4 + 9 * ch;
930  *coded_samples = bytestream2_get_le32(gb);
931  break;
933  header_size = 4 + 5 * ch;
934  *coded_samples = bytestream2_get_le32(gb);
935  break;
937  header_size = 4 + 5 * ch;
938  *coded_samples = bytestream2_get_be32(gb);
939  break;
940  }
941  *coded_samples -= *coded_samples % 28;
942  nb_samples = (buf_size - header_size) * 2 / ch;
943  nb_samples -= nb_samples % 28;
944  *approx_nb_samples = 1;
945  break;
947  if (avctx->block_align > 0)
948  buf_size = FFMIN(buf_size, avctx->block_align);
949  nb_samples = ((buf_size - 16) * 2 / 3 * 4) / ch;
950  break;
952  if (avctx->block_align > 0)
953  buf_size = FFMIN(buf_size, avctx->block_align);
954  if (buf_size < 4 * ch)
955  return AVERROR_INVALIDDATA;
956  nb_samples = 1 + (buf_size - 4 * ch) * 2 / ch;
957  break;
959  if (avctx->block_align > 0)
960  buf_size = FFMIN(buf_size, avctx->block_align);
961  nb_samples = (buf_size - 4 * ch) * 2 / ch;
962  break;
963  CASE(ADPCM_IMA_WAV,
964  int bsize = ff_adpcm_ima_block_sizes[avctx->bits_per_coded_sample - 2];
965  int bsamples = ff_adpcm_ima_block_samples[avctx->bits_per_coded_sample - 2];
966  if (avctx->block_align > 0)
967  buf_size = FFMIN(buf_size, avctx->block_align);
968  if (buf_size < 4 * ch)
969  return AVERROR_INVALIDDATA;
970  nb_samples = 1 + (buf_size - 4 * ch) / (bsize * ch) * bsamples;
971  ) /* End of CASE */
973  if (avctx->block_align > 0)
974  buf_size = FFMIN(buf_size, avctx->block_align);
975  nb_samples = (buf_size - 6 * ch) * 2 / ch;
976  break;
978  if (avctx->block_align > 0)
979  buf_size = FFMIN(buf_size, avctx->block_align);
980  nb_samples = (buf_size - 16 * (ch / 2)) * 2 / ch;
981  break;
985  {
986  int samples_per_byte;
987  switch (avctx->codec->id) {
988  case AV_CODEC_ID_ADPCM_SBPRO_2: samples_per_byte = 4; break;
989  case AV_CODEC_ID_ADPCM_SBPRO_3: samples_per_byte = 3; break;
990  case AV_CODEC_ID_ADPCM_SBPRO_4: samples_per_byte = 2; break;
991  }
992  if (!s->status[0].step_index) {
993  if (buf_size < ch)
994  return AVERROR_INVALIDDATA;
995  nb_samples++;
996  buf_size -= ch;
997  }
998  nb_samples += buf_size * samples_per_byte / ch;
999  break;
1000  }
1001  case AV_CODEC_ID_ADPCM_SWF:
1002  {
1003  int buf_bits = buf_size * 8 - 2;
1004  int nbits = (bytestream2_get_byte(gb) >> 6) + 2;
1005  int block_hdr_size = 22 * ch;
1006  int block_size = block_hdr_size + nbits * ch * 4095;
1007  int nblocks = buf_bits / block_size;
1008  int bits_left = buf_bits - nblocks * block_size;
1009  nb_samples = nblocks * 4096;
1010  if (bits_left >= block_hdr_size)
1011  nb_samples += 1 + (bits_left - block_hdr_size) / (nbits * ch);
1012  break;
1013  }
1014  case AV_CODEC_ID_ADPCM_THP:
1016  if (avctx->extradata) {
1017  nb_samples = buf_size * 14 / (8 * ch);
1018  break;
1019  }
1020  has_coded_samples = 1;
1021  bytestream2_skip(gb, 4); // channel size
1022  *coded_samples = (avctx->codec->id == AV_CODEC_ID_ADPCM_THP_LE) ?
1023  bytestream2_get_le32(gb) :
1024  bytestream2_get_be32(gb);
1025  buf_size -= 8 + 36 * ch;
1026  buf_size /= ch;
1027  nb_samples = buf_size / 8 * 14;
1028  if (buf_size % 8 > 1)
1029  nb_samples += (buf_size % 8 - 1) * 2;
1030  *approx_nb_samples = 1;
1031  break;
1032  case AV_CODEC_ID_ADPCM_AFC:
1033  nb_samples = buf_size / (9 * ch) * 16;
1034  break;
1035  case AV_CODEC_ID_ADPCM_XA:
1036  nb_samples = (buf_size / 128) * 224 / ch;
1037  break;
1038  case AV_CODEC_ID_ADPCM_DTK:
1039  case AV_CODEC_ID_ADPCM_PSX:
1040  nb_samples = buf_size / (16 * ch) * 28;
1041  break;
1043  nb_samples = buf_size / avctx->block_align * 32;
1044  break;
1046  nb_samples = buf_size / ch;
1047  break;
1048  }
1049 
1050  /* validate coded sample count */
1051  if (has_coded_samples && (*coded_samples <= 0 || *coded_samples > nb_samples))
1052  return AVERROR_INVALIDDATA;
1053 
1054  return nb_samples;
1055 }
1056 
1057 static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
1058  int *got_frame_ptr, AVPacket *avpkt)
1059 {
1060  AVFrame *frame = data;
1061  const uint8_t *buf = avpkt->data;
1062  int buf_size = avpkt->size;
1063  ADPCMDecodeContext *c = avctx->priv_data;
1064  int16_t *samples;
1065  int16_t **samples_p;
1066  int st; /* stereo */
1067  int nb_samples, coded_samples, approx_nb_samples, ret;
1068  GetByteContext gb;
1069 
1070  bytestream2_init(&gb, buf, buf_size);
1071  nb_samples = get_nb_samples(avctx, &gb, buf_size, &coded_samples, &approx_nb_samples);
1072  if (nb_samples <= 0) {
1073  av_log(avctx, AV_LOG_ERROR, "invalid number of samples in packet\n");
1074  return AVERROR_INVALIDDATA;
1075  }
1076 
1077  /* get output buffer */
1078  frame->nb_samples = nb_samples;
1079  if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
1080  return ret;
1081  samples = (int16_t *)frame->data[0];
1082  samples_p = (int16_t **)frame->extended_data;
1083 
1084  /* use coded_samples when applicable */
1085  /* it is always <= nb_samples, so the output buffer will be large enough */
1086  if (coded_samples) {
1087  if (!approx_nb_samples && coded_samples != nb_samples)
1088  av_log(avctx, AV_LOG_WARNING, "mismatch in coded sample count\n");
1089  frame->nb_samples = nb_samples = coded_samples;
1090  }
1091 
1092  st = avctx->channels == 2 ? 1 : 0;
1093 
1094  switch(avctx->codec->id) {
1095  CASE(ADPCM_IMA_QT,
1096  /* In QuickTime, IMA is encoded by chunks of 34 bytes (=64 samples).
1097  Channel data is interleaved per-chunk. */
1098  for (int channel = 0; channel < avctx->channels; channel++) {
1099  ADPCMChannelStatus *cs = &c->status[channel];
1100  int predictor;
1101  int step_index;
1102  /* (pppppp) (piiiiiii) */
1103 
1104  /* Bits 15-7 are the _top_ 9 bits of the 16-bit initial predictor value */
1105  predictor = sign_extend(bytestream2_get_be16u(&gb), 16);
1106  step_index = predictor & 0x7F;
1107  predictor &= ~0x7F;
1108 
1109  if (cs->step_index == step_index) {
1110  int diff = predictor - cs->predictor;
1111  if (diff < 0)
1112  diff = - diff;
1113  if (diff > 0x7f)
1114  goto update;
1115  } else {
1116  update:
1117  cs->step_index = step_index;
1118  cs->predictor = predictor;
1119  }
1120 
1121  if (cs->step_index > 88u){
1122  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1123  channel, cs->step_index);
1124  return AVERROR_INVALIDDATA;
1125  }
1126 
1127  samples = samples_p[channel];
1128 
1129  for (int m = 0; m < 64; m += 2) {
1130  int byte = bytestream2_get_byteu(&gb);
1131  samples[m ] = adpcm_ima_qt_expand_nibble(cs, byte & 0x0F);
1132  samples[m + 1] = adpcm_ima_qt_expand_nibble(cs, byte >> 4 );
1133  }
1134  }
1135  ) /* End of CASE */
1136  CASE(ADPCM_IMA_WAV,
1137  for (int i = 0; i < avctx->channels; i++) {
1138  ADPCMChannelStatus *cs = &c->status[i];
1139  cs->predictor = samples_p[i][0] = sign_extend(bytestream2_get_le16u(&gb), 16);
1140 
1141  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1142  if (cs->step_index > 88u){
1143  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1144  i, cs->step_index);
1145  return AVERROR_INVALIDDATA;
1146  }
1147  }
1148 
1149  if (avctx->bits_per_coded_sample != 4) {
1150  int samples_per_block = ff_adpcm_ima_block_samples[avctx->bits_per_coded_sample - 2];
1151  int block_size = ff_adpcm_ima_block_sizes[avctx->bits_per_coded_sample - 2];
1152  uint8_t temp[20 + AV_INPUT_BUFFER_PADDING_SIZE] = { 0 };
1153  GetBitContext g;
1154 
1155  for (int n = 0; n < (nb_samples - 1) / samples_per_block; n++) {
1156  for (int i = 0; i < avctx->channels; i++) {
1157  ADPCMChannelStatus *cs = &c->status[i];
1158  samples = &samples_p[i][1 + n * samples_per_block];
1159  for (int j = 0; j < block_size; j++) {
1160  temp[j] = buf[4 * avctx->channels + block_size * n * avctx->channels +
1161  (j % 4) + (j / 4) * (avctx->channels * 4) + i * 4];
1162  }
1163  ret = init_get_bits8(&g, (const uint8_t *)&temp, block_size);
1164  if (ret < 0)
1165  return ret;
1166  for (int m = 0; m < samples_per_block; m++) {
1168  avctx->bits_per_coded_sample);
1169  }
1170  }
1171  }
1172  bytestream2_skip(&gb, avctx->block_align - avctx->channels * 4);
1173  } else {
1174  for (int n = 0; n < (nb_samples - 1) / 8; n++) {
1175  for (int i = 0; i < avctx->channels; i++) {
1176  ADPCMChannelStatus *cs = &c->status[i];
1177  samples = &samples_p[i][1 + n * 8];
1178  for (int m = 0; m < 8; m += 2) {
1179  int v = bytestream2_get_byteu(&gb);
1180  samples[m ] = adpcm_ima_expand_nibble(cs, v & 0x0F, 3);
1181  samples[m + 1] = adpcm_ima_expand_nibble(cs, v >> 4 , 3);
1182  }
1183  }
1184  }
1185  }
1186  ) /* End of CASE */
1187  CASE(ADPCM_4XM,
1188  for (int i = 0; i < avctx->channels; i++)
1189  c->status[i].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1190 
1191  for (int i = 0; i < avctx->channels; i++) {
1192  c->status[i].step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1193  if (c->status[i].step_index > 88u) {
1194  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1195  i, c->status[i].step_index);
1196  return AVERROR_INVALIDDATA;
1197  }
1198  }
1199 
1200  for (int i = 0; i < avctx->channels; i++) {
1201  ADPCMChannelStatus *cs = &c->status[i];
1202  samples = (int16_t *)frame->data[i];
1203  for (int n = nb_samples >> 1; n > 0; n--) {
1204  int v = bytestream2_get_byteu(&gb);
1205  *samples++ = adpcm_ima_expand_nibble(cs, v & 0x0F, 4);
1206  *samples++ = adpcm_ima_expand_nibble(cs, v >> 4 , 4);
1207  }
1208  }
1209  ) /* End of CASE */
1210  CASE(ADPCM_AGM,
1211  for (int i = 0; i < avctx->channels; i++)
1212  c->status[i].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1213  for (int i = 0; i < avctx->channels; i++)
1214  c->status[i].step = sign_extend(bytestream2_get_le16u(&gb), 16);
1215 
1216  for (int n = 0; n < nb_samples >> (1 - st); n++) {
1217  int v = bytestream2_get_byteu(&gb);
1218  *samples++ = adpcm_agm_expand_nibble(&c->status[0], v & 0xF);
1219  *samples++ = adpcm_agm_expand_nibble(&c->status[st], v >> 4 );
1220  }
1221  ) /* End of CASE */
1222  CASE(ADPCM_MS,
1223  int block_predictor;
1224 
1225  if (avctx->channels > 2) {
1226  for (int channel = 0; channel < avctx->channels; channel++) {
1227  samples = samples_p[channel];
1228  block_predictor = bytestream2_get_byteu(&gb);
1229  if (block_predictor > 6) {
1230  av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[%d] = %d\n",
1231  channel, block_predictor);
1232  return AVERROR_INVALIDDATA;
1233  }
1234  c->status[channel].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
1235  c->status[channel].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
1236  c->status[channel].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
1237  c->status[channel].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
1238  c->status[channel].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
1239  *samples++ = c->status[channel].sample2;
1240  *samples++ = c->status[channel].sample1;
1241  for (int n = (nb_samples - 2) >> 1; n > 0; n--) {
1242  int byte = bytestream2_get_byteu(&gb);
1243  *samples++ = adpcm_ms_expand_nibble(&c->status[channel], byte >> 4 );
1244  *samples++ = adpcm_ms_expand_nibble(&c->status[channel], byte & 0x0F);
1245  }
1246  }
1247  } else {
1248  block_predictor = bytestream2_get_byteu(&gb);
1249  if (block_predictor > 6) {
1250  av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[0] = %d\n",
1251  block_predictor);
1252  return AVERROR_INVALIDDATA;
1253  }
1254  c->status[0].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
1255  c->status[0].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
1256  if (st) {
1257  block_predictor = bytestream2_get_byteu(&gb);
1258  if (block_predictor > 6) {
1259  av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[1] = %d\n",
1260  block_predictor);
1261  return AVERROR_INVALIDDATA;
1262  }
1263  c->status[1].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
1264  c->status[1].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
1265  }
1266  c->status[0].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
1267  if (st){
1268  c->status[1].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
1269  }
1270 
1271  c->status[0].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
1272  if (st) c->status[1].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
1273  c->status[0].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
1274  if (st) c->status[1].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
1275 
1276  *samples++ = c->status[0].sample2;
1277  if (st) *samples++ = c->status[1].sample2;
1278  *samples++ = c->status[0].sample1;
1279  if (st) *samples++ = c->status[1].sample1;
1280  for (int n = (nb_samples - 2) >> (1 - st); n > 0; n--) {
1281  int byte = bytestream2_get_byteu(&gb);
1282  *samples++ = adpcm_ms_expand_nibble(&c->status[0 ], byte >> 4 );
1283  *samples++ = adpcm_ms_expand_nibble(&c->status[st], byte & 0x0F);
1284  }
1285  }
1286  ) /* End of CASE */
1287  CASE(ADPCM_MTAF,
1288  for (int channel = 0; channel < avctx->channels; channel += 2) {
1289  bytestream2_skipu(&gb, 4);
1290  c->status[channel ].step = bytestream2_get_le16u(&gb) & 0x1f;
1291  c->status[channel + 1].step = bytestream2_get_le16u(&gb) & 0x1f;
1292  c->status[channel ].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1293  bytestream2_skipu(&gb, 2);
1294  c->status[channel + 1].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1295  bytestream2_skipu(&gb, 2);
1296  for (int n = 0; n < nb_samples; n += 2) {
1297  int v = bytestream2_get_byteu(&gb);
1298  samples_p[channel][n ] = adpcm_mtaf_expand_nibble(&c->status[channel], v & 0x0F);
1299  samples_p[channel][n + 1] = adpcm_mtaf_expand_nibble(&c->status[channel], v >> 4 );
1300  }
1301  for (int n = 0; n < nb_samples; n += 2) {
1302  int v = bytestream2_get_byteu(&gb);
1303  samples_p[channel + 1][n ] = adpcm_mtaf_expand_nibble(&c->status[channel + 1], v & 0x0F);
1304  samples_p[channel + 1][n + 1] = adpcm_mtaf_expand_nibble(&c->status[channel + 1], v >> 4 );
1305  }
1306  }
1307  ) /* End of CASE */
1308  CASE(ADPCM_IMA_DK4,
1309  for (int channel = 0; channel < avctx->channels; channel++) {
1310  ADPCMChannelStatus *cs = &c->status[channel];
1311  cs->predictor = *samples++ = sign_extend(bytestream2_get_le16u(&gb), 16);
1312  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1313  if (cs->step_index > 88u){
1314  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1315  channel, cs->step_index);
1316  return AVERROR_INVALIDDATA;
1317  }
1318  }
1319  for (int n = (nb_samples - 1) >> (1 - st); n > 0; n--) {
1320  int v = bytestream2_get_byteu(&gb);
1321  *samples++ = adpcm_ima_expand_nibble(&c->status[0 ], v >> 4 , 3);
1322  *samples++ = adpcm_ima_expand_nibble(&c->status[st], v & 0x0F, 3);
1323  }
1324  ) /* End of CASE */
1325 
1326  /* DK3 ADPCM support macro */
1327 #define DK3_GET_NEXT_NIBBLE() \
1328  if (decode_top_nibble_next) { \
1329  nibble = last_byte >> 4; \
1330  decode_top_nibble_next = 0; \
1331  } else { \
1332  last_byte = bytestream2_get_byteu(&gb); \
1333  nibble = last_byte & 0x0F; \
1334  decode_top_nibble_next = 1; \
1335  }
1336  CASE(ADPCM_IMA_DK3,
1337  int last_byte = 0;
1338  int nibble;
1339  int decode_top_nibble_next = 0;
1340  int diff_channel;
1341  const int16_t *samples_end = samples + avctx->channels * nb_samples;
1342 
1343  bytestream2_skipu(&gb, 10);
1344  c->status[0].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1345  c->status[1].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1346  c->status[0].step_index = bytestream2_get_byteu(&gb);
1347  c->status[1].step_index = bytestream2_get_byteu(&gb);
1348  if (c->status[0].step_index > 88u || c->status[1].step_index > 88u){
1349  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i/%i\n",
1350  c->status[0].step_index, c->status[1].step_index);
1351  return AVERROR_INVALIDDATA;
1352  }
1353  /* sign extend the predictors */
1354  diff_channel = c->status[1].predictor;
1355 
1356  while (samples < samples_end) {
1357 
1358  /* for this algorithm, c->status[0] is the sum channel and
1359  * c->status[1] is the diff channel */
1360 
1361  /* process the first predictor of the sum channel */
1363  adpcm_ima_expand_nibble(&c->status[0], nibble, 3);
1364 
1365  /* process the diff channel predictor */
1367  adpcm_ima_expand_nibble(&c->status[1], nibble, 3);
1368 
1369  /* process the first pair of stereo PCM samples */
1370  diff_channel = (diff_channel + c->status[1].predictor) / 2;
1371  *samples++ = c->status[0].predictor + c->status[1].predictor;
1372  *samples++ = c->status[0].predictor - c->status[1].predictor;
1373 
1374  /* process the second predictor of the sum channel */
1376  adpcm_ima_expand_nibble(&c->status[0], nibble, 3);
1377 
1378  /* process the second pair of stereo PCM samples */
1379  diff_channel = (diff_channel + c->status[1].predictor) / 2;
1380  *samples++ = c->status[0].predictor + c->status[1].predictor;
1381  *samples++ = c->status[0].predictor - c->status[1].predictor;
1382  }
1383 
1384  if ((bytestream2_tell(&gb) & 1))
1385  bytestream2_skip(&gb, 1);
1386  ) /* End of CASE */
1387  CASE(ADPCM_IMA_ISS,
1388  for (int channel = 0; channel < avctx->channels; channel++) {
1389  ADPCMChannelStatus *cs = &c->status[channel];
1390  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1391  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1392  if (cs->step_index > 88u){
1393  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1394  channel, cs->step_index);
1395  return AVERROR_INVALIDDATA;
1396  }
1397  }
1398 
1399  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1400  int v1, v2;
1401  int v = bytestream2_get_byteu(&gb);
1402  /* nibbles are swapped for mono */
1403  if (st) {
1404  v1 = v >> 4;
1405  v2 = v & 0x0F;
1406  } else {
1407  v2 = v >> 4;
1408  v1 = v & 0x0F;
1409  }
1410  *samples++ = adpcm_ima_expand_nibble(&c->status[0 ], v1, 3);
1411  *samples++ = adpcm_ima_expand_nibble(&c->status[st], v2, 3);
1412  }
1413  ) /* End of CASE */
1414  CASE(ADPCM_IMA_MOFLEX,
1415  for (int channel = 0; channel < avctx->channels; channel++) {
1416  ADPCMChannelStatus *cs = &c->status[channel];
1417  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1418  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1419  if (cs->step_index > 88u){
1420  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1421  channel, cs->step_index);
1422  return AVERROR_INVALIDDATA;
1423  }
1424  }
1425 
1426  for (int subframe = 0; subframe < nb_samples / 256; subframe++) {
1427  for (int channel = 0; channel < avctx->channels; channel++) {
1428  samples = samples_p[channel] + 256 * subframe;
1429  for (int n = 0; n < 256; n += 2) {
1430  int v = bytestream2_get_byteu(&gb);
1431  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3);
1432  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], v >> 4 , 3);
1433  }
1434  }
1435  }
1436  ) /* End of CASE */
1437  CASE(ADPCM_IMA_DAT4,
1438  for (int channel = 0; channel < avctx->channels; channel++) {
1439  ADPCMChannelStatus *cs = &c->status[channel];
1440  samples = samples_p[channel];
1441  bytestream2_skip(&gb, 4);
1442  for (int n = 0; n < nb_samples; n += 2) {
1443  int v = bytestream2_get_byteu(&gb);
1444  *samples++ = adpcm_ima_expand_nibble(cs, v >> 4 , 3);
1445  *samples++ = adpcm_ima_expand_nibble(cs, v & 0x0F, 3);
1446  }
1447  }
1448  ) /* End of CASE */
1449  CASE(ADPCM_IMA_APC,
1450  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1451  int v = bytestream2_get_byteu(&gb);
1452  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4 , 3);
1453  *samples++ = adpcm_ima_expand_nibble(&c->status[st], v & 0x0F, 3);
1454  }
1455  ) /* End of CASE */
1456  CASE(ADPCM_IMA_SSI,
1457  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1458  int v = bytestream2_get_byteu(&gb);
1459  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[0], v >> 4 );
1460  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[st], v & 0x0F);
1461  }
1462  ) /* End of CASE */
1463  CASE(ADPCM_IMA_APM,
1464  for (int n = nb_samples / 2; n > 0; n--) {
1465  for (int channel = 0; channel < avctx->channels; channel++) {
1466  int v = bytestream2_get_byteu(&gb);
1467  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[channel], v >> 4 );
1468  samples[st] = adpcm_ima_qt_expand_nibble(&c->status[channel], v & 0x0F);
1469  }
1470  samples += avctx->channels;
1471  }
1472  ) /* End of CASE */
1473  CASE(ADPCM_IMA_ALP,
1474  for (int n = nb_samples / 2; n > 0; n--) {
1475  for (int channel = 0; channel < avctx->channels; channel++) {
1476  int v = bytestream2_get_byteu(&gb);
1477  *samples++ = adpcm_ima_alp_expand_nibble(&c->status[channel], v >> 4 , 2);
1478  samples[st] = adpcm_ima_alp_expand_nibble(&c->status[channel], v & 0x0F, 2);
1479  }
1480  samples += avctx->channels;
1481  }
1482  ) /* End of CASE */
1483  CASE(ADPCM_IMA_CUNNING,
1484  for (int channel = 0; channel < avctx->channels; channel++) {
1485  int16_t *smp = samples_p[channel];
1486  for (int n = 0; n < nb_samples / 2; n++) {
1487  int v = bytestream2_get_byteu(&gb);
1488  *smp++ = adpcm_ima_cunning_expand_nibble(&c->status[channel], v & 0x0F);
1489  *smp++ = adpcm_ima_cunning_expand_nibble(&c->status[channel], v >> 4);
1490  }
1491  }
1492  ) /* End of CASE */
1493  CASE(ADPCM_IMA_OKI,
1494  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1495  int v = bytestream2_get_byteu(&gb);
1496  *samples++ = adpcm_ima_oki_expand_nibble(&c->status[0], v >> 4 );
1497  *samples++ = adpcm_ima_oki_expand_nibble(&c->status[st], v & 0x0F);
1498  }
1499  ) /* End of CASE */
1500  CASE(ADPCM_IMA_RAD,
1501  for (int channel = 0; channel < avctx->channels; channel++) {
1502  ADPCMChannelStatus *cs = &c->status[channel];
1503  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1504  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1505  if (cs->step_index > 88u){
1506  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1507  channel, cs->step_index);
1508  return AVERROR_INVALIDDATA;
1509  }
1510  }
1511  for (int n = 0; n < nb_samples / 2; n++) {
1512  int byte[2];
1513 
1514  byte[0] = bytestream2_get_byteu(&gb);
1515  if (st)
1516  byte[1] = bytestream2_get_byteu(&gb);
1517  for (int channel = 0; channel < avctx->channels; channel++) {
1518  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], byte[channel] & 0x0F, 3);
1519  }
1520  for (int channel = 0; channel < avctx->channels; channel++) {
1521  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], byte[channel] >> 4 , 3);
1522  }
1523  }
1524  ) /* End of CASE */
1525  CASE(ADPCM_IMA_WS,
1526  if (c->vqa_version == 3) {
1527  for (int channel = 0; channel < avctx->channels; channel++) {
1528  int16_t *smp = samples_p[channel];
1529 
1530  for (int n = nb_samples / 2; n > 0; n--) {
1531  int v = bytestream2_get_byteu(&gb);
1532  *smp++ = adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3);
1533  *smp++ = adpcm_ima_expand_nibble(&c->status[channel], v >> 4 , 3);
1534  }
1535  }
1536  } else {
1537  for (int n = nb_samples / 2; n > 0; n--) {
1538  for (int channel = 0; channel < avctx->channels; channel++) {
1539  int v = bytestream2_get_byteu(&gb);
1540  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3);
1541  samples[st] = adpcm_ima_expand_nibble(&c->status[channel], v >> 4 , 3);
1542  }
1543  samples += avctx->channels;
1544  }
1545  }
1546  bytestream2_seek(&gb, 0, SEEK_END);
1547  ) /* End of CASE */
1548  CASE(ADPCM_XA,
1549  int16_t *out0 = samples_p[0];
1550  int16_t *out1 = samples_p[1];
1551  int samples_per_block = 28 * (3 - avctx->channels) * 4;
1552  int sample_offset = 0;
1553  int bytes_remaining;
1554  while (bytestream2_get_bytes_left(&gb) >= 128) {
1555  if ((ret = xa_decode(avctx, out0, out1, buf + bytestream2_tell(&gb),
1556  &c->status[0], &c->status[1],
1557  avctx->channels, sample_offset)) < 0)
1558  return ret;
1559  bytestream2_skipu(&gb, 128);
1560  sample_offset += samples_per_block;
1561  }
1562  /* Less than a full block of data left, e.g. when reading from
1563  * 2324 byte per sector XA; the remainder is padding */
1564  bytes_remaining = bytestream2_get_bytes_left(&gb);
1565  if (bytes_remaining > 0) {
1566  bytestream2_skip(&gb, bytes_remaining);
1567  }
1568  ) /* End of CASE */
1569  CASE(ADPCM_IMA_EA_EACS,
1570  for (int i = 0; i <= st; i++) {
1571  c->status[i].step_index = bytestream2_get_le32u(&gb);
1572  if (c->status[i].step_index > 88u) {
1573  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1574  i, c->status[i].step_index);
1575  return AVERROR_INVALIDDATA;
1576  }
1577  }
1578  for (int i = 0; i <= st; i++) {
1579  c->status[i].predictor = bytestream2_get_le32u(&gb);
1580  if (FFABS((int64_t)c->status[i].predictor) > (1<<16))
1581  return AVERROR_INVALIDDATA;
1582  }
1583 
1584  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1585  int byte = bytestream2_get_byteu(&gb);
1586  *samples++ = adpcm_ima_expand_nibble(&c->status[0], byte >> 4, 3);
1587  *samples++ = adpcm_ima_expand_nibble(&c->status[st], byte & 0x0F, 3);
1588  }
1589  ) /* End of CASE */
1590  CASE(ADPCM_IMA_EA_SEAD,
1591  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1592  int byte = bytestream2_get_byteu(&gb);
1593  *samples++ = adpcm_ima_expand_nibble(&c->status[0], byte >> 4, 6);
1594  *samples++ = adpcm_ima_expand_nibble(&c->status[st], byte & 0x0F, 6);
1595  }
1596  ) /* End of CASE */
1597  CASE(ADPCM_EA,
1598  int previous_left_sample, previous_right_sample;
1599  int current_left_sample, current_right_sample;
1600  int next_left_sample, next_right_sample;
1601  int coeff1l, coeff2l, coeff1r, coeff2r;
1602  int shift_left, shift_right;
1603 
1604  /* Each EA ADPCM frame has a 12-byte header followed by 30-byte pieces,
1605  each coding 28 stereo samples. */
1606 
1607  if(avctx->channels != 2)
1608  return AVERROR_INVALIDDATA;
1609 
1610  current_left_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1611  previous_left_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1612  current_right_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1613  previous_right_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1614 
1615  for (int count1 = 0; count1 < nb_samples / 28; count1++) {
1616  int byte = bytestream2_get_byteu(&gb);
1617  coeff1l = ea_adpcm_table[ byte >> 4 ];
1618  coeff2l = ea_adpcm_table[(byte >> 4 ) + 4];
1619  coeff1r = ea_adpcm_table[ byte & 0x0F];
1620  coeff2r = ea_adpcm_table[(byte & 0x0F) + 4];
1621 
1622  byte = bytestream2_get_byteu(&gb);
1623  shift_left = 20 - (byte >> 4);
1624  shift_right = 20 - (byte & 0x0F);
1625 
1626  for (int count2 = 0; count2 < 28; count2++) {
1627  byte = bytestream2_get_byteu(&gb);
1628  next_left_sample = sign_extend(byte >> 4, 4) * (1 << shift_left);
1629  next_right_sample = sign_extend(byte, 4) * (1 << shift_right);
1630 
1631  next_left_sample = (next_left_sample +
1632  (current_left_sample * coeff1l) +
1633  (previous_left_sample * coeff2l) + 0x80) >> 8;
1634  next_right_sample = (next_right_sample +
1635  (current_right_sample * coeff1r) +
1636  (previous_right_sample * coeff2r) + 0x80) >> 8;
1637 
1638  previous_left_sample = current_left_sample;
1639  current_left_sample = av_clip_int16(next_left_sample);
1640  previous_right_sample = current_right_sample;
1641  current_right_sample = av_clip_int16(next_right_sample);
1642  *samples++ = current_left_sample;
1643  *samples++ = current_right_sample;
1644  }
1645  }
1646 
1647  bytestream2_skip(&gb, 2); // Skip terminating 0x0000
1648  ) /* End of CASE */
1649  CASE(ADPCM_EA_MAXIS_XA,
1650  int coeff[2][2], shift[2];
1651 
1652  for (int channel = 0; channel < avctx->channels; channel++) {
1653  int byte = bytestream2_get_byteu(&gb);
1654  for (int i = 0; i < 2; i++)
1655  coeff[channel][i] = ea_adpcm_table[(byte >> 4) + 4*i];
1656  shift[channel] = 20 - (byte & 0x0F);
1657  }
1658  for (int count1 = 0; count1 < nb_samples / 2; count1++) {
1659  int byte[2];
1660 
1661  byte[0] = bytestream2_get_byteu(&gb);
1662  if (st) byte[1] = bytestream2_get_byteu(&gb);
1663  for (int i = 4; i >= 0; i-=4) { /* Pairwise samples LL RR (st) or LL LL (mono) */
1664  for (int channel = 0; channel < avctx->channels; channel++) {
1665  int sample = sign_extend(byte[channel] >> i, 4) * (1 << shift[channel]);
1666  sample = (sample +
1667  c->status[channel].sample1 * coeff[channel][0] +
1668  c->status[channel].sample2 * coeff[channel][1] + 0x80) >> 8;
1669  c->status[channel].sample2 = c->status[channel].sample1;
1670  c->status[channel].sample1 = av_clip_int16(sample);
1671  *samples++ = c->status[channel].sample1;
1672  }
1673  }
1674  }
1675  bytestream2_seek(&gb, 0, SEEK_END);
1676  ) /* End of CASE */
1677 #if CONFIG_ADPCM_EA_R1_DECODER || CONFIG_ADPCM_EA_R2_DECODER || CONFIG_ADPCM_EA_R3_DECODER
1680  case AV_CODEC_ID_ADPCM_EA_R3: {
1681  /* channel numbering
1682  2chan: 0=fl, 1=fr
1683  4chan: 0=fl, 1=rl, 2=fr, 3=rr
1684  6chan: 0=fl, 1=c, 2=fr, 3=rl, 4=rr, 5=sub */
1685  const int big_endian = avctx->codec->id == AV_CODEC_ID_ADPCM_EA_R3;
1686  int previous_sample, current_sample, next_sample;
1687  int coeff1, coeff2;
1688  int shift;
1689  uint16_t *samplesC;
1690  int count = 0;
1691  int offsets[6];
1692 
1693  for (unsigned channel = 0; channel < avctx->channels; channel++)
1694  offsets[channel] = (big_endian ? bytestream2_get_be32(&gb) :
1695  bytestream2_get_le32(&gb)) +
1696  (avctx->channels + 1) * 4;
1697 
1698  for (unsigned channel = 0; channel < avctx->channels; channel++) {
1699  int count1;
1700 
1701  bytestream2_seek(&gb, offsets[channel], SEEK_SET);
1702  samplesC = samples_p[channel];
1703 
1704  if (avctx->codec->id == AV_CODEC_ID_ADPCM_EA_R1) {
1705  current_sample = sign_extend(bytestream2_get_le16(&gb), 16);
1706  previous_sample = sign_extend(bytestream2_get_le16(&gb), 16);
1707  } else {
1708  current_sample = c->status[channel].predictor;
1709  previous_sample = c->status[channel].prev_sample;
1710  }
1711 
1712  for (count1 = 0; count1 < nb_samples / 28; count1++) {
1713  int byte = bytestream2_get_byte(&gb);
1714  if (byte == 0xEE) { /* only seen in R2 and R3 */
1715  current_sample = sign_extend(bytestream2_get_be16(&gb), 16);
1716  previous_sample = sign_extend(bytestream2_get_be16(&gb), 16);
1717 
1718  for (int count2 = 0; count2 < 28; count2++)
1719  *samplesC++ = sign_extend(bytestream2_get_be16(&gb), 16);
1720  } else {
1721  coeff1 = ea_adpcm_table[ byte >> 4 ];
1722  coeff2 = ea_adpcm_table[(byte >> 4) + 4];
1723  shift = 20 - (byte & 0x0F);
1724 
1725  for (int count2 = 0; count2 < 28; count2++) {
1726  if (count2 & 1)
1727  next_sample = (unsigned)sign_extend(byte, 4) << shift;
1728  else {
1729  byte = bytestream2_get_byte(&gb);
1730  next_sample = (unsigned)sign_extend(byte >> 4, 4) << shift;
1731  }
1732 
1733  next_sample += (current_sample * coeff1) +
1734  (previous_sample * coeff2);
1735  next_sample = av_clip_int16(next_sample >> 8);
1736 
1737  previous_sample = current_sample;
1738  current_sample = next_sample;
1739  *samplesC++ = current_sample;
1740  }
1741  }
1742  }
1743  if (!count) {
1744  count = count1;
1745  } else if (count != count1) {
1746  av_log(avctx, AV_LOG_WARNING, "per-channel sample count mismatch\n");
1747  count = FFMAX(count, count1);
1748  }
1749 
1750  if (avctx->codec->id != AV_CODEC_ID_ADPCM_EA_R1) {
1751  c->status[channel].predictor = current_sample;
1752  c->status[channel].prev_sample = previous_sample;
1753  }
1754  }
1755 
1756  frame->nb_samples = count * 28;
1757  bytestream2_seek(&gb, 0, SEEK_END);
1758  break;
1759  }
1760 #endif /* CONFIG_ADPCM_EA_Rx_DECODER */
1761  CASE(ADPCM_EA_XAS,
1762  for (int channel=0; channel < avctx->channels; channel++) {
1763  int coeff[2][4], shift[4];
1764  int16_t *s = samples_p[channel];
1765  for (int n = 0; n < 4; n++, s += 32) {
1766  int val = sign_extend(bytestream2_get_le16u(&gb), 16);
1767  for (int i = 0; i < 2; i++)
1768  coeff[i][n] = ea_adpcm_table[(val&0x0F)+4*i];
1769  s[0] = val & ~0x0F;
1770 
1771  val = sign_extend(bytestream2_get_le16u(&gb), 16);
1772  shift[n] = 20 - (val & 0x0F);
1773  s[1] = val & ~0x0F;
1774  }
1775 
1776  for (int m = 2; m < 32; m += 2) {
1777  s = &samples_p[channel][m];
1778  for (int n = 0; n < 4; n++, s += 32) {
1779  int level, pred;
1780  int byte = bytestream2_get_byteu(&gb);
1781 
1782  level = sign_extend(byte >> 4, 4) * (1 << shift[n]);
1783  pred = s[-1] * coeff[0][n] + s[-2] * coeff[1][n];
1784  s[0] = av_clip_int16((level + pred + 0x80) >> 8);
1785 
1786  level = sign_extend(byte, 4) * (1 << shift[n]);
1787  pred = s[0] * coeff[0][n] + s[-1] * coeff[1][n];
1788  s[1] = av_clip_int16((level + pred + 0x80) >> 8);
1789  }
1790  }
1791  }
1792  ) /* End of CASE */
1793  CASE(ADPCM_IMA_ACORN,
1794  for (int channel = 0; channel < avctx->channels; channel++) {
1795  ADPCMChannelStatus *cs = &c->status[channel];
1796  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1797  cs->step_index = bytestream2_get_le16u(&gb) & 0xFF;
1798  if (cs->step_index > 88u){
1799  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1800  channel, cs->step_index);
1801  return AVERROR_INVALIDDATA;
1802  }
1803  }
1804  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1805  int byte = bytestream2_get_byteu(&gb);
1806  *samples++ = adpcm_ima_expand_nibble(&c->status[0], byte & 0x0F, 3);
1807  *samples++ = adpcm_ima_expand_nibble(&c->status[st], byte >> 4, 3);
1808  }
1809  ) /* End of CASE */
1810  CASE(ADPCM_IMA_AMV,
1811  av_assert0(avctx->channels == 1);
1812 
1813  /*
1814  * Header format:
1815  * int16_t predictor;
1816  * uint8_t step_index;
1817  * uint8_t reserved;
1818  * uint32_t frame_size;
1819  *
1820  * Some implementations have step_index as 16-bits, but others
1821  * only use the lower 8 and store garbage in the upper 8.
1822  */
1823  c->status[0].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1824  c->status[0].step_index = bytestream2_get_byteu(&gb);
1825  bytestream2_skipu(&gb, 5);
1826  if (c->status[0].step_index > 88u) {
1827  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n",
1828  c->status[0].step_index);
1829  return AVERROR_INVALIDDATA;
1830  }
1831 
1832  for (int n = nb_samples >> 1; n > 0; n--) {
1833  int v = bytestream2_get_byteu(&gb);
1834 
1835  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4, 3);
1836  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v & 0xf, 3);
1837  }
1838 
1839  if (nb_samples & 1) {
1840  int v = bytestream2_get_byteu(&gb);
1841  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4, 3);
1842 
1843  if (v & 0x0F) {
1844  /* Holds true on all the http://samples.mplayerhq.hu/amv samples. */
1845  av_log(avctx, AV_LOG_WARNING, "Last nibble set on packet with odd sample count.\n");
1846  av_log(avctx, AV_LOG_WARNING, "Sample will be skipped.\n");
1847  }
1848  }
1849  ) /* End of CASE */
1850  CASE(ADPCM_IMA_SMJPEG,
1851  for (int i = 0; i < avctx->channels; i++) {
1852  c->status[i].predictor = sign_extend(bytestream2_get_be16u(&gb), 16);
1853  c->status[i].step_index = bytestream2_get_byteu(&gb);
1854  bytestream2_skipu(&gb, 1);
1855  if (c->status[i].step_index > 88u) {
1856  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n",
1857  c->status[i].step_index);
1858  return AVERROR_INVALIDDATA;
1859  }
1860  }
1861 
1862  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1863  int v = bytestream2_get_byteu(&gb);
1864 
1865  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[0 ], v >> 4 );
1866  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[st], v & 0xf);
1867  }
1868  ) /* End of CASE */
1869  CASE(ADPCM_CT,
1870  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1871  int v = bytestream2_get_byteu(&gb);
1872  *samples++ = adpcm_ct_expand_nibble(&c->status[0 ], v >> 4 );
1873  *samples++ = adpcm_ct_expand_nibble(&c->status[st], v & 0x0F);
1874  }
1875  ) /* End of CASE */
1876 #if CONFIG_ADPCM_SBPRO_2_DECODER || CONFIG_ADPCM_SBPRO_3_DECODER || \
1877  CONFIG_ADPCM_SBPRO_4_DECODER
1881  if (!c->status[0].step_index) {
1882  /* the first byte is a raw sample */
1883  *samples++ = 128 * (bytestream2_get_byteu(&gb) - 0x80);
1884  if (st)
1885  *samples++ = 128 * (bytestream2_get_byteu(&gb) - 0x80);
1886  c->status[0].step_index = 1;
1887  nb_samples--;
1888  }
1889  if (avctx->codec->id == AV_CODEC_ID_ADPCM_SBPRO_4) {
1890  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1891  int byte = bytestream2_get_byteu(&gb);
1892  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1893  byte >> 4, 4, 0);
1894  *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
1895  byte & 0x0F, 4, 0);
1896  }
1897  } else if (avctx->codec->id == AV_CODEC_ID_ADPCM_SBPRO_3) {
1898  for (int n = (nb_samples<<st) / 3; n > 0; n--) {
1899  int byte = bytestream2_get_byteu(&gb);
1900  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1901  byte >> 5 , 3, 0);
1902  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1903  (byte >> 2) & 0x07, 3, 0);
1904  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1905  byte & 0x03, 2, 0);
1906  }
1907  } else {
1908  for (int n = nb_samples >> (2 - st); n > 0; n--) {
1909  int byte = bytestream2_get_byteu(&gb);
1910  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1911  byte >> 6 , 2, 2);
1912  *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
1913  (byte >> 4) & 0x03, 2, 2);
1914  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1915  (byte >> 2) & 0x03, 2, 2);
1916  *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
1917  byte & 0x03, 2, 2);
1918  }
1919  }
1920  break;
1921 #endif /* CONFIG_ADPCM_SBPRO_x_DECODER */
1922  CASE(ADPCM_SWF,
1923  adpcm_swf_decode(avctx, buf, buf_size, samples);
1924  bytestream2_seek(&gb, 0, SEEK_END);
1925  ) /* End of CASE */
1926  CASE(ADPCM_YAMAHA,
1927  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1928  int v = bytestream2_get_byteu(&gb);
1929  *samples++ = adpcm_yamaha_expand_nibble(&c->status[0 ], v & 0x0F);
1930  *samples++ = adpcm_yamaha_expand_nibble(&c->status[st], v >> 4 );
1931  }
1932  ) /* End of CASE */
1933  CASE(ADPCM_AICA,
1934  for (int channel = 0; channel < avctx->channels; channel++) {
1935  samples = samples_p[channel];
1936  for (int n = nb_samples >> 1; n > 0; n--) {
1937  int v = bytestream2_get_byteu(&gb);
1938  *samples++ = adpcm_yamaha_expand_nibble(&c->status[channel], v & 0x0F);
1939  *samples++ = adpcm_yamaha_expand_nibble(&c->status[channel], v >> 4 );
1940  }
1941  }
1942  ) /* End of CASE */
1943  CASE(ADPCM_AFC,
1944  int samples_per_block;
1945  int blocks;
1946 
1947  if (avctx->extradata && avctx->extradata_size == 1 && avctx->extradata[0]) {
1948  samples_per_block = avctx->extradata[0] / 16;
1949  blocks = nb_samples / avctx->extradata[0];
1950  } else {
1951  samples_per_block = nb_samples / 16;
1952  blocks = 1;
1953  }
1954 
1955  for (int m = 0; m < blocks; m++) {
1956  for (int channel = 0; channel < avctx->channels; channel++) {
1957  int prev1 = c->status[channel].sample1;
1958  int prev2 = c->status[channel].sample2;
1959 
1960  samples = samples_p[channel] + m * 16;
1961  /* Read in every sample for this channel. */
1962  for (int i = 0; i < samples_per_block; i++) {
1963  int byte = bytestream2_get_byteu(&gb);
1964  int scale = 1 << (byte >> 4);
1965  int index = byte & 0xf;
1966  int factor1 = afc_coeffs[0][index];
1967  int factor2 = afc_coeffs[1][index];
1968 
1969  /* Decode 16 samples. */
1970  for (int n = 0; n < 16; n++) {
1971  int32_t sampledat;
1972 
1973  if (n & 1) {
1974  sampledat = sign_extend(byte, 4);
1975  } else {
1976  byte = bytestream2_get_byteu(&gb);
1977  sampledat = sign_extend(byte >> 4, 4);
1978  }
1979 
1980  sampledat = ((prev1 * factor1 + prev2 * factor2) >> 11) +
1981  sampledat * scale;
1982  *samples = av_clip_int16(sampledat);
1983  prev2 = prev1;
1984  prev1 = *samples++;
1985  }
1986  }
1987 
1988  c->status[channel].sample1 = prev1;
1989  c->status[channel].sample2 = prev2;
1990  }
1991  }
1992  bytestream2_seek(&gb, 0, SEEK_END);
1993  ) /* End of CASE */
1994 #if CONFIG_ADPCM_THP_DECODER || CONFIG_ADPCM_THP_LE_DECODER
1995  case AV_CODEC_ID_ADPCM_THP:
1997  {
1998  int table[14][16];
1999 
2000 #define THP_GET16(g) \
2001  sign_extend( \
2002  avctx->codec->id == AV_CODEC_ID_ADPCM_THP_LE ? \
2003  bytestream2_get_le16u(&(g)) : \
2004  bytestream2_get_be16u(&(g)), 16)
2005 
2006  if (avctx->extradata) {
2008  if (avctx->extradata_size < 32 * avctx->channels) {
2009  av_log(avctx, AV_LOG_ERROR, "Missing coeff table\n");
2010  return AVERROR_INVALIDDATA;
2011  }
2012 
2013  bytestream2_init(&tb, avctx->extradata, avctx->extradata_size);
2014  for (int i = 0; i < avctx->channels; i++)
2015  for (int n = 0; n < 16; n++)
2016  table[i][n] = THP_GET16(tb);
2017  } else {
2018  for (int i = 0; i < avctx->channels; i++)
2019  for (int n = 0; n < 16; n++)
2020  table[i][n] = THP_GET16(gb);
2021 
2022  if (!c->has_status) {
2023  /* Initialize the previous sample. */
2024  for (int i = 0; i < avctx->channels; i++) {
2025  c->status[i].sample1 = THP_GET16(gb);
2026  c->status[i].sample2 = THP_GET16(gb);
2027  }
2028  c->has_status = 1;
2029  } else {
2030  bytestream2_skip(&gb, avctx->channels * 4);
2031  }
2032  }
2033 
2034  for (int ch = 0; ch < avctx->channels; ch++) {
2035  samples = samples_p[ch];
2036 
2037  /* Read in every sample for this channel. */
2038  for (int i = 0; i < (nb_samples + 13) / 14; i++) {
2039  int byte = bytestream2_get_byteu(&gb);
2040  int index = (byte >> 4) & 7;
2041  unsigned int exp = byte & 0x0F;
2042  int64_t factor1 = table[ch][index * 2];
2043  int64_t factor2 = table[ch][index * 2 + 1];
2044 
2045  /* Decode 14 samples. */
2046  for (int n = 0; n < 14 && (i * 14 + n < nb_samples); n++) {
2047  int32_t sampledat;
2048 
2049  if (n & 1) {
2050  sampledat = sign_extend(byte, 4);
2051  } else {
2052  byte = bytestream2_get_byteu(&gb);
2053  sampledat = sign_extend(byte >> 4, 4);
2054  }
2055 
2056  sampledat = ((c->status[ch].sample1 * factor1
2057  + c->status[ch].sample2 * factor2) >> 11) + sampledat * (1 << exp);
2058  *samples = av_clip_int16(sampledat);
2059  c->status[ch].sample2 = c->status[ch].sample1;
2060  c->status[ch].sample1 = *samples++;
2061  }
2062  }
2063  }
2064  break;
2065  }
2066 #endif /* CONFIG_ADPCM_THP(_LE)_DECODER */
2067  CASE(ADPCM_DTK,
2068  for (int channel = 0; channel < avctx->channels; channel++) {
2069  samples = samples_p[channel];
2070 
2071  /* Read in every sample for this channel. */
2072  for (int i = 0; i < nb_samples / 28; i++) {
2073  int byte, header;
2074  if (channel)
2075  bytestream2_skipu(&gb, 1);
2076  header = bytestream2_get_byteu(&gb);
2077  bytestream2_skipu(&gb, 3 - channel);
2078 
2079  /* Decode 28 samples. */
2080  for (int n = 0; n < 28; n++) {
2081  int32_t sampledat, prev;
2082 
2083  switch (header >> 4) {
2084  case 1:
2085  prev = (c->status[channel].sample1 * 0x3c);
2086  break;
2087  case 2:
2088  prev = (c->status[channel].sample1 * 0x73) - (c->status[channel].sample2 * 0x34);
2089  break;
2090  case 3:
2091  prev = (c->status[channel].sample1 * 0x62) - (c->status[channel].sample2 * 0x37);
2092  break;
2093  default:
2094  prev = 0;
2095  }
2096 
2097  prev = av_clip_intp2((prev + 0x20) >> 6, 21);
2098 
2099  byte = bytestream2_get_byteu(&gb);
2100  if (!channel)
2101  sampledat = sign_extend(byte, 4);
2102  else
2103  sampledat = sign_extend(byte >> 4, 4);
2104 
2105  sampledat = ((sampledat * (1 << 12)) >> (header & 0xf)) * (1 << 6) + prev;
2106  *samples++ = av_clip_int16(sampledat >> 6);
2107  c->status[channel].sample2 = c->status[channel].sample1;
2108  c->status[channel].sample1 = sampledat;
2109  }
2110  }
2111  if (!channel)
2112  bytestream2_seek(&gb, 0, SEEK_SET);
2113  }
2114  ) /* End of CASE */
2115  CASE(ADPCM_PSX,
2116  for (int block = 0; block < avpkt->size / FFMAX(avctx->block_align, 16 * avctx->channels); block++) {
2117  int nb_samples_per_block = 28 * FFMAX(avctx->block_align, 16 * avctx->channels) / (16 * avctx->channels);
2118  for (int channel = 0; channel < avctx->channels; channel++) {
2119  samples = samples_p[channel] + block * nb_samples_per_block;
2120  av_assert0((block + 1) * nb_samples_per_block <= nb_samples);
2121 
2122  /* Read in every sample for this channel. */
2123  for (int i = 0; i < nb_samples_per_block / 28; i++) {
2124  int filter, shift, flag, byte;
2125 
2126  filter = bytestream2_get_byteu(&gb);
2127  shift = filter & 0xf;
2128  filter = filter >> 4;
2130  return AVERROR_INVALIDDATA;
2131  flag = bytestream2_get_byteu(&gb) & 0x7;
2132 
2133  /* Decode 28 samples. */
2134  for (int n = 0; n < 28; n++) {
2135  int sample = 0, scale;
2136 
2137  if (n & 1) {
2138  scale = sign_extend(byte >> 4, 4);
2139  } else {
2140  byte = bytestream2_get_byteu(&gb);
2141  scale = sign_extend(byte, 4);
2142  }
2143 
2144  if (flag < 0x07) {
2145  scale = scale * (1 << 12);
2146  sample = (int)((scale >> shift) + (c->status[channel].sample1 * xa_adpcm_table[filter][0] + c->status[channel].sample2 * xa_adpcm_table[filter][1]) / 64);
2147  }
2149  c->status[channel].sample2 = c->status[channel].sample1;
2150  c->status[channel].sample1 = sample;
2151  }
2152  }
2153  }
2154  }
2155  ) /* End of CASE */
2156  CASE(ADPCM_ARGO,
2157  /*
2158  * The format of each block:
2159  * uint8_t left_control;
2160  * uint4_t left_samples[nb_samples];
2161  * ---- and if stereo ----
2162  * uint8_t right_control;
2163  * uint4_t right_samples[nb_samples];
2164  *
2165  * Format of the control byte:
2166  * MSB [SSSSRDRR] LSB
2167  * S = (Shift Amount - 2)
2168  * D = Decoder flag.
2169  * R = Reserved
2170  *
2171  * Each block relies on the previous two samples of each channel.
2172  * They should be 0 initially.
2173  */
2174  for (int block = 0; block < avpkt->size / avctx->block_align; block++) {
2175  for (int channel = 0; channel < avctx->channels; channel++) {
2176  ADPCMChannelStatus *cs = c->status + channel;
2177  int control, shift;
2178 
2179  samples = samples_p[channel] + block * 32;
2180 
2181  /* Get the control byte and decode the samples, 2 at a time. */
2182  control = bytestream2_get_byteu(&gb);
2183  shift = (control >> 4) + 2;
2184 
2185  for (int n = 0; n < 16; n++) {
2186  int sample = bytestream2_get_byteu(&gb);
2187  *samples++ = ff_adpcm_argo_expand_nibble(cs, sample >> 4, shift, control & 0x04);
2188  *samples++ = ff_adpcm_argo_expand_nibble(cs, sample >> 0, shift, control & 0x04);
2189  }
2190  }
2191  }
2192  ) /* End of CASE */
2193  CASE(ADPCM_ZORK,
2194  for (int n = 0; n < nb_samples * avctx->channels; n++) {
2195  int v = bytestream2_get_byteu(&gb);
2196  *samples++ = adpcm_zork_expand_nibble(&c->status[n % avctx->channels], v);
2197  }
2198  ) /* End of CASE */
2199  CASE(ADPCM_IMA_MTF,
2200  for (int n = nb_samples / 2; n > 0; n--) {
2201  for (int channel = 0; channel < avctx->channels; channel++) {
2202  int v = bytestream2_get_byteu(&gb);
2203  *samples++ = adpcm_ima_mtf_expand_nibble(&c->status[channel], v >> 4);
2204  samples[st] = adpcm_ima_mtf_expand_nibble(&c->status[channel], v & 0x0F);
2205  }
2206  samples += avctx->channels;
2207  }
2208  ) /* End of CASE */
2209  default:
2210  av_assert0(0); // unsupported codec_id should not happen
2211  }
2212 
2213  if (avpkt->size && bytestream2_tell(&gb) == 0) {
2214  av_log(avctx, AV_LOG_ERROR, "Nothing consumed\n");
2215  return AVERROR_INVALIDDATA;
2216  }
2217 
2218  *got_frame_ptr = 1;
2219 
2220  if (avpkt->size < bytestream2_tell(&gb)) {
2221  av_log(avctx, AV_LOG_ERROR, "Overread of %d < %d\n", avpkt->size, bytestream2_tell(&gb));
2222  return avpkt->size;
2223  }
2224 
2225  return bytestream2_tell(&gb);
2226 }
2227 
2228 static void adpcm_flush(AVCodecContext *avctx)
2229 {
2230  ADPCMDecodeContext *c = avctx->priv_data;
2231 
2232  /* Just nuke the entire state and re-init. */
2233  memset(c, 0, sizeof(ADPCMDecodeContext));
2234 
2235  switch(avctx->codec_id) {
2236  case AV_CODEC_ID_ADPCM_CT:
2237  c->status[0].step = c->status[1].step = 511;
2238  break;
2239 
2241  if (avctx->extradata && avctx->extradata_size >= 8) {
2242  c->status[0].predictor = av_clip_intp2(AV_RL32(avctx->extradata ), 18);
2243  c->status[1].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 4), 18);
2244  }
2245  break;
2246 
2248  if (avctx->extradata && avctx->extradata_size >= 28) {
2249  c->status[0].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 16), 18);
2250  c->status[0].step_index = av_clip(AV_RL32(avctx->extradata + 20), 0, 88);
2251  c->status[1].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 4), 18);
2252  c->status[1].step_index = av_clip(AV_RL32(avctx->extradata + 8), 0, 88);
2253  }
2254  break;
2255 
2257  if (avctx->extradata && avctx->extradata_size >= 2)
2258  c->vqa_version = AV_RL16(avctx->extradata);
2259  break;
2260  default:
2261  /* Other codecs may want to handle this during decoding. */
2262  c->has_status = 0;
2263  return;
2264  }
2265 
2266  c->has_status = 1;
2267 }
2268 
2269 
2277 
2278 #define ADPCM_DECODER_0(id_, sample_fmts_, name_, long_name_)
2279 #define ADPCM_DECODER_1(id_, sample_fmts_, name_, long_name_) \
2280 const AVCodec ff_ ## name_ ## _decoder = { \
2281  .name = #name_, \
2282  .long_name = NULL_IF_CONFIG_SMALL(long_name_), \
2283  .type = AVMEDIA_TYPE_AUDIO, \
2284  .id = id_, \
2285  .priv_data_size = sizeof(ADPCMDecodeContext), \
2286  .init = adpcm_decode_init, \
2287  .decode = adpcm_decode_frame, \
2288  .flush = adpcm_flush, \
2289  .capabilities = AV_CODEC_CAP_DR1, \
2290  .sample_fmts = sample_fmts_, \
2291  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE, \
2292 };
2293 #define ADPCM_DECODER_2(enabled, codec_id, name, sample_fmts, long_name) \
2294  ADPCM_DECODER_ ## enabled(codec_id, name, sample_fmts, long_name)
2295 #define ADPCM_DECODER_3(config, codec_id, name, sample_fmts, long_name) \
2296  ADPCM_DECODER_2(config, codec_id, name, sample_fmts, long_name)
2297 #define ADPCM_DECODER(codec, name, sample_fmts, long_name) \
2298  ADPCM_DECODER_3(CONFIG_ ## codec ## _DECODER, AV_CODEC_ID_ ## codec, \
2299  name, sample_fmts, long_name)
2300 
2301 /* Note: Do not forget to add new entries to the Makefile as well. */
2302 ADPCM_DECODER(ADPCM_4XM, sample_fmts_s16p, adpcm_4xm, "ADPCM 4X Movie")
2303 ADPCM_DECODER(ADPCM_AFC, sample_fmts_s16p, adpcm_afc, "ADPCM Nintendo Gamecube AFC")
2304 ADPCM_DECODER(ADPCM_AGM, sample_fmts_s16, adpcm_agm, "ADPCM AmuseGraphics Movie")
2305 ADPCM_DECODER(ADPCM_AICA, sample_fmts_s16p, adpcm_aica, "ADPCM Yamaha AICA")
2306 ADPCM_DECODER(ADPCM_ARGO, sample_fmts_s16p, adpcm_argo, "ADPCM Argonaut Games")
2307 ADPCM_DECODER(ADPCM_CT, sample_fmts_s16, adpcm_ct, "ADPCM Creative Technology")
2308 ADPCM_DECODER(ADPCM_DTK, sample_fmts_s16p, adpcm_dtk, "ADPCM Nintendo Gamecube DTK")
2309 ADPCM_DECODER(ADPCM_EA, sample_fmts_s16, adpcm_ea, "ADPCM Electronic Arts")
2310 ADPCM_DECODER(ADPCM_EA_MAXIS_XA, sample_fmts_s16, adpcm_ea_maxis_xa, "ADPCM Electronic Arts Maxis CDROM XA")
2311 ADPCM_DECODER(ADPCM_EA_R1, sample_fmts_s16p, adpcm_ea_r1, "ADPCM Electronic Arts R1")
2312 ADPCM_DECODER(ADPCM_EA_R2, sample_fmts_s16p, adpcm_ea_r2, "ADPCM Electronic Arts R2")
2313 ADPCM_DECODER(ADPCM_EA_R3, sample_fmts_s16p, adpcm_ea_r3, "ADPCM Electronic Arts R3")
2314 ADPCM_DECODER(ADPCM_EA_XAS, sample_fmts_s16p, adpcm_ea_xas, "ADPCM Electronic Arts XAS")
2315 ADPCM_DECODER(ADPCM_IMA_ACORN, sample_fmts_s16, adpcm_ima_acorn, "ADPCM IMA Acorn Replay")
2316 ADPCM_DECODER(ADPCM_IMA_AMV, sample_fmts_s16, adpcm_ima_amv, "ADPCM IMA AMV")
2317 ADPCM_DECODER(ADPCM_IMA_APC, sample_fmts_s16, adpcm_ima_apc, "ADPCM IMA CRYO APC")
2318 ADPCM_DECODER(ADPCM_IMA_APM, sample_fmts_s16, adpcm_ima_apm, "ADPCM IMA Ubisoft APM")
2319 ADPCM_DECODER(ADPCM_IMA_CUNNING, sample_fmts_s16p, adpcm_ima_cunning, "ADPCM IMA Cunning Developments")
2320 ADPCM_DECODER(ADPCM_IMA_DAT4, sample_fmts_s16, adpcm_ima_dat4, "ADPCM IMA Eurocom DAT4")
2321 ADPCM_DECODER(ADPCM_IMA_DK3, sample_fmts_s16, adpcm_ima_dk3, "ADPCM IMA Duck DK3")
2322 ADPCM_DECODER(ADPCM_IMA_DK4, sample_fmts_s16, adpcm_ima_dk4, "ADPCM IMA Duck DK4")
2323 ADPCM_DECODER(ADPCM_IMA_EA_EACS, sample_fmts_s16, adpcm_ima_ea_eacs, "ADPCM IMA Electronic Arts EACS")
2324 ADPCM_DECODER(ADPCM_IMA_EA_SEAD, sample_fmts_s16, adpcm_ima_ea_sead, "ADPCM IMA Electronic Arts SEAD")
2325 ADPCM_DECODER(ADPCM_IMA_ISS, sample_fmts_s16, adpcm_ima_iss, "ADPCM IMA Funcom ISS")
2326 ADPCM_DECODER(ADPCM_IMA_MOFLEX, sample_fmts_s16p, adpcm_ima_moflex, "ADPCM IMA MobiClip MOFLEX")
2327 ADPCM_DECODER(ADPCM_IMA_MTF, sample_fmts_s16, adpcm_ima_mtf, "ADPCM IMA Capcom's MT Framework")
2328 ADPCM_DECODER(ADPCM_IMA_OKI, sample_fmts_s16, adpcm_ima_oki, "ADPCM IMA Dialogic OKI")
2329 ADPCM_DECODER(ADPCM_IMA_QT, sample_fmts_s16p, adpcm_ima_qt, "ADPCM IMA QuickTime")
2330 ADPCM_DECODER(ADPCM_IMA_RAD, sample_fmts_s16, adpcm_ima_rad, "ADPCM IMA Radical")
2331 ADPCM_DECODER(ADPCM_IMA_SSI, sample_fmts_s16, adpcm_ima_ssi, "ADPCM IMA Simon & Schuster Interactive")
2332 ADPCM_DECODER(ADPCM_IMA_SMJPEG, sample_fmts_s16, adpcm_ima_smjpeg, "ADPCM IMA Loki SDL MJPEG")
2333 ADPCM_DECODER(ADPCM_IMA_ALP, sample_fmts_s16, adpcm_ima_alp, "ADPCM IMA High Voltage Software ALP")
2334 ADPCM_DECODER(ADPCM_IMA_WAV, sample_fmts_s16p, adpcm_ima_wav, "ADPCM IMA WAV")
2335 ADPCM_DECODER(ADPCM_IMA_WS, sample_fmts_both, adpcm_ima_ws, "ADPCM IMA Westwood")
2336 ADPCM_DECODER(ADPCM_MS, sample_fmts_both, adpcm_ms, "ADPCM Microsoft")
2337 ADPCM_DECODER(ADPCM_MTAF, sample_fmts_s16p, adpcm_mtaf, "ADPCM MTAF")
2338 ADPCM_DECODER(ADPCM_PSX, sample_fmts_s16p, adpcm_psx, "ADPCM Playstation")
2339 ADPCM_DECODER(ADPCM_SBPRO_2, sample_fmts_s16, adpcm_sbpro_2, "ADPCM Sound Blaster Pro 2-bit")
2340 ADPCM_DECODER(ADPCM_SBPRO_3, sample_fmts_s16, adpcm_sbpro_3, "ADPCM Sound Blaster Pro 2.6-bit")
2341 ADPCM_DECODER(ADPCM_SBPRO_4, sample_fmts_s16, adpcm_sbpro_4, "ADPCM Sound Blaster Pro 4-bit")
2342 ADPCM_DECODER(ADPCM_SWF, sample_fmts_s16, adpcm_swf, "ADPCM Shockwave Flash")
2343 ADPCM_DECODER(ADPCM_THP_LE, sample_fmts_s16p, adpcm_thp_le, "ADPCM Nintendo THP (little-endian)")
2344 ADPCM_DECODER(ADPCM_THP, sample_fmts_s16p, adpcm_thp, "ADPCM Nintendo THP")
2345 ADPCM_DECODER(ADPCM_XA, sample_fmts_s16p, adpcm_xa, "ADPCM CDROM XA")
2346 ADPCM_DECODER(ADPCM_YAMAHA, sample_fmts_s16, adpcm_yamaha, "ADPCM Yamaha")
2347 ADPCM_DECODER(ADPCM_ZORK, sample_fmts_s16, adpcm_zork, "ADPCM Zork")
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
AV_CODEC_ID_ADPCM_MS
@ AV_CODEC_ID_ADPCM_MS
Definition: codec_id.h:359
adpcm_index_table5
static const int8_t adpcm_index_table5[32]
Definition: adpcm.c:133
DK3_GET_NEXT_NIBBLE
#define DK3_GET_NEXT_NIBBLE()
AV_CODEC_ID_ADPCM_IMA_QT
@ AV_CODEC_ID_ADPCM_IMA_QT
Definition: codec_id.h:353
level
uint8_t level
Definition: svq3.c:204
av_clip
#define av_clip
Definition: common.h:96
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
AV_CODEC_ID_ADPCM_DTK
@ AV_CODEC_ID_ADPCM_DTK
Definition: codec_id.h:386
ADPCMChannelStatus::step_index
int16_t step_index
Definition: adpcm.h:33
GetByteContext
Definition: bytestream.h:33
u
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:264
R3
#define R3
Definition: simple_idct.c:173
zork_index_table
static const int8_t zork_index_table[8]
Definition: adpcm.c:228
av_clip_uintp2
#define av_clip_uintp2
Definition: common.h:120
ff_adpcm_AdaptationTable
const int16_t ff_adpcm_AdaptationTable[]
Definition: adpcm_data.c:54
bytestream2_skipu
static av_always_inline void bytestream2_skipu(GetByteContext *g, unsigned int size)
Definition: bytestream.h:174
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:220
av_mod_uintp2
#define av_mod_uintp2
Definition: common.h:123
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:317
bytestream2_seek
static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, int whence)
Definition: bytestream.h:212
step
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about which is also called distortion Distortion can be quantified by almost any quality measurement one chooses the sum of squared differences is used but more complex methods that consider psychovisual effects can be used as well It makes no difference in this discussion First step
Definition: rate_distortion.txt:58
index
fg index
Definition: ffmpeg_filter.c:167
internal.h
AV_CODEC_ID_ADPCM_IMA_CUNNING
@ AV_CODEC_ID_ADPCM_IMA_CUNNING
Definition: codec_id.h:401
AVPacket::data
uint8_t * data
Definition: packet.h:373
table
static const uint16_t table[]
Definition: prosumer.c:206
AV_CODEC_ID_ADPCM_EA_R3
@ AV_CODEC_ID_ADPCM_EA_R3
Definition: codec_id.h:374
data
const char data[16]
Definition: mxf.c:143
AV_CODEC_ID_ADPCM_AICA
@ AV_CODEC_ID_ADPCM_AICA
Definition: codec_id.h:391
AV_CODEC_ID_ADPCM_IMA_OKI
@ AV_CODEC_ID_ADPCM_IMA_OKI
Definition: codec_id.h:385
adpcm_ima_qt_expand_nibble
static int adpcm_ima_qt_expand_nibble(ADPCMChannelStatus *c, int nibble)
Definition: adpcm.c:500
R1
#define R1
Definition: simple_idct.c:171
filter
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce then the filter should push the output frames on the output link immediately As an exception to the previous rule if the input frame is enough to produce several output frames then the filter needs output only at least one per link The additional frames can be left buffered in the filter
Definition: filter_design.txt:228
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:660
AV_CODEC_ID_ADPCM_THP_LE
@ AV_CODEC_ID_ADPCM_THP_LE
Definition: codec_id.h:389
adpcm_sbpro_expand_nibble
static int16_t adpcm_sbpro_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int size, int shift)
Definition: adpcm.c:587
bit
#define bit(string, value)
Definition: cbs_mpeg2.c:58
AV_CODEC_ID_ADPCM_CT
@ AV_CODEC_ID_ADPCM_CT
Definition: codec_id.h:365
bytestream2_skip
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
Definition: bytestream.h:168
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:380
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:392
GetBitContext
Definition: get_bits.h:62
adpcm_ima_mtf_expand_nibble
static int16_t adpcm_ima_mtf_expand_nibble(ADPCMChannelStatus *c, int nibble)
Definition: adpcm.c:441
adpcm_ima_expand_nibble
static int16_t adpcm_ima_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int shift)
Definition: adpcm.c:392
val
static double val(void *priv, double ch)
Definition: aeval.c:76
adpcm_flush
static void adpcm_flush(AVCodecContext *avctx)
Definition: adpcm.c:2228
update
static av_always_inline void update(SilenceDetectContext *s, AVFrame *insamples, int is_silence, int current_sample, int64_t nb_samples_notify, AVRational time_base)
Definition: af_silencedetect.c:78
scale
static av_always_inline float scale(float x, float s)
Definition: vf_v360.c:1388
ff_adpcm_ima_block_sizes
static const uint8_t ff_adpcm_ima_block_sizes[4]
Definition: adpcm_data.h:31
AV_CODEC_ID_ADPCM_SBPRO_2
@ AV_CODEC_ID_ADPCM_SBPRO_2
Definition: codec_id.h:370
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:90
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:678
sample_fmts_s16p
static enum AVSampleFormat sample_fmts_s16p[]
Definition: adpcm.c:2272
adpcm_ima_alp_expand_nibble
static int16_t adpcm_ima_alp_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int shift)
Definition: adpcm.c:418
adpcm_yamaha_expand_nibble
static int16_t adpcm_yamaha_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
Definition: adpcm.c:607
ADPCMChannelStatus::sample1
int sample1
Definition: adpcm.h:39
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:485
AV_CODEC_ID_ADPCM_IMA_ACORN
@ AV_CODEC_ID_ADPCM_IMA_ACORN
Definition: codec_id.h:403
adpcm_zork_expand_nibble
static int16_t adpcm_zork_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
Definition: adpcm.c:630
adpcm_data.h
s
#define s(width, name)
Definition: cbs_vp9.c:257
offsets
static const int offsets[]
Definition: hevc_pel.c:34
AV_CODEC_ID_ADPCM_AFC
@ AV_CODEC_ID_ADPCM_AFC
Definition: codec_id.h:384
AV_CODEC_ID_ADPCM_IMA_EA_SEAD
@ AV_CODEC_ID_ADPCM_IMA_EA_SEAD
Definition: codec_id.h:376
g
const char * g
Definition: vf_curves.c:117
AV_CODEC_ID_ADPCM_IMA_DK3
@ AV_CODEC_ID_ADPCM_IMA_DK3
Definition: codec_id.h:355
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
AV_CODEC_ID_ADPCM_IMA_APC
@ AV_CODEC_ID_ADPCM_IMA_APC
Definition: codec_id.h:382
get_bits_le
static unsigned int get_bits_le(GetBitContext *s, int n)
Definition: get_bits.h:421
get_sbits
static int get_sbits(GetBitContext *s, int n)
Definition: get_bits.h:360
AV_CODEC_ID_ADPCM_IMA_ISS
@ AV_CODEC_ID_ADPCM_IMA_ISS
Definition: codec_id.h:380
channels
channels
Definition: aptx.h:33
get_bits.h
AV_RL16
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_RL16
Definition: bytestream.h:94
AV_CODEC_ID_ADPCM_IMA_SMJPEG
@ AV_CODEC_ID_ADPCM_IMA_SMJPEG
Definition: codec_id.h:358
adpcm_ms_expand_nibble
static int16_t adpcm_ms_expand_nibble(ADPCMChannelStatus *c, int nibble)
Definition: adpcm.c:526
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:393
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:65
if
if(ret)
Definition: filter_design.txt:179
ff_adpcm_ima_block_samples
static const uint8_t ff_adpcm_ima_block_samples[4]
Definition: adpcm_data.h:32
sample_fmts_s16
static enum AVSampleFormat sample_fmts_s16[]
Definition: adpcm.c:2270
AV_CODEC_ID_ADPCM_EA_XAS
@ AV_CODEC_ID_ADPCM_EA_XAS
Definition: codec_id.h:378
av_clip_int16
#define av_clip_int16
Definition: common.h:111
NULL
#define NULL
Definition: coverity.c:32
ADPCM_DECODER
#define ADPCM_DECODER(codec, name, sample_fmts, long_name)
Definition: adpcm.c:2297
av_clip_intp2
#define av_clip_intp2
Definition: common.h:117
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
AV_CODEC_ID_ADPCM_YAMAHA
@ AV_CODEC_ID_ADPCM_YAMAHA
Definition: codec_id.h:367
oki_step_table
static const int16_t oki_step_table[49]
Definition: adpcm.c:212
AV_CODEC_ID_ADPCM_IMA_WS
@ AV_CODEC_ID_ADPCM_IMA_WS
Definition: codec_id.h:357
AV_CODEC_ID_ADPCM_IMA_EA_EACS
@ AV_CODEC_ID_ADPCM_IMA_EA_EACS
Definition: codec_id.h:377
AV_CODEC_ID_ADPCM_ARGO
@ AV_CODEC_ID_ADPCM_ARGO
Definition: codec_id.h:395
AV_CODEC_ID_ADPCM_IMA_DK4
@ AV_CODEC_ID_ADPCM_IMA_DK4
Definition: codec_id.h:356
AV_CODEC_ID_ADPCM_IMA_AMV
@ AV_CODEC_ID_ADPCM_IMA_AMV
Definition: codec_id.h:372
abs
#define abs(x)
Definition: cuda_runtime.h:35
ea_adpcm_table
static const int16_t ea_adpcm_table[]
Definition: adpcm.c:90
ima_cunning_index_table
static const int8_t ima_cunning_index_table[9]
Definition: adpcm.c:104
exp
int8_t exp
Definition: eval.c:72
ADPCMChannelStatus::sample2
int sample2
Definition: adpcm.h:40
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
AV_CODEC_ID_ADPCM_XA
@ AV_CODEC_ID_ADPCM_XA
Definition: codec_id.h:361
bytestream2_get_bytes_left
static av_always_inline int bytestream2_get_bytes_left(GetByteContext *g)
Definition: bytestream.h:158
bytestream2_tell
static av_always_inline int bytestream2_tell(GetByteContext *g)
Definition: bytestream.h:192
for
for(j=16;j >0;--j)
Definition: h264pred_template.c:469
adpcm_ct_expand_nibble
static int16_t adpcm_ct_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
Definition: adpcm.c:566
adpcm.h
adpcm_ima_oki_expand_nibble
static int16_t adpcm_ima_oki_expand_nibble(ADPCMChannelStatus *c, int nibble)
Definition: adpcm.c:545
AV_CODEC_ID_ADPCM_ZORK
@ AV_CODEC_ID_ADPCM_ZORK
Definition: codec_id.h:397
afc_coeffs
static const int16_t afc_coeffs[2][16]
Definition: adpcm.c:85
ADPCMDecodeContext
Definition: adpcm.c:239
ff_adpcm_yamaha_difflookup
const int8_t ff_adpcm_yamaha_difflookup[]
Definition: adpcm_data.c:74
ff_get_buffer
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1652
AVPacket::size
int size
Definition: packet.h:374
byte
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_WB32 unsigned int_TMPL AV_WB24 unsigned int_TMPL AV_WB16 unsigned int_TMPL byte
Definition: bytestream.h:99
AV_CODEC_ID_ADPCM_IMA_RAD
@ AV_CODEC_ID_ADPCM_IMA_RAD
Definition: codec_id.h:387
adpcm_ima_cunning_expand_nibble
static int16_t adpcm_ima_cunning_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
Definition: adpcm.c:457
AV_CODEC_ID_ADPCM_IMA_ALP
@ AV_CODEC_ID_ADPCM_IMA_ALP
Definition: codec_id.h:399
bps
unsigned bps
Definition: movenc.c:1597
ff_adpcm_step_table
const int16_t ff_adpcm_step_table[89]
This is the step table.
Definition: adpcm_data.c:39
AVCodecContext::sample_fmt
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:1000
get_nb_samples
static int get_nb_samples(AVCodecContext *avctx, GetByteContext *gb, int buf_size, int *coded_samples, int *approx_nb_samples)
Get the number of samples (per channel) that will be decoded from the packet.
Definition: adpcm.c:838
AV_SAMPLE_FMT_NONE
@ AV_SAMPLE_FMT_NONE
Definition: samplefmt.h:59
sample
#define sample
Definition: flacdsp_template.c:44
R2
#define R2
Definition: simple_idct.c:172
AV_CODEC_ID_ADPCM_SWF
@ AV_CODEC_ID_ADPCM_SWF
Definition: codec_id.h:366
size
int size
Definition: twinvq_data.h:10344
header
static const uint8_t header[24]
Definition: sdr2.c:67
predictor
static void predictor(uint8_t *src, ptrdiff_t size)
Definition: exrenc.c:164
xa_decode
static int xa_decode(AVCodecContext *avctx, int16_t *out0, int16_t *out1, const uint8_t *in, ADPCMChannelStatus *left, ADPCMChannelStatus *right, int channels, int sample_offset)
Definition: adpcm.c:665
AV_SAMPLE_FMT_S16P
@ AV_SAMPLE_FMT_S16P
signed 16 bits, planar
Definition: samplefmt.h:67
adpcm_index_table3
static const int8_t adpcm_index_table3[8]
Definition: adpcm.c:128
AVCodecContext::channels
int channels
number of audio channels
Definition: avcodec.h:993
AVCodec::id
enum AVCodecID id
Definition: codec.h:216
flag
#define flag(name)
Definition: cbs_av1.c:553
AVCodecContext::bits_per_coded_sample
int bits_per_coded_sample
bits per sample/pixel from the demuxer (needed for huffyuv).
Definition: avcodec.h:1418
sample_fmts_both
static enum AVSampleFormat sample_fmts_both[]
Definition: adpcm.c:2274
AV_CODEC_ID_ADPCM_MTAF
@ AV_CODEC_ID_ADPCM_MTAF
Definition: codec_id.h:393
AV_CODEC_ID_ADPCM_EA_MAXIS_XA
@ AV_CODEC_ID_ADPCM_EA_MAXIS_XA
Definition: codec_id.h:379
ff_adpcm_AdaptCoeff1
const uint8_t ff_adpcm_AdaptCoeff1[]
Divided by 4 to fit in 8-bit integers.
Definition: adpcm_data.c:60
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:271
ff_adpcm_AdaptCoeff2
const int8_t ff_adpcm_AdaptCoeff2[]
Divided by 4 to fit in 8-bit integers.
Definition: adpcm_data.c:65
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:484
adpcm_index_tables
static const int8_t *const adpcm_index_tables[4]
Definition: adpcm.c:138
MT
#define MT(...)
Definition: codec_desc.c:31
AVSampleFormat
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:58
delta
float delta
Definition: vorbis_enc_data.h:430
xf
#define xf(width, name, var, range_min, range_max, subs,...)
Definition: cbs_av1.c:664
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
AV_CODEC_ID_ADPCM_IMA_APM
@ AV_CODEC_ID_ADPCM_IMA_APM
Definition: codec_id.h:398
AV_SAMPLE_FMT_S16
@ AV_SAMPLE_FMT_S16
signed 16 bits
Definition: samplefmt.h:61
tb
#define tb
Definition: regdef.h:68
ADPCMDecodeContext::vqa_version
int vqa_version
VQA version.
Definition: adpcm.c:241
AV_CODEC_ID_ADPCM_IMA_DAT4
@ AV_CODEC_ID_ADPCM_IMA_DAT4
Definition: codec_id.h:392
ff_adpcm_argo_expand_nibble
int16_t ff_adpcm_argo_expand_nibble(ADPCMChannelStatus *cs, int nibble, int shift, int flag)
Definition: adpcm.c:810
xa_adpcm_table
static const int8_t xa_adpcm_table[5][2]
Definition: adpcm.c:77
ff_adpcm_index_table
const int8_t ff_adpcm_index_table[16]
Definition: adpcm_data.c:30
avcodec.h
AV_CODEC_ID_ADPCM_EA
@ AV_CODEC_ID_ADPCM_EA
Definition: codec_id.h:363
AV_CODEC_ID_ADPCM_IMA_MTF
@ AV_CODEC_ID_ADPCM_IMA_MTF
Definition: codec_id.h:400
ret
ret
Definition: filter_design.txt:187
pred
static const float pred[4]
Definition: siprdata.h:259
AVCodecContext::block_align
int block_align
number of bytes per packet if constant and known or 0 Used by some WAV based audio codecs.
Definition: avcodec.h:1029
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
left
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... ..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
Definition: snow.txt:386
AV_RL32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:92
adpcm_ima_wav_expand_nibble
static int16_t adpcm_ima_wav_expand_nibble(ADPCMChannelStatus *c, GetBitContext *gb, int bps)
Definition: adpcm.c:477
AVCodecContext
main external API structure.
Definition: avcodec.h:383
AV_CODEC_ID_ADPCM_AGM
@ AV_CODEC_ID_ADPCM_AGM
Definition: codec_id.h:394
mtaf_stepsize
static const int16_t mtaf_stepsize[32][16]
Definition: adpcm.c:145
ff_adpcm_yamaha_indexscale
const int16_t ff_adpcm_yamaha_indexscale[]
Definition: adpcm_data.c:69
sign_extend
static av_const int sign_extend(int val, unsigned bits)
Definition: mathops.h:130
AV_CODEC_ID_ADPCM_EA_R1
@ AV_CODEC_ID_ADPCM_EA_R1
Definition: codec_id.h:373
AV_CODEC_ID_ADPCM_EA_R2
@ AV_CODEC_ID_ADPCM_EA_R2
Definition: codec_id.h:375
temp
else temp
Definition: vf_mcdeint.c:248
samples
Filter the word “frame” indicates either a video frame or a group of audio samples
Definition: filter_design.txt:8
shift
static int shift(int a, int b)
Definition: sonic.c:83
AV_CODEC_ID_ADPCM_THP
@ AV_CODEC_ID_ADPCM_THP
Definition: codec_id.h:371
add
static float add(float src0, float src1)
Definition: dnn_backend_native_layer_mathbinary.c:35
adpcm_index_table2
static const int8_t adpcm_index_table2[4]
Definition: adpcm.c:123
AV_CODEC_ID_ADPCM_SBPRO_4
@ AV_CODEC_ID_ADPCM_SBPRO_4
Definition: codec_id.h:368
adpcm_swf_decode
static void adpcm_swf_decode(AVCodecContext *avctx, const uint8_t *buf, int buf_size, int16_t *samples)
Definition: adpcm.c:753
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:37
AV_CODEC_ID_ADPCM_IMA_SSI
@ AV_CODEC_ID_ADPCM_IMA_SSI
Definition: codec_id.h:396
adpcm_decode_init
static av_cold int adpcm_decode_init(AVCodecContext *avctx)
Definition: adpcm.c:247
ADPCMDecodeContext::has_status
int has_status
Status flag.
Definition: adpcm.c:242
diff
static av_always_inline int diff(const uint32_t a, const uint32_t b)
Definition: vf_palettegen.c:139
AV_CODEC_ID_ADPCM_IMA_MOFLEX
@ AV_CODEC_ID_ADPCM_IMA_MOFLEX
Definition: codec_id.h:402
AVPacket
This structure stores compressed data.
Definition: packet.h:350
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:410
AV_CODEC_ID_ADPCM_IMA_WAV
@ AV_CODEC_ID_ADPCM_IMA_WAV
Definition: codec_id.h:354
d
d
Definition: ffmpeg_filter.c:153
int32_t
int32_t
Definition: audioconvert.c:56
bytestream.h
ADPCMChannelStatus::predictor
int predictor
Definition: adpcm.h:32
bytestream2_init
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:137
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
coeff
static const double coeff[2][5]
Definition: vf_owdenoise.c:78
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
adpcm_decode_frame
static int adpcm_decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr, AVPacket *avpkt)
Definition: adpcm.c:1057
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
AV_CODEC_ID_ADPCM_4XM
@ AV_CODEC_ID_ADPCM_4XM
Definition: codec_id.h:360
adpcm_agm_expand_nibble
static int16_t adpcm_agm_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
Definition: adpcm.c:348
AV_CODEC_ID_ADPCM_PSX
@ AV_CODEC_ID_ADPCM_PSX
Definition: codec_id.h:390
adpcm_mtaf_expand_nibble
static int16_t adpcm_mtaf_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
Definition: adpcm.c:621
CASE
#define CASE(codec,...)
Definition: adpcm.c:73
ima_cunning_step_table
static const int16_t ima_cunning_step_table[61]
Definition: adpcm.c:114
int
int
Definition: ffmpeg_filter.c:153
ADPCMChannelStatus
Definition: adpcm.h:31
mtf_index_table
static const int8_t mtf_index_table[16]
Definition: adpcm.c:232
channel
channel
Definition: ebur128.h:39
AV_CODEC_ID_ADPCM_SBPRO_3
@ AV_CODEC_ID_ADPCM_SBPRO_3
Definition: codec_id.h:369
ADPCMDecodeContext::status
ADPCMChannelStatus status[14]
Definition: adpcm.c:240
swf_index_tables
static const int8_t swf_index_tables[4][16]
Definition: adpcm.c:221