/*
 * FFmpeg — adpcm.c (source listing extracted from the Doxygen documentation;
 * residual listing-number prefixes and elided lines come from that extraction)
 */
1 /*
2  * Copyright (c) 2001-2003 The FFmpeg project
3  *
4  * first version by Francois Revol (revol@free.fr)
5  * fringe ADPCM codecs (e.g., DK3, DK4, Westwood)
6  * by Mike Melanson (melanson@pcisys.net)
7  * CD-ROM XA ADPCM codec by BERO
8  * EA ADPCM decoder by Robin Kay (komadori@myrealbox.com)
9  * EA ADPCM R1/R2/R3 decoder by Peter Ross (pross@xvid.org)
10  * EA IMA EACS decoder by Peter Ross (pross@xvid.org)
11  * EA IMA SEAD decoder by Peter Ross (pross@xvid.org)
12  * EA ADPCM XAS decoder by Peter Ross (pross@xvid.org)
13  * MAXIS EA ADPCM decoder by Robert Marston (rmarston@gmail.com)
14  * THP ADPCM decoder by Marco Gerards (mgerards@xs4all.nl)
15  * Argonaut Games ADPCM decoder by Zane van Iperen (zane@zanevaniperen.com)
16  * Simon & Schuster Interactive ADPCM decoder by Zane van Iperen (zane@zanevaniperen.com)
17  * Ubisoft ADPCM decoder by Zane van Iperen (zane@zanevaniperen.com)
18  * High Voltage Software ALP decoder by Zane van Iperen (zane@zanevaniperen.com)
19  * Cunning Developments decoder by Zane van Iperen (zane@zanevaniperen.com)
20  *
21  * This file is part of FFmpeg.
22  *
23  * FFmpeg is free software; you can redistribute it and/or
24  * modify it under the terms of the GNU Lesser General Public
25  * License as published by the Free Software Foundation; either
26  * version 2.1 of the License, or (at your option) any later version.
27  *
28  * FFmpeg is distributed in the hope that it will be useful,
29  * but WITHOUT ANY WARRANTY; without even the implied warranty of
30  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
31  * Lesser General Public License for more details.
32  *
33  * You should have received a copy of the GNU Lesser General Public
34  * License along with FFmpeg; if not, write to the Free Software
35  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
36  */
37 #include "avcodec.h"
38 #include "get_bits.h"
39 #include "bytestream.h"
40 #include "adpcm.h"
41 #include "adpcm_data.h"
42 #include "internal.h"
43 
44 /**
45  * @file
46  * ADPCM decoders
47  * Features and limitations:
48  *
49  * Reference documents:
50  * http://wiki.multimedia.cx/index.php?title=Category:ADPCM_Audio_Codecs
51  * http://www.pcisys.net/~melanson/codecs/simpleaudio.html [dead]
52  * http://www.geocities.com/SiliconValley/8682/aud3.txt [dead]
53  * http://openquicktime.sourceforge.net/
54  * XAnim sources (xa_codec.c) http://xanim.polter.net/
55  * http://www.cs.ucla.edu/~leec/mediabench/applications.html [dead]
56  * SoX source code http://sox.sourceforge.net/
57  *
58  * CD-ROM XA:
59  * http://ku-www.ss.titech.ac.jp/~yatsushi/xaadpcm.html [dead]
60  * vagpack & depack http://homepages.compuserve.de/bITmASTER32/psx-index.html [dead]
61  * readstr http://www.geocities.co.jp/Playtown/2004/
62  */
63 
64 /* These are for CD-ROM XA ADPCM */
65 static const int8_t xa_adpcm_table[5][2] = {
66  { 0, 0 },
67  { 60, 0 },
68  { 115, -52 },
69  { 98, -55 },
70  { 122, -60 }
71 };
72 
73 static const int16_t ea_adpcm_table[] = {
74  0, 240, 460, 392,
75  0, 0, -208, -220,
76  0, 1, 3, 4,
77  7, 8, 10, 11,
78  0, -1, -3, -4
79 };
80 
81 // padded to zero where table size is less then 16
82 static const int8_t swf_index_tables[4][16] = {
83  /*2*/ { -1, 2 },
84  /*3*/ { -1, -1, 2, 4 },
85  /*4*/ { -1, -1, -1, -1, 2, 4, 6, 8 },
86  /*5*/ { -1, -1, -1, -1, -1, -1, -1, -1, 1, 2, 4, 6, 8, 10, 13, 16 }
87 };
88 
89 static const int8_t zork_index_table[8] = {
90  -1, -1, -1, 1, 4, 7, 10, 12,
91 };
92 
93 static const int8_t mtf_index_table[16] = {
94  8, 6, 4, 2, -1, -1, -1, -1,
95  -1, -1, -1, -1, 2, 4, 6, 8,
96 };
97 
98 /* end of tables */
99 
100 typedef struct ADPCMDecodeContext {
102  int vqa_version; /**< VQA version. Used for ADPCM_IMA_WS */
105 
107 {
108  ADPCMDecodeContext *c = avctx->priv_data;
109  unsigned int min_channels = 1;
110  unsigned int max_channels = 2;
111 
112  switch(avctx->codec->id) {
115  max_channels = 1;
116  break;
119  min_channels = 2;
120  break;
127  max_channels = 6;
128  break;
130  min_channels = 2;
131  max_channels = 8;
132  if (avctx->channels & 1) {
133  avpriv_request_sample(avctx, "channel count %d", avctx->channels);
134  return AVERROR_PATCHWELCOME;
135  }
136  break;
138  max_channels = 8;
139  if (avctx->channels <= 0 || avctx->block_align % (16 * avctx->channels))
140  return AVERROR_INVALIDDATA;
141  break;
145  max_channels = 14;
146  break;
147  }
148  if (avctx->channels < min_channels || avctx->channels > max_channels) {
149  av_log(avctx, AV_LOG_ERROR, "Invalid number of channels\n");
150  return AVERROR(EINVAL);
151  }
152 
153  switch(avctx->codec->id) {
155  c->status[0].step = c->status[1].step = 511;
156  break;
158  if (avctx->bits_per_coded_sample < 2 || avctx->bits_per_coded_sample > 5)
159  return AVERROR_INVALIDDATA;
160  break;
162  if (avctx->extradata && avctx->extradata_size >= 8) {
163  c->status[0].predictor = av_clip_intp2(AV_RL32(avctx->extradata ), 18);
164  c->status[1].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 4), 18);
165  }
166  break;
168  if (avctx->extradata) {
169  if (avctx->extradata_size >= 28) {
170  c->status[0].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 16), 18);
171  c->status[0].step_index = av_clip(AV_RL32(avctx->extradata + 20), 0, 88);
172  c->status[1].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 4), 18);
173  c->status[1].step_index = av_clip(AV_RL32(avctx->extradata + 8), 0, 88);
174  } else if (avctx->extradata_size >= 16) {
175  c->status[0].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 0), 18);
176  c->status[0].step_index = av_clip(AV_RL32(avctx->extradata + 4), 0, 88);
177  c->status[1].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 8), 18);
178  c->status[1].step_index = av_clip(AV_RL32(avctx->extradata + 12), 0, 88);
179  }
180  }
181  break;
183  if (avctx->extradata && avctx->extradata_size >= 2)
184  c->vqa_version = AV_RL16(avctx->extradata);
185  break;
187  if (avctx->bits_per_coded_sample != 4 || avctx->block_align != 17 * avctx->channels)
188  return AVERROR_INVALIDDATA;
189  break;
191  if (avctx->bits_per_coded_sample != 8)
192  return AVERROR_INVALIDDATA;
193  break;
194  default:
195  break;
196  }
197 
198  switch (avctx->codec->id) {
218  break;
220  avctx->sample_fmt = c->vqa_version == 3 ? AV_SAMPLE_FMT_S16P :
222  break;
224  avctx->sample_fmt = avctx->channels > 2 ? AV_SAMPLE_FMT_S16P :
226  break;
227  default:
228  avctx->sample_fmt = AV_SAMPLE_FMT_S16;
229  }
230 
231  return 0;
232 }
233 
234 static inline int16_t adpcm_agm_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
235 {
236  int delta, pred, step, add;
237 
238  pred = c->predictor;
239  delta = nibble & 7;
240  step = c->step;
241  add = (delta * 2 + 1) * step;
242  if (add < 0)
243  add = add + 7;
244 
245  if ((nibble & 8) == 0)
246  pred = av_clip(pred + (add >> 3), -32767, 32767);
247  else
248  pred = av_clip(pred - (add >> 3), -32767, 32767);
249 
250  switch (delta) {
251  case 7:
252  step *= 0x99;
253  break;
254  case 6:
255  c->step = av_clip(c->step * 2, 127, 24576);
256  c->predictor = pred;
257  return pred;
258  case 5:
259  step *= 0x66;
260  break;
261  case 4:
262  step *= 0x4d;
263  break;
264  default:
265  step *= 0x39;
266  break;
267  }
268 
269  if (step < 0)
270  step += 0x3f;
271 
272  c->step = step >> 6;
273  c->step = av_clip(c->step, 127, 24576);
274  c->predictor = pred;
275  return pred;
276 }
277 
278 static inline int16_t adpcm_ima_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int shift)
279 {
280  int step_index;
281  int predictor;
282  int sign, delta, diff, step;
283 
284  step = ff_adpcm_step_table[c->step_index];
285  step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
286  step_index = av_clip(step_index, 0, 88);
287 
288  sign = nibble & 8;
289  delta = nibble & 7;
290  /* perform direct multiplication instead of series of jumps proposed by
291  * the reference ADPCM implementation since modern CPUs can do the mults
292  * quickly enough */
293  diff = ((2 * delta + 1) * step) >> shift;
294  predictor = c->predictor;
295  if (sign) predictor -= diff;
296  else predictor += diff;
297 
298  c->predictor = av_clip_int16(predictor);
299  c->step_index = step_index;
300 
301  return (int16_t)c->predictor;
302 }
303 
304 static inline int16_t adpcm_ima_alp_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int shift)
305 {
306  int step_index;
307  int predictor;
308  int sign, delta, diff, step;
309 
310  step = ff_adpcm_step_table[c->step_index];
311  step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
312  step_index = av_clip(step_index, 0, 88);
313 
314  sign = nibble & 8;
315  delta = nibble & 7;
316  diff = (delta * step) >> shift;
317  predictor = c->predictor;
318  if (sign) predictor -= diff;
319  else predictor += diff;
320 
321  c->predictor = av_clip_int16(predictor);
322  c->step_index = step_index;
323 
324  return (int16_t)c->predictor;
325 }
326 
327 static inline int16_t adpcm_ima_mtf_expand_nibble(ADPCMChannelStatus *c, int nibble)
328 {
329  int step_index, step, delta, predictor;
330 
331  step = ff_adpcm_step_table[c->step_index];
332 
333  delta = step * (2 * nibble - 15);
334  predictor = c->predictor + delta;
335 
336  step_index = c->step_index + mtf_index_table[(unsigned)nibble];
337  c->predictor = av_clip_int16(predictor >> 4);
338  c->step_index = av_clip(step_index, 0, 88);
339 
340  return (int16_t)c->predictor;
341 }
342 
/* Decode one Cunning Developments IMA-variant nibble: the nibble is a signed
 * 4-bit value, the index table is addressed by its magnitude, and the step
 * index is clamped to 0..60 instead of the usual IMA 0..88.
 * NOTE(review): the statement initializing `step` (original line 351, elided
 * by the documentation extraction) is missing here — presumably it loads the
 * cunning step table by c->step_index; restore from upstream before use. */
343 static inline int16_t adpcm_ima_cunning_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
344 {
345  int step_index;
346  int predictor;
347  int step;
348 
 /* reinterpret the low nibble as a signed two's-complement value */
349  nibble = sign_extend(nibble & 0xF, 4);
350 
 /* index table addressed by magnitude; range narrower than plain IMA */
352  step_index = c->step_index + ff_adpcm_ima_cunning_index_table[abs(nibble)];
353  step_index = av_clip(step_index, 0, 60);
354 
 /* linear prediction: previous sample plus step-scaled signed delta */
355  predictor = c->predictor + step * nibble;
356 
357  c->predictor = av_clip_int16(predictor);
358  c->step_index = step_index;
359 
360  return c->predictor;
361 }
362 
/* Decode one variable-width IMA WAV code word (2..5 bits) read from a
 * little-endian bitstream. The top bit is the sign, the remaining bits the
 * magnitude; the index table is selected by the code width.
 * NOTE(review): the signature line (original line 363) was elided by the
 * documentation extraction; from the call site in adpcm_decode_frame it is
 * adpcm_ima_wav_expand_nibble(ADPCMChannelStatus *c, GetBitContext *gb,
 * int bps) — confirm against upstream. */
364 {
365  int nibble, step_index, predictor, sign, delta, diff, step, shift;
366 
367  shift = bps - 1;
 /* trailing comma operator below is equivalent to a semicolon here */
368  nibble = get_bits_le(gb, bps),
369  step = ff_adpcm_step_table[c->step_index];
370  step_index = c->step_index + ff_adpcm_index_tables[bps - 2][nibble];
371  step_index = av_clip(step_index, 0, 88);
372 
 /* split code word into sign bit (MSB) and magnitude (low shift bits) */
373  sign = nibble & (1 << shift);
374  delta = av_mod_uintp2(nibble, shift);
375  diff = ((2 * delta + 1) * step) >> shift;
376  predictor = c->predictor;
377  if (sign) predictor -= diff;
378  else predictor += diff;
379 
380  c->predictor = av_clip_int16(predictor);
381  c->step_index = step_index;
382 
383  return (int16_t)c->predictor;
384 }
385 
386 static inline int adpcm_ima_qt_expand_nibble(ADPCMChannelStatus *c, int nibble)
387 {
388  int step_index;
389  int predictor;
390  int diff, step;
391 
392  step = ff_adpcm_step_table[c->step_index];
393  step_index = c->step_index + ff_adpcm_index_table[nibble];
394  step_index = av_clip(step_index, 0, 88);
395 
396  diff = step >> 3;
397  if (nibble & 4) diff += step;
398  if (nibble & 2) diff += step >> 1;
399  if (nibble & 1) diff += step >> 2;
400 
401  if (nibble & 8)
402  predictor = c->predictor - diff;
403  else
404  predictor = c->predictor + diff;
405 
406  c->predictor = av_clip_int16(predictor);
407  c->step_index = step_index;
408 
409  return c->predictor;
410 }
411 
412 static inline int16_t adpcm_ms_expand_nibble(ADPCMChannelStatus *c, int nibble)
413 {
414  int predictor;
415 
416  predictor = (((c->sample1) * (c->coeff1)) + ((c->sample2) * (c->coeff2))) / 64;
417  predictor += ((nibble & 0x08)?(nibble - 0x10):(nibble)) * c->idelta;
418 
419  c->sample2 = c->sample1;
420  c->sample1 = av_clip_int16(predictor);
421  c->idelta = (ff_adpcm_AdaptationTable[(int)nibble] * c->idelta) >> 8;
422  if (c->idelta < 16) c->idelta = 16;
423  if (c->idelta > INT_MAX/768) {
424  av_log(NULL, AV_LOG_WARNING, "idelta overflow\n");
425  c->idelta = INT_MAX/768;
426  }
427 
428  return c->sample1;
429 }
430 
/* Decode one OKI (Dialogic) IMA-variant nibble: 12-bit internal precision,
 * step index clamped to 0..48, output scaled up to 16-bit range.
 * NOTE(review): the statement initializing `step` (original line 435, elided
 * by the documentation extraction) is missing here — presumably it loads the
 * OKI step table by c->step_index; restore from upstream before use. */
431 static inline int16_t adpcm_ima_oki_expand_nibble(ADPCMChannelStatus *c, int nibble)
432 {
433  int step_index, predictor, sign, delta, diff, step;
434 
436  step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
437  step_index = av_clip(step_index, 0, 48);
438 
 /* sign in bit 3, magnitude in bits 0-2, standard IMA reconstruction */
439  sign = nibble & 8;
440  delta = nibble & 7;
441  diff = ((2 * delta + 1) * step) >> 3;
442  predictor = c->predictor;
443  if (sign) predictor -= diff;
444  else predictor += diff;
445 
 /* clamp to signed 12-bit, then scale to 16-bit on return */
446  c->predictor = av_clip_intp2(predictor, 11);
447  c->step_index = step_index;
448 
449  return c->predictor * 16;
450 }
451 
452 static inline int16_t adpcm_ct_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
453 {
454  int sign, delta, diff;
455  int new_step;
456 
457  sign = nibble & 8;
458  delta = nibble & 7;
459  /* perform direct multiplication instead of series of jumps proposed by
460  * the reference ADPCM implementation since modern CPUs can do the mults
461  * quickly enough */
462  diff = ((2 * delta + 1) * c->step) >> 3;
463  /* predictor update is not so trivial: predictor is multiplied on 254/256 before updating */
464  c->predictor = ((c->predictor * 254) >> 8) + (sign ? -diff : diff);
465  c->predictor = av_clip_int16(c->predictor);
466  /* calculate new step and clamp it to range 511..32767 */
467  new_step = (ff_adpcm_AdaptationTable[nibble & 7] * c->step) >> 8;
468  c->step = av_clip(new_step, 511, 32767);
469 
470  return (int16_t)c->predictor;
471 }
472 
473 static inline int16_t adpcm_sbpro_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int size, int shift)
474 {
475  int sign, delta, diff;
476 
477  sign = nibble & (1<<(size-1));
478  delta = nibble & ((1<<(size-1))-1);
479  diff = delta << (7 + c->step + shift);
480 
481  /* clamp result */
482  c->predictor = av_clip(c->predictor + (sign ? -diff : diff), -16384,16256);
483 
484  /* calculate new step */
485  if (delta >= (2*size - 3) && c->step < 3)
486  c->step++;
487  else if (delta == 0 && c->step > 0)
488  c->step--;
489 
490  return (int16_t) c->predictor;
491 }
492 
/* Decode one Yamaha ADPCM nibble; a zero step marks an uninitialized
 * channel and resets the state. Step stays clamped to 127..24576.
 * NOTE(review): the signature line (original line 493) was elided by the
 * documentation extraction — presumably
 * static inline int16_t adpcm_yamaha_expand_nibble(ADPCMChannelStatus *c,
 * uint8_t nibble); confirm against upstream. */
494 {
495  if(!c->step) {
496  c->predictor = 0;
497  c->step = 127;
498  }
499 
 /* table-driven delta, then rescale the step by the index-scale table */
500  c->predictor += (c->step * ff_adpcm_yamaha_difflookup[nibble]) / 8;
501  c->predictor = av_clip_int16(c->predictor);
502  c->step = (c->step * ff_adpcm_yamaha_indexscale[nibble]) >> 8;
503  c->step = av_clip(c->step, 127, 24576);
504  return c->predictor;
505 }
506 
507 static inline int16_t adpcm_mtaf_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
508 {
509  c->predictor += ff_adpcm_mtaf_stepsize[c->step][nibble];
510  c->predictor = av_clip_int16(c->predictor);
511  c->step += ff_adpcm_index_table[nibble];
512  c->step = av_clip_uintp2(c->step, 5);
513  return c->predictor;
514 }
515 
516 static inline int16_t adpcm_zork_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
517 {
518  int16_t index = c->step_index;
519  uint32_t lookup_sample = ff_adpcm_step_table[index];
520  int32_t sample = 0;
521 
522  if (nibble & 0x40)
523  sample += lookup_sample;
524  if (nibble & 0x20)
525  sample += lookup_sample >> 1;
526  if (nibble & 0x10)
527  sample += lookup_sample >> 2;
528  if (nibble & 0x08)
529  sample += lookup_sample >> 3;
530  if (nibble & 0x04)
531  sample += lookup_sample >> 4;
532  if (nibble & 0x02)
533  sample += lookup_sample >> 5;
534  if (nibble & 0x01)
535  sample += lookup_sample >> 6;
536  if (nibble & 0x80)
537  sample = -sample;
538 
539  sample += c->predictor;
540  sample = av_clip_int16(sample);
541 
542  index += zork_index_table[(nibble >> 4) & 7];
543  index = av_clip(index, 0, 88);
544 
545  c->predictor = sample;
546  c->step_index = index;
547 
548  return sample;
549 }
550 
551 static int xa_decode(AVCodecContext *avctx, int16_t *out0, int16_t *out1,
553  ADPCMChannelStatus *right, int channels, int sample_offset)
554 {
555  int i, j;
556  int shift,filter,f0,f1;
557  int s_1,s_2;
558  int d,s,t;
559 
560  out0 += sample_offset;
561  if (channels == 1)
562  out1 = out0 + 28;
563  else
564  out1 += sample_offset;
565 
566  for(i=0;i<4;i++) {
567  shift = 12 - (in[4+i*2] & 15);
568  filter = in[4+i*2] >> 4;
569  if (filter >= FF_ARRAY_ELEMS(xa_adpcm_table)) {
570  avpriv_request_sample(avctx, "unknown XA-ADPCM filter %d", filter);
571  filter=0;
572  }
573  if (shift < 0) {
574  avpriv_request_sample(avctx, "unknown XA-ADPCM shift %d", shift);
575  shift = 0;
576  }
577  f0 = xa_adpcm_table[filter][0];
578  f1 = xa_adpcm_table[filter][1];
579 
580  s_1 = left->sample1;
581  s_2 = left->sample2;
582 
583  for(j=0;j<28;j++) {
584  d = in[16+i+j*4];
585 
586  t = sign_extend(d, 4);
587  s = t*(1<<shift) + ((s_1*f0 + s_2*f1+32)>>6);
588  s_2 = s_1;
589  s_1 = av_clip_int16(s);
590  out0[j] = s_1;
591  }
592 
593  if (channels == 2) {
594  left->sample1 = s_1;
595  left->sample2 = s_2;
596  s_1 = right->sample1;
597  s_2 = right->sample2;
598  }
599 
600  shift = 12 - (in[5+i*2] & 15);
601  filter = in[5+i*2] >> 4;
602  if (filter >= FF_ARRAY_ELEMS(xa_adpcm_table) || shift < 0) {
603  avpriv_request_sample(avctx, "unknown XA-ADPCM filter %d", filter);
604  filter=0;
605  }
606  if (shift < 0) {
607  avpriv_request_sample(avctx, "unknown XA-ADPCM shift %d", shift);
608  shift = 0;
609  }
610 
611  f0 = xa_adpcm_table[filter][0];
612  f1 = xa_adpcm_table[filter][1];
613 
614  for(j=0;j<28;j++) {
615  d = in[16+i+j*4];
616 
617  t = sign_extend(d >> 4, 4);
618  s = t*(1<<shift) + ((s_1*f0 + s_2*f1+32)>>6);
619  s_2 = s_1;
620  s_1 = av_clip_int16(s);
621  out1[j] = s_1;
622  }
623 
624  if (channels == 2) {
625  right->sample1 = s_1;
626  right->sample2 = s_2;
627  } else {
628  left->sample1 = s_1;
629  left->sample2 = s_2;
630  }
631 
632  out0 += 28 * (3 - channels);
633  out1 += 28 * (3 - channels);
634  }
635 
636  return 0;
637 }
638 
/* Decode an SWF ADPCM packet: a 2-bit header selects the code width
 * (2..5 bits), then blocks of up to 4096 samples per channel, each block
 * starting with a raw 16-bit predictor and 6-bit step index per channel.
 * NOTE(review): the statement initializing `step` inside the per-sample
 * loop (original line 668, elided by the documentation extraction) is
 * missing here — presumably it loads ff_adpcm_step_table by the channel's
 * step_index; restore from upstream before use. */
639 static void adpcm_swf_decode(AVCodecContext *avctx, const uint8_t *buf, int buf_size, int16_t *samples)
640 {
641  ADPCMDecodeContext *c = avctx->priv_data;
642  GetBitContext gb;
643  const int8_t *table;
644  int k0, signmask, nb_bits, count;
645  int size = buf_size*8;
646  int i;
647 
648  init_get_bits(&gb, buf, size);
649 
650  //read bits & initial values
651  nb_bits = get_bits(&gb, 2)+2;
652  table = swf_index_tables[nb_bits-2];
653  k0 = 1 << (nb_bits-2);
654  signmask = 1 << (nb_bits-1);
655 
 /* one iteration per block; each block has a 22-bit header per channel */
656  while (get_bits_count(&gb) <= size - 22*avctx->channels) {
657  for (i = 0; i < avctx->channels; i++) {
658  *samples++ = c->status[i].predictor = get_sbits(&gb, 16);
659  c->status[i].step_index = get_bits(&gb, 6);
660  }
661 
 /* up to 4095 further coded samples per channel in this block */
662  for (count = 0; get_bits_count(&gb) <= size - nb_bits*avctx->channels && count < 4095; count++) {
663  int i;
664 
665  for (i = 0; i < avctx->channels; i++) {
666  // similar to IMA adpcm
667  int delta = get_bits(&gb, nb_bits);
669  int vpdiff = 0; // vpdiff = (delta+0.5)*step/4
670  int k = k0;
671 
 /* accumulate step contributions of each magnitude bit */
672  do {
673  if (delta & k)
674  vpdiff += step;
675  step >>= 1;
676  k >>= 1;
677  } while(k);
678  vpdiff += step;
679 
680  if (delta & signmask)
681  c->status[i].predictor -= vpdiff;
682  else
683  c->status[i].predictor += vpdiff;
684 
 /* index table addressed by magnitude (sign bit masked off) */
685  c->status[i].step_index += table[delta & (~signmask)];
686 
687  c->status[i].step_index = av_clip(c->status[i].step_index, 0, 88);
688  c->status[i].predictor = av_clip_int16(c->status[i].predictor);
689 
690  *samples++ = c->status[i].predictor;
691  }
692  }
693  }
694 }
695 
696 int16_t ff_adpcm_argo_expand_nibble(ADPCMChannelStatus *cs, int nibble, int shift, int flag)
697 {
698  int sample = sign_extend(nibble, 4) * (1 << shift);
699 
700  if (flag)
701  sample += (8 * cs->sample1) - (4 * cs->sample2);
702  else
703  sample += 4 * cs->sample1;
704 
705  sample = av_clip_int16(sample >> 2);
706 
707  cs->sample2 = cs->sample1;
708  cs->sample1 = sample;
709 
710  return sample;
711 }
712 
713 /**
714  * Get the number of samples (per channel) that will be decoded from the packet.
715  * In one case, this is actually the maximum number of samples possible to
716  * decode with the given buf_size.
717  *
718  * @param[out] coded_samples set to the number of samples as coded in the
719  * packet, or 0 if the codec does not encode the
720  * number of samples in each frame.
721  * @param[out] approx_nb_samples set to non-zero if the number of samples
722  * returned is an approximation.
723  */
725  int buf_size, int *coded_samples, int *approx_nb_samples)
726 {
727  ADPCMDecodeContext *s = avctx->priv_data;
728  int nb_samples = 0;
729  int ch = avctx->channels;
730  int has_coded_samples = 0;
731  int header_size;
732 
733  *coded_samples = 0;
734  *approx_nb_samples = 0;
735 
736  if(ch <= 0)
737  return 0;
738 
739  switch (avctx->codec->id) {
740  /* constant, only check buf_size */
742  if (buf_size < 76 * ch)
743  return 0;
744  nb_samples = 128;
745  break;
747  if (buf_size < 34 * ch)
748  return 0;
749  nb_samples = 64;
750  break;
751  /* simple 4-bit adpcm */
764  nb_samples = buf_size * 2 / ch;
765  break;
766  }
767  if (nb_samples)
768  return nb_samples;
769 
770  /* simple 4-bit adpcm, with header */
771  header_size = 0;
772  switch (avctx->codec->id) {
777  case AV_CODEC_ID_ADPCM_IMA_ISS: header_size = 4 * ch; break;
778  case AV_CODEC_ID_ADPCM_IMA_SMJPEG: header_size = 4 * ch; break;
779  }
780  if (header_size > 0)
781  return (buf_size - header_size) * 2 / ch;
782 
783  /* more complex formats */
784  switch (avctx->codec->id) {
786  bytestream2_skip(gb, 4);
787  has_coded_samples = 1;
788  *coded_samples = bytestream2_get_le32u(gb);
789  nb_samples = FFMIN((buf_size - 8) * 2, *coded_samples);
790  bytestream2_seek(gb, -8, SEEK_CUR);
791  break;
793  has_coded_samples = 1;
794  *coded_samples = bytestream2_get_le32(gb);
795  *coded_samples -= *coded_samples % 28;
796  nb_samples = (buf_size - 12) / 30 * 28;
797  break;
799  has_coded_samples = 1;
800  *coded_samples = bytestream2_get_le32(gb);
801  nb_samples = (buf_size - (4 + 8 * ch)) * 2 / ch;
802  break;
804  nb_samples = (buf_size - ch) / ch * 2;
805  break;
809  /* maximum number of samples */
810  /* has internal offsets and a per-frame switch to signal raw 16-bit */
811  has_coded_samples = 1;
812  switch (avctx->codec->id) {
814  header_size = 4 + 9 * ch;
815  *coded_samples = bytestream2_get_le32(gb);
816  break;
818  header_size = 4 + 5 * ch;
819  *coded_samples = bytestream2_get_le32(gb);
820  break;
822  header_size = 4 + 5 * ch;
823  *coded_samples = bytestream2_get_be32(gb);
824  break;
825  }
826  *coded_samples -= *coded_samples % 28;
827  nb_samples = (buf_size - header_size) * 2 / ch;
828  nb_samples -= nb_samples % 28;
829  *approx_nb_samples = 1;
830  break;
832  if (avctx->block_align > 0)
833  buf_size = FFMIN(buf_size, avctx->block_align);
834  nb_samples = ((buf_size - 16) * 2 / 3 * 4) / ch;
835  break;
837  if (avctx->block_align > 0)
838  buf_size = FFMIN(buf_size, avctx->block_align);
839  if (buf_size < 4 * ch)
840  return AVERROR_INVALIDDATA;
841  nb_samples = 1 + (buf_size - 4 * ch) * 2 / ch;
842  break;
844  if (avctx->block_align > 0)
845  buf_size = FFMIN(buf_size, avctx->block_align);
846  nb_samples = (buf_size - 4 * ch) * 2 / ch;
847  break;
849  {
850  int bsize = ff_adpcm_ima_block_sizes[avctx->bits_per_coded_sample - 2];
851  int bsamples = ff_adpcm_ima_block_samples[avctx->bits_per_coded_sample - 2];
852  if (avctx->block_align > 0)
853  buf_size = FFMIN(buf_size, avctx->block_align);
854  if (buf_size < 4 * ch)
855  return AVERROR_INVALIDDATA;
856  nb_samples = 1 + (buf_size - 4 * ch) / (bsize * ch) * bsamples;
857  break;
858  }
860  if (avctx->block_align > 0)
861  buf_size = FFMIN(buf_size, avctx->block_align);
862  nb_samples = (buf_size - 6 * ch) * 2 / ch;
863  break;
865  if (avctx->block_align > 0)
866  buf_size = FFMIN(buf_size, avctx->block_align);
867  nb_samples = (buf_size - 16 * (ch / 2)) * 2 / ch;
868  break;
872  {
873  int samples_per_byte;
874  switch (avctx->codec->id) {
875  case AV_CODEC_ID_ADPCM_SBPRO_2: samples_per_byte = 4; break;
876  case AV_CODEC_ID_ADPCM_SBPRO_3: samples_per_byte = 3; break;
877  case AV_CODEC_ID_ADPCM_SBPRO_4: samples_per_byte = 2; break;
878  }
879  if (!s->status[0].step_index) {
880  if (buf_size < ch)
881  return AVERROR_INVALIDDATA;
882  nb_samples++;
883  buf_size -= ch;
884  }
885  nb_samples += buf_size * samples_per_byte / ch;
886  break;
887  }
889  {
890  int buf_bits = buf_size * 8 - 2;
891  int nbits = (bytestream2_get_byte(gb) >> 6) + 2;
892  int block_hdr_size = 22 * ch;
893  int block_size = block_hdr_size + nbits * ch * 4095;
894  int nblocks = buf_bits / block_size;
895  int bits_left = buf_bits - nblocks * block_size;
896  nb_samples = nblocks * 4096;
897  if (bits_left >= block_hdr_size)
898  nb_samples += 1 + (bits_left - block_hdr_size) / (nbits * ch);
899  break;
900  }
903  if (avctx->extradata) {
904  nb_samples = buf_size * 14 / (8 * ch);
905  break;
906  }
907  has_coded_samples = 1;
908  bytestream2_skip(gb, 4); // channel size
909  *coded_samples = (avctx->codec->id == AV_CODEC_ID_ADPCM_THP_LE) ?
910  bytestream2_get_le32(gb) :
911  bytestream2_get_be32(gb);
912  buf_size -= 8 + 36 * ch;
913  buf_size /= ch;
914  nb_samples = buf_size / 8 * 14;
915  if (buf_size % 8 > 1)
916  nb_samples += (buf_size % 8 - 1) * 2;
917  *approx_nb_samples = 1;
918  break;
920  nb_samples = buf_size / (9 * ch) * 16;
921  break;
923  nb_samples = (buf_size / 128) * 224 / ch;
924  break;
927  nb_samples = buf_size / (16 * ch) * 28;
928  break;
930  nb_samples = buf_size / avctx->block_align * 32;
931  break;
933  nb_samples = buf_size / ch;
934  break;
935  }
936 
937  /* validate coded sample count */
938  if (has_coded_samples && (*coded_samples <= 0 || *coded_samples > nb_samples))
939  return AVERROR_INVALIDDATA;
940 
941  return nb_samples;
942 }
943 
944 static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
945  int *got_frame_ptr, AVPacket *avpkt)
946 {
947  AVFrame *frame = data;
948  const uint8_t *buf = avpkt->data;
949  int buf_size = avpkt->size;
950  ADPCMDecodeContext *c = avctx->priv_data;
951  ADPCMChannelStatus *cs;
952  int n, m, channel, i;
953  int16_t *samples;
954  int16_t **samples_p;
955  int st; /* stereo */
956  int count1, count2;
957  int nb_samples, coded_samples, approx_nb_samples, ret;
958  GetByteContext gb;
959 
960  bytestream2_init(&gb, buf, buf_size);
961  nb_samples = get_nb_samples(avctx, &gb, buf_size, &coded_samples, &approx_nb_samples);
962  if (nb_samples <= 0) {
963  av_log(avctx, AV_LOG_ERROR, "invalid number of samples in packet\n");
964  return AVERROR_INVALIDDATA;
965  }
966 
967  /* get output buffer */
968  frame->nb_samples = nb_samples;
969  if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
970  return ret;
971  samples = (int16_t *)frame->data[0];
972  samples_p = (int16_t **)frame->extended_data;
973 
974  /* use coded_samples when applicable */
975  /* it is always <= nb_samples, so the output buffer will be large enough */
976  if (coded_samples) {
977  if (!approx_nb_samples && coded_samples != nb_samples)
978  av_log(avctx, AV_LOG_WARNING, "mismatch in coded sample count\n");
979  frame->nb_samples = nb_samples = coded_samples;
980  }
981 
982  st = avctx->channels == 2 ? 1 : 0;
983 
984  switch(avctx->codec->id) {
986  /* In QuickTime, IMA is encoded by chunks of 34 bytes (=64 samples).
987  Channel data is interleaved per-chunk. */
988  for (channel = 0; channel < avctx->channels; channel++) {
989  int predictor;
990  int step_index;
991  cs = &(c->status[channel]);
992  /* (pppppp) (piiiiiii) */
993 
994  /* Bits 15-7 are the _top_ 9 bits of the 16-bit initial predictor value */
995  predictor = sign_extend(bytestream2_get_be16u(&gb), 16);
996  step_index = predictor & 0x7F;
997  predictor &= ~0x7F;
998 
999  if (cs->step_index == step_index) {
1000  int diff = predictor - cs->predictor;
1001  if (diff < 0)
1002  diff = - diff;
1003  if (diff > 0x7f)
1004  goto update;
1005  } else {
1006  update:
1007  cs->step_index = step_index;
1008  cs->predictor = predictor;
1009  }
1010 
1011  if (cs->step_index > 88u){
1012  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1013  channel, cs->step_index);
1014  return AVERROR_INVALIDDATA;
1015  }
1016 
1017  samples = samples_p[channel];
1018 
1019  for (m = 0; m < 64; m += 2) {
1020  int byte = bytestream2_get_byteu(&gb);
1021  samples[m ] = adpcm_ima_qt_expand_nibble(cs, byte & 0x0F);
1022  samples[m + 1] = adpcm_ima_qt_expand_nibble(cs, byte >> 4 );
1023  }
1024  }
1025  break;
1027  for(i=0; i<avctx->channels; i++){
1028  cs = &(c->status[i]);
1029  cs->predictor = samples_p[i][0] = sign_extend(bytestream2_get_le16u(&gb), 16);
1030 
1031  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1032  if (cs->step_index > 88u){
1033  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1034  i, cs->step_index);
1035  return AVERROR_INVALIDDATA;
1036  }
1037  }
1038 
1039  if (avctx->bits_per_coded_sample != 4) {
1040  int samples_per_block = ff_adpcm_ima_block_samples[avctx->bits_per_coded_sample - 2];
1041  int block_size = ff_adpcm_ima_block_sizes[avctx->bits_per_coded_sample - 2];
1043  GetBitContext g;
1044 
1045  for (n = 0; n < (nb_samples - 1) / samples_per_block; n++) {
1046  for (i = 0; i < avctx->channels; i++) {
1047  int j;
1048 
1049  cs = &c->status[i];
1050  samples = &samples_p[i][1 + n * samples_per_block];
1051  for (j = 0; j < block_size; j++) {
1052  temp[j] = buf[4 * avctx->channels + block_size * n * avctx->channels +
1053  (j % 4) + (j / 4) * (avctx->channels * 4) + i * 4];
1054  }
1055  ret = init_get_bits8(&g, (const uint8_t *)&temp, block_size);
1056  if (ret < 0)
1057  return ret;
1058  for (m = 0; m < samples_per_block; m++) {
1059  samples[m] = adpcm_ima_wav_expand_nibble(cs, &g,
1060  avctx->bits_per_coded_sample);
1061  }
1062  }
1063  }
1064  bytestream2_skip(&gb, avctx->block_align - avctx->channels * 4);
1065  } else {
1066  for (n = 0; n < (nb_samples - 1) / 8; n++) {
1067  for (i = 0; i < avctx->channels; i++) {
1068  cs = &c->status[i];
1069  samples = &samples_p[i][1 + n * 8];
1070  for (m = 0; m < 8; m += 2) {
1071  int v = bytestream2_get_byteu(&gb);
1072  samples[m ] = adpcm_ima_expand_nibble(cs, v & 0x0F, 3);
1073  samples[m + 1] = adpcm_ima_expand_nibble(cs, v >> 4 , 3);
1074  }
1075  }
1076  }
1077  }
1078  break;
1079  case AV_CODEC_ID_ADPCM_4XM:
1080  for (i = 0; i < avctx->channels; i++)
1081  c->status[i].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1082 
1083  for (i = 0; i < avctx->channels; i++) {
1084  c->status[i].step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1085  if (c->status[i].step_index > 88u) {
1086  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1087  i, c->status[i].step_index);
1088  return AVERROR_INVALIDDATA;
1089  }
1090  }
1091 
1092  for (i = 0; i < avctx->channels; i++) {
1093  samples = (int16_t *)frame->data[i];
1094  cs = &c->status[i];
1095  for (n = nb_samples >> 1; n > 0; n--) {
1096  int v = bytestream2_get_byteu(&gb);
1097  *samples++ = adpcm_ima_expand_nibble(cs, v & 0x0F, 4);
1098  *samples++ = adpcm_ima_expand_nibble(cs, v >> 4 , 4);
1099  }
1100  }
1101  break;
1102  case AV_CODEC_ID_ADPCM_AGM:
1103  for (i = 0; i < avctx->channels; i++)
1104  c->status[i].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1105  for (i = 0; i < avctx->channels; i++)
1106  c->status[i].step = sign_extend(bytestream2_get_le16u(&gb), 16);
1107 
1108  for (n = 0; n < nb_samples >> (1 - st); n++) {
1109  int v = bytestream2_get_byteu(&gb);
1110  *samples++ = adpcm_agm_expand_nibble(&c->status[0], v & 0xF);
1111  *samples++ = adpcm_agm_expand_nibble(&c->status[st], v >> 4 );
1112  }
1113  break;
1114  case AV_CODEC_ID_ADPCM_MS:
1115  {
1116  int block_predictor;
1117 
1118  if (avctx->channels > 2) {
1119  for (channel = 0; channel < avctx->channels; channel++) {
1120  samples = samples_p[channel];
1121  block_predictor = bytestream2_get_byteu(&gb);
1122  if (block_predictor > 6) {
1123  av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[%d] = %d\n",
1124  channel, block_predictor);
1125  return AVERROR_INVALIDDATA;
1126  }
1127  c->status[channel].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
1128  c->status[channel].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
1129  c->status[channel].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
1130  c->status[channel].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
1131  c->status[channel].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
1132  *samples++ = c->status[channel].sample2;
1133  *samples++ = c->status[channel].sample1;
1134  for(n = (nb_samples - 2) >> 1; n > 0; n--) {
1135  int byte = bytestream2_get_byteu(&gb);
1136  *samples++ = adpcm_ms_expand_nibble(&c->status[channel], byte >> 4 );
1137  *samples++ = adpcm_ms_expand_nibble(&c->status[channel], byte & 0x0F);
1138  }
1139  }
1140  } else {
1141  block_predictor = bytestream2_get_byteu(&gb);
1142  if (block_predictor > 6) {
1143  av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[0] = %d\n",
1144  block_predictor);
1145  return AVERROR_INVALIDDATA;
1146  }
1147  c->status[0].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
1148  c->status[0].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
1149  if (st) {
1150  block_predictor = bytestream2_get_byteu(&gb);
1151  if (block_predictor > 6) {
1152  av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[1] = %d\n",
1153  block_predictor);
1154  return AVERROR_INVALIDDATA;
1155  }
1156  c->status[1].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
1157  c->status[1].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
1158  }
1159  c->status[0].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
1160  if (st){
1161  c->status[1].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
1162  }
1163 
1164  c->status[0].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
1165  if (st) c->status[1].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
1166  c->status[0].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
1167  if (st) c->status[1].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
1168 
1169  *samples++ = c->status[0].sample2;
1170  if (st) *samples++ = c->status[1].sample2;
1171  *samples++ = c->status[0].sample1;
1172  if (st) *samples++ = c->status[1].sample1;
1173  for(n = (nb_samples - 2) >> (1 - st); n > 0; n--) {
1174  int byte = bytestream2_get_byteu(&gb);
1175  *samples++ = adpcm_ms_expand_nibble(&c->status[0 ], byte >> 4 );
1176  *samples++ = adpcm_ms_expand_nibble(&c->status[st], byte & 0x0F);
1177  }
1178  }
1179  break;
1180  }
1182  for (channel = 0; channel < avctx->channels; channel+=2) {
1183  bytestream2_skipu(&gb, 4);
1184  c->status[channel ].step = bytestream2_get_le16u(&gb) & 0x1f;
1185  c->status[channel + 1].step = bytestream2_get_le16u(&gb) & 0x1f;
1186  c->status[channel ].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1187  bytestream2_skipu(&gb, 2);
1188  c->status[channel + 1].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1189  bytestream2_skipu(&gb, 2);
1190  for (n = 0; n < nb_samples; n+=2) {
1191  int v = bytestream2_get_byteu(&gb);
1192  samples_p[channel][n ] = adpcm_mtaf_expand_nibble(&c->status[channel], v & 0x0F);
1193  samples_p[channel][n + 1] = adpcm_mtaf_expand_nibble(&c->status[channel], v >> 4 );
1194  }
1195  for (n = 0; n < nb_samples; n+=2) {
1196  int v = bytestream2_get_byteu(&gb);
1197  samples_p[channel + 1][n ] = adpcm_mtaf_expand_nibble(&c->status[channel + 1], v & 0x0F);
1198  samples_p[channel + 1][n + 1] = adpcm_mtaf_expand_nibble(&c->status[channel + 1], v >> 4 );
1199  }
1200  }
1201  break;
1203  for (channel = 0; channel < avctx->channels; channel++) {
1204  cs = &c->status[channel];
1205  cs->predictor = *samples++ = sign_extend(bytestream2_get_le16u(&gb), 16);
1206  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1207  if (cs->step_index > 88u){
1208  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1209  channel, cs->step_index);
1210  return AVERROR_INVALIDDATA;
1211  }
1212  }
1213  for (n = (nb_samples - 1) >> (1 - st); n > 0; n--) {
1214  int v = bytestream2_get_byteu(&gb);
1215  *samples++ = adpcm_ima_expand_nibble(&c->status[0 ], v >> 4 , 3);
1216  *samples++ = adpcm_ima_expand_nibble(&c->status[st], v & 0x0F, 3);
1217  }
1218  break;
1220  {
1221  int last_byte = 0;
1222  int nibble;
1223  int decode_top_nibble_next = 0;
1224  int diff_channel;
1225  const int16_t *samples_end = samples + avctx->channels * nb_samples;
1226 
1227  bytestream2_skipu(&gb, 10);
1228  c->status[0].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1229  c->status[1].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1230  c->status[0].step_index = bytestream2_get_byteu(&gb);
1231  c->status[1].step_index = bytestream2_get_byteu(&gb);
1232  if (c->status[0].step_index > 88u || c->status[1].step_index > 88u){
1233  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i/%i\n",
1234  c->status[0].step_index, c->status[1].step_index);
1235  return AVERROR_INVALIDDATA;
1236  }
1237  /* sign extend the predictors */
1238  diff_channel = c->status[1].predictor;
1239 
1240  /* DK3 ADPCM support macro */
1241 #define DK3_GET_NEXT_NIBBLE() \
1242  if (decode_top_nibble_next) { \
1243  nibble = last_byte >> 4; \
1244  decode_top_nibble_next = 0; \
1245  } else { \
1246  last_byte = bytestream2_get_byteu(&gb); \
1247  nibble = last_byte & 0x0F; \
1248  decode_top_nibble_next = 1; \
1249  }
1250 
1251  while (samples < samples_end) {
1252 
1253  /* for this algorithm, c->status[0] is the sum channel and
1254  * c->status[1] is the diff channel */
1255 
1256  /* process the first predictor of the sum channel */
1258  adpcm_ima_expand_nibble(&c->status[0], nibble, 3);
1259 
1260  /* process the diff channel predictor */
1262  adpcm_ima_expand_nibble(&c->status[1], nibble, 3);
1263 
1264  /* process the first pair of stereo PCM samples */
1265  diff_channel = (diff_channel + c->status[1].predictor) / 2;
1266  *samples++ = c->status[0].predictor + c->status[1].predictor;
1267  *samples++ = c->status[0].predictor - c->status[1].predictor;
1268 
1269  /* process the second predictor of the sum channel */
1271  adpcm_ima_expand_nibble(&c->status[0], nibble, 3);
1272 
1273  /* process the second pair of stereo PCM samples */
1274  diff_channel = (diff_channel + c->status[1].predictor) / 2;
1275  *samples++ = c->status[0].predictor + c->status[1].predictor;
1276  *samples++ = c->status[0].predictor - c->status[1].predictor;
1277  }
1278 
1279  if ((bytestream2_tell(&gb) & 1))
1280  bytestream2_skip(&gb, 1);
1281  break;
1282  }
1284  for (channel = 0; channel < avctx->channels; channel++) {
1285  cs = &c->status[channel];
1286  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1287  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1288  if (cs->step_index > 88u){
1289  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1290  channel, cs->step_index);
1291  return AVERROR_INVALIDDATA;
1292  }
1293  }
1294 
1295  for (n = nb_samples >> (1 - st); n > 0; n--) {
1296  int v1, v2;
1297  int v = bytestream2_get_byteu(&gb);
1298  /* nibbles are swapped for mono */
1299  if (st) {
1300  v1 = v >> 4;
1301  v2 = v & 0x0F;
1302  } else {
1303  v2 = v >> 4;
1304  v1 = v & 0x0F;
1305  }
1306  *samples++ = adpcm_ima_expand_nibble(&c->status[0 ], v1, 3);
1307  *samples++ = adpcm_ima_expand_nibble(&c->status[st], v2, 3);
1308  }
1309  break;
1311  for (channel = 0; channel < avctx->channels; channel++) {
1312  cs = &c->status[channel];
1313  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1314  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1315  if (cs->step_index > 88u){
1316  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1317  channel, cs->step_index);
1318  return AVERROR_INVALIDDATA;
1319  }
1320  }
1321 
1322  for (int subframe = 0; subframe < nb_samples / 256; subframe++) {
1323  for (channel = 0; channel < avctx->channels; channel++) {
1324  samples = samples_p[channel] + 256 * subframe;
1325  for (n = 0; n < 256; n += 2) {
1326  int v = bytestream2_get_byteu(&gb);
1327  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3);
1328  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], v >> 4 , 3);
1329  }
1330  }
1331  }
1332  break;
1334  for (channel = 0; channel < avctx->channels; channel++) {
1335  cs = &c->status[channel];
1336  samples = samples_p[channel];
1337  bytestream2_skip(&gb, 4);
1338  for (n = 0; n < nb_samples; n += 2) {
1339  int v = bytestream2_get_byteu(&gb);
1340  *samples++ = adpcm_ima_expand_nibble(cs, v >> 4 , 3);
1341  *samples++ = adpcm_ima_expand_nibble(cs, v & 0x0F, 3);
1342  }
1343  }
1344  break;
1346  for (n = nb_samples >> (1 - st); n > 0; n--) {
1347  int v = bytestream2_get_byteu(&gb);
1348  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4 , 3);
1349  *samples++ = adpcm_ima_expand_nibble(&c->status[st], v & 0x0F, 3);
1350  }
1351  break;
1353  for (n = nb_samples >> (1 - st); n > 0; n--) {
1354  int v = bytestream2_get_byteu(&gb);
1355  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[0], v >> 4 );
1356  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[st], v & 0x0F);
1357  }
1358  break;
1360  for (n = nb_samples / 2; n > 0; n--) {
1361  for (channel = 0; channel < avctx->channels; channel++) {
1362  int v = bytestream2_get_byteu(&gb);
1363  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[channel], v >> 4 );
1364  samples[st] = adpcm_ima_qt_expand_nibble(&c->status[channel], v & 0x0F);
1365  }
1366  samples += avctx->channels;
1367  }
1368  break;
1370  for (n = nb_samples / 2; n > 0; n--) {
1371  for (channel = 0; channel < avctx->channels; channel++) {
1372  int v = bytestream2_get_byteu(&gb);
1373  *samples++ = adpcm_ima_alp_expand_nibble(&c->status[channel], v >> 4 , 2);
1374  samples[st] = adpcm_ima_alp_expand_nibble(&c->status[channel], v & 0x0F, 2);
1375  }
1376  samples += avctx->channels;
1377  }
1378  break;
1380  for (n = 0; n < nb_samples / 2; n++) {
1381  int v = bytestream2_get_byteu(&gb);
1382  *samples++ = adpcm_ima_cunning_expand_nibble(&c->status[0], v & 0x0F);
1383  *samples++ = adpcm_ima_cunning_expand_nibble(&c->status[0], v >> 4);
1384  }
1385  break;
1387  for (n = nb_samples >> (1 - st); n > 0; n--) {
1388  int v = bytestream2_get_byteu(&gb);
1389  *samples++ = adpcm_ima_oki_expand_nibble(&c->status[0], v >> 4 );
1390  *samples++ = adpcm_ima_oki_expand_nibble(&c->status[st], v & 0x0F);
1391  }
1392  break;
1394  for (channel = 0; channel < avctx->channels; channel++) {
1395  cs = &c->status[channel];
1396  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1397  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1398  if (cs->step_index > 88u){
1399  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1400  channel, cs->step_index);
1401  return AVERROR_INVALIDDATA;
1402  }
1403  }
1404  for (n = 0; n < nb_samples / 2; n++) {
1405  int byte[2];
1406 
1407  byte[0] = bytestream2_get_byteu(&gb);
1408  if (st)
1409  byte[1] = bytestream2_get_byteu(&gb);
1410  for(channel = 0; channel < avctx->channels; channel++) {
1411  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], byte[channel] & 0x0F, 3);
1412  }
1413  for(channel = 0; channel < avctx->channels; channel++) {
1414  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], byte[channel] >> 4 , 3);
1415  }
1416  }
1417  break;
1419  if (c->vqa_version == 3) {
1420  for (channel = 0; channel < avctx->channels; channel++) {
1421  int16_t *smp = samples_p[channel];
1422 
1423  for (n = nb_samples / 2; n > 0; n--) {
1424  int v = bytestream2_get_byteu(&gb);
1425  *smp++ = adpcm_ima_expand_nibble(&c->status[channel], v >> 4 , 3);
1426  *smp++ = adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3);
1427  }
1428  }
1429  } else {
1430  for (n = nb_samples / 2; n > 0; n--) {
1431  for (channel = 0; channel < avctx->channels; channel++) {
1432  int v = bytestream2_get_byteu(&gb);
1433  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], v >> 4 , 3);
1434  samples[st] = adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3);
1435  }
1436  samples += avctx->channels;
1437  }
1438  }
1439  bytestream2_seek(&gb, 0, SEEK_END);
1440  break;
1441  case AV_CODEC_ID_ADPCM_XA:
1442  {
1443  int16_t *out0 = samples_p[0];
1444  int16_t *out1 = samples_p[1];
1445  int samples_per_block = 28 * (3 - avctx->channels) * 4;
1446  int sample_offset = 0;
1447  int bytes_remaining;
1448  while (bytestream2_get_bytes_left(&gb) >= 128) {
1449  if ((ret = xa_decode(avctx, out0, out1, buf + bytestream2_tell(&gb),
1450  &c->status[0], &c->status[1],
1451  avctx->channels, sample_offset)) < 0)
1452  return ret;
1453  bytestream2_skipu(&gb, 128);
1454  sample_offset += samples_per_block;
1455  }
1456  /* Less than a full block of data left, e.g. when reading from
1457  * 2324 byte per sector XA; the remainder is padding */
1458  bytes_remaining = bytestream2_get_bytes_left(&gb);
1459  if (bytes_remaining > 0) {
1460  bytestream2_skip(&gb, bytes_remaining);
1461  }
1462  break;
1463  }
1465  for (i=0; i<=st; i++) {
1466  c->status[i].step_index = bytestream2_get_le32u(&gb);
1467  if (c->status[i].step_index > 88u) {
1468  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1469  i, c->status[i].step_index);
1470  return AVERROR_INVALIDDATA;
1471  }
1472  }
1473  for (i=0; i<=st; i++) {
1474  c->status[i].predictor = bytestream2_get_le32u(&gb);
1475  if (FFABS((int64_t)c->status[i].predictor) > (1<<16))
1476  return AVERROR_INVALIDDATA;
1477  }
1478 
1479  for (n = nb_samples >> (1 - st); n > 0; n--) {
1480  int byte = bytestream2_get_byteu(&gb);
1481  *samples++ = adpcm_ima_expand_nibble(&c->status[0], byte >> 4, 3);
1482  *samples++ = adpcm_ima_expand_nibble(&c->status[st], byte & 0x0F, 3);
1483  }
1484  break;
1486  for (n = nb_samples >> (1 - st); n > 0; n--) {
1487  int byte = bytestream2_get_byteu(&gb);
1488  *samples++ = adpcm_ima_expand_nibble(&c->status[0], byte >> 4, 6);
1489  *samples++ = adpcm_ima_expand_nibble(&c->status[st], byte & 0x0F, 6);
1490  }
1491  break;
1492  case AV_CODEC_ID_ADPCM_EA:
1493  {
1494  int previous_left_sample, previous_right_sample;
1495  int current_left_sample, current_right_sample;
1496  int next_left_sample, next_right_sample;
1497  int coeff1l, coeff2l, coeff1r, coeff2r;
1498  int shift_left, shift_right;
1499 
1500  /* Each EA ADPCM frame has a 12-byte header followed by 30-byte pieces,
1501  each coding 28 stereo samples. */
1502 
1503  if(avctx->channels != 2)
1504  return AVERROR_INVALIDDATA;
1505 
1506  current_left_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1507  previous_left_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1508  current_right_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1509  previous_right_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1510 
1511  for (count1 = 0; count1 < nb_samples / 28; count1++) {
1512  int byte = bytestream2_get_byteu(&gb);
1513  coeff1l = ea_adpcm_table[ byte >> 4 ];
1514  coeff2l = ea_adpcm_table[(byte >> 4 ) + 4];
1515  coeff1r = ea_adpcm_table[ byte & 0x0F];
1516  coeff2r = ea_adpcm_table[(byte & 0x0F) + 4];
1517 
1518  byte = bytestream2_get_byteu(&gb);
1519  shift_left = 20 - (byte >> 4);
1520  shift_right = 20 - (byte & 0x0F);
1521 
1522  for (count2 = 0; count2 < 28; count2++) {
1523  byte = bytestream2_get_byteu(&gb);
1524  next_left_sample = sign_extend(byte >> 4, 4) * (1 << shift_left);
1525  next_right_sample = sign_extend(byte, 4) * (1 << shift_right);
1526 
1527  next_left_sample = (next_left_sample +
1528  (current_left_sample * coeff1l) +
1529  (previous_left_sample * coeff2l) + 0x80) >> 8;
1530  next_right_sample = (next_right_sample +
1531  (current_right_sample * coeff1r) +
1532  (previous_right_sample * coeff2r) + 0x80) >> 8;
1533 
1534  previous_left_sample = current_left_sample;
1535  current_left_sample = av_clip_int16(next_left_sample);
1536  previous_right_sample = current_right_sample;
1537  current_right_sample = av_clip_int16(next_right_sample);
1538  *samples++ = current_left_sample;
1539  *samples++ = current_right_sample;
1540  }
1541  }
1542 
1543  bytestream2_skip(&gb, 2); // Skip terminating 0x0000
1544 
1545  break;
1546  }
1548  {
1549  int coeff[2][2], shift[2];
1550 
1551  for(channel = 0; channel < avctx->channels; channel++) {
1552  int byte = bytestream2_get_byteu(&gb);
1553  for (i=0; i<2; i++)
1554  coeff[channel][i] = ea_adpcm_table[(byte >> 4) + 4*i];
1555  shift[channel] = 20 - (byte & 0x0F);
1556  }
1557  for (count1 = 0; count1 < nb_samples / 2; count1++) {
1558  int byte[2];
1559 
1560  byte[0] = bytestream2_get_byteu(&gb);
1561  if (st) byte[1] = bytestream2_get_byteu(&gb);
1562  for(i = 4; i >= 0; i-=4) { /* Pairwise samples LL RR (st) or LL LL (mono) */
1563  for(channel = 0; channel < avctx->channels; channel++) {
1564  int sample = sign_extend(byte[channel] >> i, 4) * (1 << shift[channel]);
1565  sample = (sample +
1566  c->status[channel].sample1 * coeff[channel][0] +
1567  c->status[channel].sample2 * coeff[channel][1] + 0x80) >> 8;
1569  c->status[channel].sample1 = av_clip_int16(sample);
1570  *samples++ = c->status[channel].sample1;
1571  }
1572  }
1573  }
1574  bytestream2_seek(&gb, 0, SEEK_END);
1575  break;
1576  }
1579  case AV_CODEC_ID_ADPCM_EA_R3: {
1580  /* channel numbering
1581  2chan: 0=fl, 1=fr
1582  4chan: 0=fl, 1=rl, 2=fr, 3=rr
1583  6chan: 0=fl, 1=c, 2=fr, 3=rl, 4=rr, 5=sub */
1584  const int big_endian = avctx->codec->id == AV_CODEC_ID_ADPCM_EA_R3;
1585  int previous_sample, current_sample, next_sample;
1586  int coeff1, coeff2;
1587  int shift;
1588  unsigned int channel;
1589  uint16_t *samplesC;
1590  int count = 0;
1591  int offsets[6];
1592 
1593  for (channel=0; channel<avctx->channels; channel++)
1594  offsets[channel] = (big_endian ? bytestream2_get_be32(&gb) :
1595  bytestream2_get_le32(&gb)) +
1596  (avctx->channels + 1) * 4;
1597 
1598  for (channel=0; channel<avctx->channels; channel++) {
1599  bytestream2_seek(&gb, offsets[channel], SEEK_SET);
1600  samplesC = samples_p[channel];
1601 
1602  if (avctx->codec->id == AV_CODEC_ID_ADPCM_EA_R1) {
1603  current_sample = sign_extend(bytestream2_get_le16(&gb), 16);
1604  previous_sample = sign_extend(bytestream2_get_le16(&gb), 16);
1605  } else {
1606  current_sample = c->status[channel].predictor;
1607  previous_sample = c->status[channel].prev_sample;
1608  }
1609 
1610  for (count1 = 0; count1 < nb_samples / 28; count1++) {
1611  int byte = bytestream2_get_byte(&gb);
1612  if (byte == 0xEE) { /* only seen in R2 and R3 */
1613  current_sample = sign_extend(bytestream2_get_be16(&gb), 16);
1614  previous_sample = sign_extend(bytestream2_get_be16(&gb), 16);
1615 
1616  for (count2=0; count2<28; count2++)
1617  *samplesC++ = sign_extend(bytestream2_get_be16(&gb), 16);
1618  } else {
1619  coeff1 = ea_adpcm_table[ byte >> 4 ];
1620  coeff2 = ea_adpcm_table[(byte >> 4) + 4];
1621  shift = 20 - (byte & 0x0F);
1622 
1623  for (count2=0; count2<28; count2++) {
1624  if (count2 & 1)
1625  next_sample = (unsigned)sign_extend(byte, 4) << shift;
1626  else {
1627  byte = bytestream2_get_byte(&gb);
1628  next_sample = (unsigned)sign_extend(byte >> 4, 4) << shift;
1629  }
1630 
1631  next_sample += (current_sample * coeff1) +
1632  (previous_sample * coeff2);
1633  next_sample = av_clip_int16(next_sample >> 8);
1634 
1635  previous_sample = current_sample;
1636  current_sample = next_sample;
1637  *samplesC++ = current_sample;
1638  }
1639  }
1640  }
1641  if (!count) {
1642  count = count1;
1643  } else if (count != count1) {
1644  av_log(avctx, AV_LOG_WARNING, "per-channel sample count mismatch\n");
1645  count = FFMAX(count, count1);
1646  }
1647 
1648  if (avctx->codec->id != AV_CODEC_ID_ADPCM_EA_R1) {
1649  c->status[channel].predictor = current_sample;
1650  c->status[channel].prev_sample = previous_sample;
1651  }
1652  }
1653 
1654  frame->nb_samples = count * 28;
1655  bytestream2_seek(&gb, 0, SEEK_END);
1656  break;
1657  }
1659  for (channel=0; channel<avctx->channels; channel++) {
1660  int coeff[2][4], shift[4];
1661  int16_t *s = samples_p[channel];
1662  for (n = 0; n < 4; n++, s += 32) {
1663  int val = sign_extend(bytestream2_get_le16u(&gb), 16);
1664  for (i=0; i<2; i++)
1665  coeff[i][n] = ea_adpcm_table[(val&0x0F)+4*i];
1666  s[0] = val & ~0x0F;
1667 
1668  val = sign_extend(bytestream2_get_le16u(&gb), 16);
1669  shift[n] = 20 - (val & 0x0F);
1670  s[1] = val & ~0x0F;
1671  }
1672 
1673  for (m=2; m<32; m+=2) {
1674  s = &samples_p[channel][m];
1675  for (n = 0; n < 4; n++, s += 32) {
1676  int level, pred;
1677  int byte = bytestream2_get_byteu(&gb);
1678 
1679  level = sign_extend(byte >> 4, 4) * (1 << shift[n]);
1680  pred = s[-1] * coeff[0][n] + s[-2] * coeff[1][n];
1681  s[0] = av_clip_int16((level + pred + 0x80) >> 8);
1682 
1683  level = sign_extend(byte, 4) * (1 << shift[n]);
1684  pred = s[0] * coeff[0][n] + s[-1] * coeff[1][n];
1685  s[1] = av_clip_int16((level + pred + 0x80) >> 8);
1686  }
1687  }
1688  }
1689  break;
1691  av_assert0(avctx->channels == 1);
1692 
1693  /*
1694  * Header format:
1695  * int16_t predictor;
1696  * uint8_t step_index;
1697  * uint8_t reserved;
1698  * uint32_t frame_size;
1699  *
1700  * Some implementations have step_index as 16-bits, but others
1701  * only use the lower 8 and store garbage in the upper 8.
1702  */
1703  c->status[0].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1704  c->status[0].step_index = bytestream2_get_byteu(&gb);
1705  bytestream2_skipu(&gb, 5);
1706  if (c->status[0].step_index > 88u) {
1707  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n",
1708  c->status[0].step_index);
1709  return AVERROR_INVALIDDATA;
1710  }
1711 
1712  for (n = nb_samples >> 1; n > 0; n--) {
1713  int v = bytestream2_get_byteu(&gb);
1714 
1715  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4, 3);
1716  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v & 0xf, 3);
1717  }
1718 
1719  if (nb_samples & 1) {
1720  int v = bytestream2_get_byteu(&gb);
1721  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4, 3);
1722 
1723  if (v & 0x0F) {
1724  /* Holds true on all the http://samples.mplayerhq.hu/amv samples. */
1725  av_log(avctx, AV_LOG_WARNING, "Last nibble set on packet with odd sample count.\n");
1726  av_log(avctx, AV_LOG_WARNING, "Sample will be skipped.\n");
1727  }
1728  }
1729  break;
1731  for (i = 0; i < avctx->channels; i++) {
1732  c->status[i].predictor = sign_extend(bytestream2_get_be16u(&gb), 16);
1733  c->status[i].step_index = bytestream2_get_byteu(&gb);
1734  bytestream2_skipu(&gb, 1);
1735  if (c->status[i].step_index > 88u) {
1736  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n",
1737  c->status[i].step_index);
1738  return AVERROR_INVALIDDATA;
1739  }
1740  }
1741 
1742  for (n = nb_samples >> (1 - st); n > 0; n--) {
1743  int v = bytestream2_get_byteu(&gb);
1744 
1745  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[0 ], v >> 4 );
1746  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[st], v & 0xf);
1747  }
1748  break;
1749  case AV_CODEC_ID_ADPCM_CT:
1750  for (n = nb_samples >> (1 - st); n > 0; n--) {
1751  int v = bytestream2_get_byteu(&gb);
1752  *samples++ = adpcm_ct_expand_nibble(&c->status[0 ], v >> 4 );
1753  *samples++ = adpcm_ct_expand_nibble(&c->status[st], v & 0x0F);
1754  }
1755  break;
1759  if (!c->status[0].step_index) {
1760  /* the first byte is a raw sample */
1761  *samples++ = 128 * (bytestream2_get_byteu(&gb) - 0x80);
1762  if (st)
1763  *samples++ = 128 * (bytestream2_get_byteu(&gb) - 0x80);
1764  c->status[0].step_index = 1;
1765  nb_samples--;
1766  }
1767  if (avctx->codec->id == AV_CODEC_ID_ADPCM_SBPRO_4) {
1768  for (n = nb_samples >> (1 - st); n > 0; n--) {
1769  int byte = bytestream2_get_byteu(&gb);
1770  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1771  byte >> 4, 4, 0);
1772  *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
1773  byte & 0x0F, 4, 0);
1774  }
1775  } else if (avctx->codec->id == AV_CODEC_ID_ADPCM_SBPRO_3) {
1776  for (n = (nb_samples<<st) / 3; n > 0; n--) {
1777  int byte = bytestream2_get_byteu(&gb);
1778  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1779  byte >> 5 , 3, 0);
1780  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1781  (byte >> 2) & 0x07, 3, 0);
1782  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1783  byte & 0x03, 2, 0);
1784  }
1785  } else {
1786  for (n = nb_samples >> (2 - st); n > 0; n--) {
1787  int byte = bytestream2_get_byteu(&gb);
1788  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1789  byte >> 6 , 2, 2);
1790  *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
1791  (byte >> 4) & 0x03, 2, 2);
1792  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1793  (byte >> 2) & 0x03, 2, 2);
1794  *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
1795  byte & 0x03, 2, 2);
1796  }
1797  }
1798  break;
1799  case AV_CODEC_ID_ADPCM_SWF:
1800  adpcm_swf_decode(avctx, buf, buf_size, samples);
1801  bytestream2_seek(&gb, 0, SEEK_END);
1802  break;
1804  for (n = nb_samples >> (1 - st); n > 0; n--) {
1805  int v = bytestream2_get_byteu(&gb);
1806  *samples++ = adpcm_yamaha_expand_nibble(&c->status[0 ], v & 0x0F);
1807  *samples++ = adpcm_yamaha_expand_nibble(&c->status[st], v >> 4 );
1808  }
1809  break;
1811  if (!c->has_status) {
1812  for (channel = 0; channel < avctx->channels; channel++)
1813  c->status[channel].step = 0;
1814  c->has_status = 1;
1815  }
1816  for (channel = 0; channel < avctx->channels; channel++) {
1817  samples = samples_p[channel];
1818  for (n = nb_samples >> 1; n > 0; n--) {
1819  int v = bytestream2_get_byteu(&gb);
1820  *samples++ = adpcm_yamaha_expand_nibble(&c->status[channel], v & 0x0F);
1821  *samples++ = adpcm_yamaha_expand_nibble(&c->status[channel], v >> 4 );
1822  }
1823  }
1824  break;
1825  case AV_CODEC_ID_ADPCM_AFC:
1826  {
1827  int samples_per_block;
1828  int blocks;
1829 
1830  if (avctx->extradata && avctx->extradata_size == 1 && avctx->extradata[0]) {
1831  samples_per_block = avctx->extradata[0] / 16;
1832  blocks = nb_samples / avctx->extradata[0];
1833  } else {
1834  samples_per_block = nb_samples / 16;
1835  blocks = 1;
1836  }
1837 
1838  for (m = 0; m < blocks; m++) {
1839  for (channel = 0; channel < avctx->channels; channel++) {
1840  int prev1 = c->status[channel].sample1;
1841  int prev2 = c->status[channel].sample2;
1842 
1843  samples = samples_p[channel] + m * 16;
1844  /* Read in every sample for this channel. */
1845  for (i = 0; i < samples_per_block; i++) {
1846  int byte = bytestream2_get_byteu(&gb);
1847  int scale = 1 << (byte >> 4);
1848  int index = byte & 0xf;
1849  int factor1 = ff_adpcm_afc_coeffs[0][index];
1850  int factor2 = ff_adpcm_afc_coeffs[1][index];
1851 
1852  /* Decode 16 samples. */
1853  for (n = 0; n < 16; n++) {
1854  int32_t sampledat;
1855 
1856  if (n & 1) {
1857  sampledat = sign_extend(byte, 4);
1858  } else {
1859  byte = bytestream2_get_byteu(&gb);
1860  sampledat = sign_extend(byte >> 4, 4);
1861  }
1862 
1863  sampledat = ((prev1 * factor1 + prev2 * factor2) >> 11) +
1864  sampledat * scale;
1865  *samples = av_clip_int16(sampledat);
1866  prev2 = prev1;
1867  prev1 = *samples++;
1868  }
1869  }
1870 
1871  c->status[channel].sample1 = prev1;
1872  c->status[channel].sample2 = prev2;
1873  }
1874  }
1875  bytestream2_seek(&gb, 0, SEEK_END);
1876  break;
1877  }
1878  case AV_CODEC_ID_ADPCM_THP:
1880  {
1881  int table[14][16];
1882  int ch;
1883 
1884 #define THP_GET16(g) \
1885  sign_extend( \
1886  avctx->codec->id == AV_CODEC_ID_ADPCM_THP_LE ? \
1887  bytestream2_get_le16u(&(g)) : \
1888  bytestream2_get_be16u(&(g)), 16)
1889 
1890  if (avctx->extradata) {
1892  if (avctx->extradata_size < 32 * avctx->channels) {
1893  av_log(avctx, AV_LOG_ERROR, "Missing coeff table\n");
1894  return AVERROR_INVALIDDATA;
1895  }
1896 
1897  bytestream2_init(&tb, avctx->extradata, avctx->extradata_size);
1898  for (i = 0; i < avctx->channels; i++)
1899  for (n = 0; n < 16; n++)
1900  table[i][n] = THP_GET16(tb);
1901  } else {
1902  for (i = 0; i < avctx->channels; i++)
1903  for (n = 0; n < 16; n++)
1904  table[i][n] = THP_GET16(gb);
1905 
1906  if (!c->has_status) {
1907  /* Initialize the previous sample. */
1908  for (i = 0; i < avctx->channels; i++) {
1909  c->status[i].sample1 = THP_GET16(gb);
1910  c->status[i].sample2 = THP_GET16(gb);
1911  }
1912  c->has_status = 1;
1913  } else {
1914  bytestream2_skip(&gb, avctx->channels * 4);
1915  }
1916  }
1917 
1918  for (ch = 0; ch < avctx->channels; ch++) {
1919  samples = samples_p[ch];
1920 
1921  /* Read in every sample for this channel. */
1922  for (i = 0; i < (nb_samples + 13) / 14; i++) {
1923  int byte = bytestream2_get_byteu(&gb);
1924  int index = (byte >> 4) & 7;
1925  unsigned int exp = byte & 0x0F;
1926  int64_t factor1 = table[ch][index * 2];
1927  int64_t factor2 = table[ch][index * 2 + 1];
1928 
1929  /* Decode 14 samples. */
1930  for (n = 0; n < 14 && (i * 14 + n < nb_samples); n++) {
1931  int32_t sampledat;
1932 
1933  if (n & 1) {
1934  sampledat = sign_extend(byte, 4);
1935  } else {
1936  byte = bytestream2_get_byteu(&gb);
1937  sampledat = sign_extend(byte >> 4, 4);
1938  }
1939 
1940  sampledat = ((c->status[ch].sample1 * factor1
1941  + c->status[ch].sample2 * factor2) >> 11) + sampledat * (1 << exp);
1942  *samples = av_clip_int16(sampledat);
1943  c->status[ch].sample2 = c->status[ch].sample1;
1944  c->status[ch].sample1 = *samples++;
1945  }
1946  }
1947  }
1948  break;
1949  }
1950  case AV_CODEC_ID_ADPCM_DTK:
1951  for (channel = 0; channel < avctx->channels; channel++) {
1952  samples = samples_p[channel];
1953 
1954  /* Read in every sample for this channel. */
1955  for (i = 0; i < nb_samples / 28; i++) {
1956  int byte, header;
1957  if (channel)
1958  bytestream2_skipu(&gb, 1);
1959  header = bytestream2_get_byteu(&gb);
1960  bytestream2_skipu(&gb, 3 - channel);
1961 
1962  /* Decode 28 samples. */
1963  for (n = 0; n < 28; n++) {
1964  int32_t sampledat, prev;
1965 
1966  switch (header >> 4) {
1967  case 1:
1968  prev = (c->status[channel].sample1 * 0x3c);
1969  break;
1970  case 2:
1971  prev = (c->status[channel].sample1 * 0x73) - (c->status[channel].sample2 * 0x34);
1972  break;
1973  case 3:
1974  prev = (c->status[channel].sample1 * 0x62) - (c->status[channel].sample2 * 0x37);
1975  break;
1976  default:
1977  prev = 0;
1978  }
1979 
1980  prev = av_clip_intp2((prev + 0x20) >> 6, 21);
1981 
1982  byte = bytestream2_get_byteu(&gb);
1983  if (!channel)
1984  sampledat = sign_extend(byte, 4);
1985  else
1986  sampledat = sign_extend(byte >> 4, 4);
1987 
1988  sampledat = ((sampledat * (1 << 12)) >> (header & 0xf)) * (1 << 6) + prev;
1989  *samples++ = av_clip_int16(sampledat >> 6);
1991  c->status[channel].sample1 = sampledat;
1992  }
1993  }
1994  if (!channel)
1995  bytestream2_seek(&gb, 0, SEEK_SET);
1996  }
1997  break;
1998  case AV_CODEC_ID_ADPCM_PSX:
1999  for (int block = 0; block < avpkt->size / FFMAX(avctx->block_align, 16 * avctx->channels); block++) {
2000  int nb_samples_per_block = 28 * FFMAX(avctx->block_align, 16 * avctx->channels) / (16 * avctx->channels);
2001  for (channel = 0; channel < avctx->channels; channel++) {
2002  samples = samples_p[channel] + block * nb_samples_per_block;
2003  av_assert0((block + 1) * nb_samples_per_block <= nb_samples);
2004 
2005  /* Read in every sample for this channel. */
2006  for (i = 0; i < nb_samples_per_block / 28; i++) {
2007  int filter, shift, flag, byte;
2008 
2009  filter = bytestream2_get_byteu(&gb);
2010  shift = filter & 0xf;
2011  filter = filter >> 4;
2012  if (filter >= FF_ARRAY_ELEMS(xa_adpcm_table))
2013  return AVERROR_INVALIDDATA;
2014  flag = bytestream2_get_byteu(&gb);
2015 
2016  /* Decode 28 samples. */
2017  for (n = 0; n < 28; n++) {
2018  int sample = 0, scale;
2019 
2020  if (flag < 0x07) {
2021  if (n & 1) {
2022  scale = sign_extend(byte >> 4, 4);
2023  } else {
2024  byte = bytestream2_get_byteu(&gb);
2025  scale = sign_extend(byte, 4);
2026  }
2027 
2028  scale = scale * (1 << 12);
2029  sample = (int)((scale >> shift) + (c->status[channel].sample1 * xa_adpcm_table[filter][0] + c->status[channel].sample2 * xa_adpcm_table[filter][1]) / 64);
2030  }
2031  *samples++ = av_clip_int16(sample);
2033  c->status[channel].sample1 = sample;
2034  }
2035  }
2036  }
2037  }
2038  break;
2040  /*
2041  * The format of each block:
2042  * uint8_t left_control;
2043  * uint4_t left_samples[nb_samples];
2044  * ---- and if stereo ----
2045  * uint8_t right_control;
2046  * uint4_t right_samples[nb_samples];
2047  *
2048  * Format of the control byte:
2049  * MSB [SSSSRDRR] LSB
2050  * S = (Shift Amount - 2)
2051  * D = Decoder flag.
2052  * R = Reserved
2053  *
2054  * Each block relies on the previous two samples of each channel.
2055  * They should be 0 initially.
2056  */
2057  for (int block = 0; block < avpkt->size / avctx->block_align; block++) {
2058  for (channel = 0; channel < avctx->channels; channel++) {
2059  int control, shift;
2060 
2061  samples = samples_p[channel] + block * 32;
2062  cs = c->status + channel;
2063 
2064  /* Get the control byte and decode the samples, 2 at a time. */
2065  control = bytestream2_get_byteu(&gb);
2066  shift = (control >> 4) + 2;
2067 
2068  for (n = 0; n < 16; n++) {
2069  int sample = bytestream2_get_byteu(&gb);
2070  *samples++ = ff_adpcm_argo_expand_nibble(cs, sample >> 4, shift, control & 0x04);
2071  *samples++ = ff_adpcm_argo_expand_nibble(cs, sample >> 0, shift, control & 0x04);
2072  }
2073  }
2074  }
2075  break;
2077  if (!c->has_status) {
2078  for (channel = 0; channel < avctx->channels; channel++) {
2079  c->status[channel].predictor = 0;
2080  c->status[channel].step_index = 0;
2081  }
2082  c->has_status = 1;
2083  }
2084  for (n = 0; n < nb_samples * avctx->channels; n++) {
2085  int v = bytestream2_get_byteu(&gb);
2086  *samples++ = adpcm_zork_expand_nibble(&c->status[n % avctx->channels], v);
2087  }
2088  break;
2090  for (n = nb_samples / 2; n > 0; n--) {
2091  for (channel = 0; channel < avctx->channels; channel++) {
2092  int v = bytestream2_get_byteu(&gb);
2093  *samples++ = adpcm_ima_mtf_expand_nibble(&c->status[channel], v >> 4);
2094  samples[st] = adpcm_ima_mtf_expand_nibble(&c->status[channel], v & 0x0F);
2095  }
2096  samples += avctx->channels;
2097  }
2098  break;
2099  default:
2100  av_assert0(0); // unsupported codec_id should not happen
2101  }
2102 
2103  if (avpkt->size && bytestream2_tell(&gb) == 0) {
2104  av_log(avctx, AV_LOG_ERROR, "Nothing consumed\n");
2105  return AVERROR_INVALIDDATA;
2106  }
2107 
2108  *got_frame_ptr = 1;
2109 
2110  if (avpkt->size < bytestream2_tell(&gb)) {
2111  av_log(avctx, AV_LOG_ERROR, "Overread of %d < %d\n", avpkt->size, bytestream2_tell(&gb));
2112  return avpkt->size;
2113  }
2114 
2115  return bytestream2_tell(&gb);
2116 }
2117 
2118 static void adpcm_flush(AVCodecContext *avctx)
2119 {
2120  ADPCMDecodeContext *c = avctx->priv_data;
2121  c->has_status = 0;
2122 }
2123 
2124 
2132 
/**
 * Registration boilerplate: expands to the AVCodec definition for one
 * ADPCM decoder variant.
 *
 * @param id_          AVCodecID of the variant (AV_CODEC_ID_ADPCM_*)
 * @param sample_fmts_ terminated AVSampleFormat list the decoder outputs
 *                     (e.g. sample_fmts_s16, sample_fmts_s16p, sample_fmts_both)
 * @param name_        short codec name; also forms the symbol ff_<name_>_decoder
 * @param long_name_   human-readable codec description
 *
 * All variants share the same init/decode/flush entry points and private
 * context (ADPCMDecodeContext); only the id and sample formats differ.
 */
#define ADPCM_DECODER(id_, sample_fmts_, name_, long_name_) \
AVCodec ff_ ## name_ ## _decoder = { \
    .name           = #name_, \
    .long_name      = NULL_IF_CONFIG_SMALL(long_name_), \
    .type           = AVMEDIA_TYPE_AUDIO, \
    .id             = id_, \
    .priv_data_size = sizeof(ADPCMDecodeContext), \
    .init           = adpcm_decode_init, \
    .decode         = adpcm_decode_frame, \
    .flush          = adpcm_flush, \
    .capabilities   = AV_CODEC_CAP_DR1, \
    .sample_fmts    = sample_fmts_, \
}
2146 
/* One AVCodec registration per supported ADPCM variant; each line expands
 * the ADPCM_DECODER boilerplate with the variant's codec ID, output sample
 * format list and display name.
 * Note: Do not forget to add new entries to the Makefile as well. */
ADPCM_DECODER(AV_CODEC_ID_ADPCM_4XM,         sample_fmts_s16p, adpcm_4xm,         "ADPCM 4X Movie");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_AFC,         sample_fmts_s16p, adpcm_afc,         "ADPCM Nintendo Gamecube AFC");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_AGM,         sample_fmts_s16,  adpcm_agm,         "ADPCM AmuseGraphics Movie");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_AICA,        sample_fmts_s16p, adpcm_aica,        "ADPCM Yamaha AICA");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_ARGO,        sample_fmts_s16p, adpcm_argo,        "ADPCM Argonaut Games");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_CT,          sample_fmts_s16,  adpcm_ct,          "ADPCM Creative Technology");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_DTK,         sample_fmts_s16p, adpcm_dtk,         "ADPCM Nintendo Gamecube DTK");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA,          sample_fmts_s16,  adpcm_ea,          "ADPCM Electronic Arts");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_MAXIS_XA, sample_fmts_s16,  adpcm_ea_maxis_xa, "ADPCM Electronic Arts Maxis CDROM XA");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_R1,       sample_fmts_s16p, adpcm_ea_r1,       "ADPCM Electronic Arts R1");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_R2,       sample_fmts_s16p, adpcm_ea_r2,       "ADPCM Electronic Arts R2");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_R3,       sample_fmts_s16p, adpcm_ea_r3,       "ADPCM Electronic Arts R3");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_XAS,      sample_fmts_s16p, adpcm_ea_xas,      "ADPCM Electronic Arts XAS");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_AMV,     sample_fmts_s16,  adpcm_ima_amv,     "ADPCM IMA AMV");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_APC,     sample_fmts_s16,  adpcm_ima_apc,     "ADPCM IMA CRYO APC");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_APM,     sample_fmts_s16,  adpcm_ima_apm,     "ADPCM IMA Ubisoft APM");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_CUNNING, sample_fmts_s16,  adpcm_ima_cunning, "ADPCM IMA Cunning Developments");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_DAT4,    sample_fmts_s16,  adpcm_ima_dat4,    "ADPCM IMA Eurocom DAT4");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_DK3,     sample_fmts_s16,  adpcm_ima_dk3,     "ADPCM IMA Duck DK3");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_DK4,     sample_fmts_s16,  adpcm_ima_dk4,     "ADPCM IMA Duck DK4");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_EA_EACS, sample_fmts_s16,  adpcm_ima_ea_eacs, "ADPCM IMA Electronic Arts EACS");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_EA_SEAD, sample_fmts_s16,  adpcm_ima_ea_sead, "ADPCM IMA Electronic Arts SEAD");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_ISS,     sample_fmts_s16,  adpcm_ima_iss,     "ADPCM IMA Funcom ISS");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_MOFLEX,  sample_fmts_s16p, adpcm_ima_moflex,  "ADPCM IMA MobiClip MOFLEX");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_MTF,     sample_fmts_s16,  adpcm_ima_mtf,     "ADPCM IMA Capcom's MT Framework");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_OKI,     sample_fmts_s16,  adpcm_ima_oki,     "ADPCM IMA Dialogic OKI");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_QT,      sample_fmts_s16p, adpcm_ima_qt,      "ADPCM IMA QuickTime");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_RAD,     sample_fmts_s16,  adpcm_ima_rad,     "ADPCM IMA Radical");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_SSI,     sample_fmts_s16,  adpcm_ima_ssi,     "ADPCM IMA Simon & Schuster Interactive");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_SMJPEG,  sample_fmts_s16,  adpcm_ima_smjpeg,  "ADPCM IMA Loki SDL MJPEG");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_ALP,     sample_fmts_s16,  adpcm_ima_alp,     "ADPCM IMA High Voltage Software ALP");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_WAV,     sample_fmts_s16p, adpcm_ima_wav,     "ADPCM IMA WAV");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_WS,      sample_fmts_both, adpcm_ima_ws,      "ADPCM IMA Westwood");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_MS,          sample_fmts_both, adpcm_ms,          "ADPCM Microsoft");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_MTAF,        sample_fmts_s16p, adpcm_mtaf,        "ADPCM MTAF");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_PSX,         sample_fmts_s16p, adpcm_psx,         "ADPCM Playstation");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_SBPRO_2,     sample_fmts_s16,  adpcm_sbpro_2,     "ADPCM Sound Blaster Pro 2-bit");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_SBPRO_3,     sample_fmts_s16,  adpcm_sbpro_3,     "ADPCM Sound Blaster Pro 2.6-bit");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_SBPRO_4,     sample_fmts_s16,  adpcm_sbpro_4,     "ADPCM Sound Blaster Pro 4-bit");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_SWF,         sample_fmts_s16,  adpcm_swf,         "ADPCM Shockwave Flash");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_THP_LE,      sample_fmts_s16p, adpcm_thp_le,      "ADPCM Nintendo THP (little-endian)");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_THP,         sample_fmts_s16p, adpcm_thp,         "ADPCM Nintendo THP");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_XA,          sample_fmts_s16p, adpcm_xa,          "ADPCM CDROM XA");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_YAMAHA,      sample_fmts_s16,  adpcm_yamaha,      "ADPCM Yamaha");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_ZORK,        sample_fmts_s16,  adpcm_zork,        "ADPCM Zork");
#define NULL
Definition: coverity.c:32
const struct AVCodec * codec
Definition: avcodec.h:540
static const int16_t ea_adpcm_table[]
Definition: adpcm.c:73
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
static int shift(int a, int b)
Definition: sonic.c:82
This structure describes decoded (raw) audio or video data.
Definition: frame.h:314
static int16_t adpcm_mtaf_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
Definition: adpcm.c:507
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:100
#define THP_GET16(g)
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:379
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:200
else temp
Definition: vf_mcdeint.c:256
const char * g
Definition: vf_curves.c:115
#define avpriv_request_sample(...)
int size
Definition: packet.h:364
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:137
static enum AVSampleFormat sample_fmts_s16[]
Definition: adpcm.c:2125
#define sample
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_RL16
Definition: bytestream.h:91
int block_align
number of bytes per packet if constant and known or 0 Used by some WAV based audio codecs...
Definition: avcodec.h:1228
static int get_sbits(GetBitContext *s, int n)
Definition: get_bits.h:359
static int16_t adpcm_ms_expand_nibble(ADPCMChannelStatus *c, int nibble)
Definition: adpcm.c:412
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
const uint8_t ff_adpcm_AdaptCoeff1[]
Divided by 4 to fit in 8-bit integers.
Definition: adpcm_data.c:90
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:1199
The exact code depends on how similar the blocks are and how related they are to the block
uint8_t
#define av_cold
Definition: attributes.h:88
static av_cold int adpcm_decode_init(AVCodecContext *avctx)
Definition: adpcm.c:106
float delta
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
static void adpcm_flush(AVCodecContext *avctx)
Definition: adpcm.c:2118
static void adpcm_swf_decode(AVCodecContext *avctx, const uint8_t *buf, int buf_size, int16_t *samples)
Definition: adpcm.c:639
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:632
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:262
ADPCM tables.
uint8_t * data
Definition: packet.h:363
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:219
static const int8_t mtf_index_table[16]
Definition: adpcm.c:93
static int get_nb_samples(AVCodecContext *avctx, GetByteContext *gb, int buf_size, int *coded_samples, int *approx_nb_samples)
Get the number of samples (per channel) that will be decoded from the packet.
Definition: adpcm.c:724
static av_always_inline void bytestream2_skipu(GetByteContext *g, unsigned int size)
Definition: bytestream.h:174
static int16_t adpcm_zork_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
Definition: adpcm.c:516
bitstream reader API header.
ptrdiff_t size
Definition: opengl_enc.c:100
static const uint8_t header[24]
Definition: sdr2.c:67
int bits_per_coded_sample
bits per sample/pixel from the demuxer (needed for huffyuv).
Definition: avcodec.h:1755
channels
Definition: aptx.h:33
#define av_log(a,...)
static const uint16_t table[]
Definition: prosumer.c:206
static av_always_inline int bytestream2_get_bytes_left(GetByteContext *g)
Definition: bytestream.h:158
enum AVCodecID id
Definition: codec.h:204
static int16_t adpcm_ima_cunning_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
Definition: adpcm.c:343
static const int8_t xa_adpcm_table[5][2]
Definition: adpcm.c:65
const uint16_t ff_adpcm_afc_coeffs[2][16]
Definition: adpcm_data.c:109
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:194
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce then the filter should push the output frames on the output link immediately As an exception to the previous rule if the input frame is enough to produce several output frames then the filter needs output only at least one per link The additional frames can be left buffered in the filter
ADPCM encoder/decoder common header.
int16_t ff_adpcm_argo_expand_nibble(ADPCMChannelStatus *cs, int nibble, int shift, int flag)
Definition: adpcm.c:696
static av_always_inline void update(SilenceDetectContext *s, AVFrame *insamples, int is_silence, int current_sample, int64_t nb_samples_notify, AVRational time_base)
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
Definition: bytestream.h:168
const int8_t *const ff_adpcm_index_tables[4]
Definition: adpcm_data.c:50
const int16_t ff_adpcm_step_table[89]
This is the step table.
Definition: adpcm_data.c:61
GLsizei count
Definition: opengl_enc.c:108
static int16_t adpcm_ima_oki_expand_nibble(ADPCMChannelStatus *c, int nibble)
Definition: adpcm.c:431
#define FFMAX(a, b)
Definition: common.h:94
static int16_t adpcm_ima_wav_expand_nibble(ADPCMChannelStatus *c, GetBitContext *gb, int bps)
Definition: adpcm.c:363
int8_t exp
Definition: eval.c:72
static int16_t adpcm_ima_alp_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int shift)
Definition: adpcm.c:304
const int8_t ff_adpcm_index_table[16]
Definition: adpcm_data.c:40
static const int8_t swf_index_tables[4][16]
Definition: adpcm.c:82
const int16_t ff_adpcm_mtaf_stepsize[32][16]
Definition: adpcm_data.c:114
static int xa_decode(AVCodecContext *avctx, int16_t *out0, int16_t *out1, const uint8_t *in, ADPCMChannelStatus *left, ADPCMChannelStatus *right, int channels, int sample_offset)
Definition: adpcm.c:551
#define FFMIN(a, b)
Definition: common.h:96
const int8_t ff_adpcm_AdaptCoeff2[]
Divided by 4 to fit in 8-bit integers.
Definition: adpcm_data.c:95
int vqa_version
VQA version.
Definition: adpcm.c:102
int32_t
static const uint8_t ff_adpcm_ima_block_sizes[4]
Definition: adpcm_data.h:31
static enum AVSampleFormat sample_fmts_s16p[]
Definition: adpcm.c:2127
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:72
#define s(width, name)
Definition: cbs_vp9.c:257
const int16_t ff_adpcm_oki_step_table[49]
Definition: adpcm_data.c:73
#define FF_ARRAY_ELEMS(a)
static const float pred[4]
Definition: siprdata.h:259
static const uint8_t ff_adpcm_ima_block_samples[4]
Definition: adpcm_data.h:32
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
static av_always_inline int bytestream2_tell(GetByteContext *g)
Definition: bytestream.h:192
const int16_t ff_adpcm_AdaptationTable[]
Definition: adpcm_data.c:84
Libavcodec external API header.
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:58
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_WB32 unsigned int_TMPL AV_WB24 unsigned int_TMPL AV_WB16 unsigned int_TMPL byte
Definition: bytestream.h:91
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:677
#define abs(x)
Definition: cuda_runtime.h:35
main external API structure.
Definition: avcodec.h:531
const int16_t ff_adpcm_ima_cunning_step_table[61]
Definition: adpcm_data.c:197
static int16_t adpcm_yamaha_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
Definition: adpcm.c:493
#define DK3_GET_NEXT_NIBBLE()
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1879
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2]...the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so...,+,-,+,-,+,+,-,+,-,+,...hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32-hcoeff[1]-hcoeff[2]-...a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2}an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||.........intra?||||:Block01:yes no||||:Block02:.................||||:Block03::y DC::ref index:||||:Block04::cb DC::motion x:||||.........:cr DC::motion 
y:||||.................|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------------------------------|||Y subbands||Cb subbands||Cr subbands||||------||------||------|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||------||------||------||||------||------||------|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||------||------||------||||------||------||------|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||------||------||------||||------||------||------|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------------------------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction------------|\Dequantization-------------------\||Reference frames|\IDWT|--------------|Motion\|||Frame 0||Frame 1||Compensation.OBMC v-------|--------------|--------------.\------> Frame n output Frame Frame<----------------------------------/|...|-------------------Range Coder:============Binary Range Coder:-------------------The implemented range coder is an adapted version based upon"Range encoding: an algorithm for removing redundancy from a digitised message."by G.N.N.Martin.The symbols encoded by the Snow range coder are bits(0|1).The associated probabilities are not fix but change depending on the symbol mix seen so far.bit seen|new state---------+-----------------------------------------------0|256-state_transition_table[256-old_state];1|state_transition_table[old_state];state_transition_table={0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 
106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:-------------------------FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector Prediction:=========================1.the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
Definition: snow.txt:206
static int16_t adpcm_ima_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int shift)
Definition: adpcm.c:278
int extradata_size
Definition: avcodec.h:633
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(const int16_t *) pi >> 8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(const int32_t *) pi >> 24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31))))#define SET_CONV_FUNC_GROUP(ofmt, ifmt) static void set_generic_function(AudioConvert *ac){}void ff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, int sample_rate, 
int apply_map){AudioConvert *ac;int in_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) return NULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt) > 2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);return NULL;}return ac;}in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}else if(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;else ac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);return ac;}int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){int use_generic=1;int len=in->nb_samples;int p;if(ac->dc){av_log(ac->avr, AV_LOG_TRACE,"%d samples - audio_convert: %s to %s (dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));return ff_convert_dither(ac-> in
static int16_t adpcm_ima_mtf_expand_nibble(ADPCMChannelStatus *c, int nibble)
Definition: adpcm.c:327
int index
Definition: gxfenc.c:89
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:659
static int16_t adpcm_agm_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
Definition: adpcm.c:234
static int adpcm_ima_qt_expand_nibble(ADPCMChannelStatus *c, int nibble)
Definition: adpcm.c:386
ADPCMChannelStatus status[14]
Definition: adpcm.c:101
static av_const int sign_extend(int val, unsigned bits)
Definition: mathops.h:130
static int16_t adpcm_sbpro_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int size, int shift)
Definition: adpcm.c:473
static unsigned int get_bits_le(GetBitContext *s, int n)
Definition: get_bits.h:420
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:328
const int8_t ff_adpcm_ima_cunning_index_table[9]
Definition: adpcm_data.c:187
uint8_t level
Definition: svq3.c:205
int
const int8_t ff_adpcm_yamaha_difflookup[]
Definition: adpcm_data.c:104
common internal api header.
const int16_t ff_adpcm_yamaha_indexscale[]
Definition: adpcm_data.c:99
static int adpcm_decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr, AVPacket *avpkt)
Definition: adpcm.c:944
signed 16 bits
Definition: samplefmt.h:61
#define flag(name)
Definition: cbs_av1.c:552
channel
Use these values when setting the channel map with ebur128_set_channel().
Definition: ebur128.h:39
unsigned bps
Definition: movenc.c:1598
static const int8_t zork_index_table[8]
Definition: adpcm.c:89
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
Definition: avcodec.h:215
void * priv_data
Definition: avcodec.h:558
static av_always_inline int diff(const uint32_t a, const uint32_t b)
#define xf(width, name, var, range_min, range_max, subs,...)
Definition: cbs_av1.c:663
int channels
number of audio channels
Definition: avcodec.h:1192
static const double coeff[2][5]
Definition: vf_owdenoise.c:72
static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, int whence)
Definition: bytestream.h:212
static int16_t adpcm_ct_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
Definition: adpcm.c:452
static float add(float src0, float src1)
static enum AVSampleFormat sample_fmts_both[]
Definition: adpcm.c:2129
Filter the word “frame” indicates either a video frame or a group of audio samples
int16_t step_index
Definition: adpcm.h:33
signed 16 bits, planar
Definition: samplefmt.h:67
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Frame references ownership and permissions
uint8_t ** extended_data
pointers to the data planes/channels.
Definition: frame.h:361
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:91
static double val(void *priv, double ch)
Definition: aeval.c:76
This structure stores compressed data.
Definition: packet.h:340
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:380
for(j=16;j >0;--j)
int i
Definition: input.c:407
#define tb
Definition: regdef.h:68
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about which is also called distortion Distortion can be quantified by almost any quality measurement one chooses the sum of squared differences is used but more complex methods that consider psychovisual effects can be used as well It makes no difference in this discussion First step
#define ADPCM_DECODER(id_, sample_fmts_, name_, long_name_)
Definition: adpcm.c:2133