FFmpeg
adpcm.c
1 /*
2  * Copyright (c) 2001-2003 The FFmpeg project
3  *
4  * first version by Francois Revol (revol@free.fr)
5  * fringe ADPCM codecs (e.g., DK3, DK4, Westwood)
6  * by Mike Melanson (melanson@pcisys.net)
7  * CD-ROM XA ADPCM codec by BERO
8  * EA ADPCM decoder by Robin Kay (komadori@myrealbox.com)
9  * EA ADPCM R1/R2/R3 decoder by Peter Ross (pross@xvid.org)
10  * EA IMA EACS decoder by Peter Ross (pross@xvid.org)
11  * EA IMA SEAD decoder by Peter Ross (pross@xvid.org)
12  * EA ADPCM XAS decoder by Peter Ross (pross@xvid.org)
13  * MAXIS EA ADPCM decoder by Robert Marston (rmarston@gmail.com)
14  * THP ADPCM decoder by Marco Gerards (mgerards@xs4all.nl)
15  *
16  * This file is part of FFmpeg.
17  *
18  * FFmpeg is free software; you can redistribute it and/or
19  * modify it under the terms of the GNU Lesser General Public
20  * License as published by the Free Software Foundation; either
21  * version 2.1 of the License, or (at your option) any later version.
22  *
23  * FFmpeg is distributed in the hope that it will be useful,
24  * but WITHOUT ANY WARRANTY; without even the implied warranty of
25  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
26  * Lesser General Public License for more details.
27  *
28  * You should have received a copy of the GNU Lesser General Public
29  * License along with FFmpeg; if not, write to the Free Software
30  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
31  */
32 #include "avcodec.h"
33 #include "get_bits.h"
34 #include "bytestream.h"
35 #include "adpcm.h"
36 #include "adpcm_data.h"
37 #include "internal.h"
38 
39 /**
40  * @file
41  * ADPCM decoders
42  * Features and limitations:
43  *
44  * Reference documents:
45  * http://wiki.multimedia.cx/index.php?title=Category:ADPCM_Audio_Codecs
46  * http://www.pcisys.net/~melanson/codecs/simpleaudio.html [dead]
47  * http://www.geocities.com/SiliconValley/8682/aud3.txt [dead]
48  * http://openquicktime.sourceforge.net/
49  * XAnim sources (xa_codec.c) http://xanim.polter.net/
50  * http://www.cs.ucla.edu/~leec/mediabench/applications.html [dead]
51  * SoX source code http://sox.sourceforge.net/
52  *
53  * CD-ROM XA:
54  * http://ku-www.ss.titech.ac.jp/~yatsushi/xaadpcm.html [dead]
55  * vagpack & depack http://homepages.compuserve.de/bITmASTER32/psx-index.html [dead]
56  * readstr http://www.geocities.co.jp/Playtown/2004/
57  */
58 
59 /* These are for CD-ROM XA ADPCM */
60 static const int8_t xa_adpcm_table[5][2] = {
61  { 0, 0 },
62  { 60, 0 },
63  { 115, -52 },
64  { 98, -55 },
65  { 122, -60 }
66 };
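/* Note: these are the (K0, K1) prediction-filter pairs shared by the CD-ROM XA and
 * PSX decoders below, applied in .6 fixed point: the prediction term is
 * (s_1*f0 + s_2*f1 + 32) >> 6 in xa_decode() and (sample1*f0 + sample2*f1) / 64
 * in the ADPCM_PSX case. */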
67 
68 static const int16_t ea_adpcm_table[] = {
69  0, 240, 460, 392,
70  0, 0, -208, -220,
71  0, 1, 3, 4,
72  7, 8, 10, 11,
73  0, -1, -3, -4
74 };
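/* Note: the EA decoders below index this table with a 4-bit selector k from the
 * bitstream: coeff1 = ea_adpcm_table[k] and coeff2 = ea_adpcm_table[k + 4];
 * the prediction is then (sample * coeff1 + prev * coeff2 + 0x80) >> 8. */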
75 
76 // padded with zeros where the table size is less than 16
77 static const int8_t swf_index_tables[4][16] = {
78  /*2*/ { -1, 2 },
79  /*3*/ { -1, -1, 2, 4 },
80  /*4*/ { -1, -1, -1, -1, 2, 4, 6, 8 },
81  /*5*/ { -1, -1, -1, -1, -1, -1, -1, -1, 1, 2, 4, 6, 8, 10, 13, 16 }
82 };
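/* Note: row n-2 holds the step-index adjustments for n-bit SWF codes (n = 2..5);
 * adpcm_swf_decode() looks them up with the sign bit cleared
 * (table[delta & ~signmask]). */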
83 
84 /* end of tables */
85 
86 typedef struct ADPCMDecodeContext {
87  ADPCMChannelStatus status[14];
88  int vqa_version; /**< VQA version. Used for ADPCM_IMA_WS */
89  int has_status;
90 } ADPCMDecodeContext;
91 
92 static av_cold int adpcm_decode_init(AVCodecContext * avctx)
93 {
94  ADPCMDecodeContext *c = avctx->priv_data;
95  unsigned int min_channels = 1;
96  unsigned int max_channels = 2;
97 
98  switch(avctx->codec->id) {
101  min_channels = 2;
102  break;
108  max_channels = 6;
109  break;
111  min_channels = 2;
112  max_channels = 8;
113  if (avctx->channels & 1) {
114  avpriv_request_sample(avctx, "channel count %d\n", avctx->channels);
115  return AVERROR_PATCHWELCOME;
116  }
117  break;
119  max_channels = 8;
120  break;
124  max_channels = 14;
125  break;
126  }
127  if (avctx->channels < min_channels || avctx->channels > max_channels) {
128  av_log(avctx, AV_LOG_ERROR, "Invalid number of channels\n");
129  return AVERROR(EINVAL);
130  }
131 
132  switch(avctx->codec->id) {
134  c->status[0].step = c->status[1].step = 511;
135  break;
137  if (avctx->bits_per_coded_sample < 2 || avctx->bits_per_coded_sample > 5)
138  return AVERROR_INVALIDDATA;
139  break;
141  if (avctx->extradata && avctx->extradata_size >= 8) {
142  c->status[0].predictor = av_clip_intp2(AV_RL32(avctx->extradata ), 18);
143  c->status[1].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 4), 18);
144  }
145  break;
147  if (avctx->extradata && avctx->extradata_size >= 2)
148  c->vqa_version = AV_RL16(avctx->extradata);
149  break;
150  default:
151  break;
152  }
153 
154  switch(avctx->codec->id) {
171  avctx->sample_fmt = AV_SAMPLE_FMT_S16P;
172  break;
173  case AV_CODEC_ID_ADPCM_IMA_WS:
174  avctx->sample_fmt = c->vqa_version == 3 ? AV_SAMPLE_FMT_S16P :
175                                            AV_SAMPLE_FMT_S16;
176  break;
177  default:
178  avctx->sample_fmt = AV_SAMPLE_FMT_S16;
179  }
180 
181  return 0;
182 }
183 
184 static inline int16_t adpcm_agm_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
185 {
186  int delta, pred, step, add;
187 
188  pred = c->predictor;
189  delta = nibble & 7;
190  step = c->step;
191  add = (delta * 2 + 1) * step;
192  if (add < 0)
193  add = add + 7;
194 
195  if ((nibble & 8) == 0)
196  pred = av_clip(pred + (add >> 3), -32767, 32767);
197  else
198  pred = av_clip(pred - (add >> 3), -32767, 32767);
199 
200  switch (delta) {
201  case 7:
202  step *= 0x99;
203  break;
204  case 6:
205  c->step = av_clip(c->step * 2, 127, 24576);
206  c->predictor = pred;
207  return pred;
208  case 5:
209  step *= 0x66;
210  break;
211  case 4:
212  step *= 0x4d;
213  break;
214  default:
215  step *= 0x39;
216  break;
217  }
218 
219  if (step < 0)
220  step += 0x3f;
221 
222  c->step = step >> 6;
223  c->step = av_clip(c->step, 127, 24576);
224  c->predictor = pred;
225  return pred;
226 }
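/* Note on the step adaptation above: the multipliers are shifted right by 6
 * afterwards, so 0x99, 0x66, 0x4d and 0x39 scale the step by roughly 2.39, 1.59,
 * 1.20 and 0.89 respectively, while delta == 6 simply doubles it; the step is
 * always clamped to [127, 24576]. */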
227 
228 static inline int16_t adpcm_ima_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int shift)
229 {
230  int step_index;
231  int predictor;
232  int sign, delta, diff, step;
233 
234  step = ff_adpcm_step_table[c->step_index];
235  step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
236  step_index = av_clip(step_index, 0, 88);
237 
238  sign = nibble & 8;
239  delta = nibble & 7;
240  /* Perform a direct multiplication instead of the series of jumps proposed by
241  * the reference ADPCM implementation, since modern CPUs can do the
242  * multiplications quickly enough. */
243  diff = ((2 * delta + 1) * step) >> shift;
244  predictor = c->predictor;
245  if (sign) predictor -= diff;
246  else predictor += diff;
247 
248  c->predictor = av_clip_int16(predictor);
249  c->step_index = step_index;
250 
251  return (int16_t)c->predictor;
252 }
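/* Note: with shift == 3 (the common case), ((2 * delta + 1) * step) >> 3 is the
 * multiply form of the textbook IMA update diff ~= (delta + 0.5) * step / 4;
 * adpcm_swf_decode() below derives the same value with the classic
 * shift-and-add loop (see its vpdiff comment). */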
253 
254 static inline int16_t adpcm_ima_wav_expand_nibble(ADPCMChannelStatus *c, GetBitContext *gb, int bps)
255 {
256  int nibble, step_index, predictor, sign, delta, diff, step, shift;
257 
258  shift = bps - 1;
259  nibble = get_bits_le(gb, bps);
260  step = ff_adpcm_step_table[c->step_index];
261  step_index = c->step_index + ff_adpcm_index_tables[bps - 2][nibble];
262  step_index = av_clip(step_index, 0, 88);
263 
264  sign = nibble & (1 << shift);
265  delta = av_mod_uintp2(nibble, shift);
266  diff = ((2 * delta + 1) * step) >> shift;
267  predictor = c->predictor;
268  if (sign) predictor -= diff;
269  else predictor += diff;
270 
271  c->predictor = av_clip_int16(predictor);
272  c->step_index = step_index;
273 
274  return (int16_t)c->predictor;
275 }
276 
277 static inline int adpcm_ima_qt_expand_nibble(ADPCMChannelStatus *c, int nibble, int shift)
278 {
279  int step_index;
280  int predictor;
281  int diff, step;
282 
283  step = ff_adpcm_step_table[c->step_index];
284  step_index = c->step_index + ff_adpcm_index_table[nibble];
285  step_index = av_clip(step_index, 0, 88);
286 
287  diff = step >> 3;
288  if (nibble & 4) diff += step;
289  if (nibble & 2) diff += step >> 1;
290  if (nibble & 1) diff += step >> 2;
291 
292  if (nibble & 8)
293  predictor = c->predictor - diff;
294  else
295  predictor = c->predictor + diff;
296 
297  c->predictor = av_clip_int16(predictor);
298  c->step_index = step_index;
299 
300  return c->predictor;
301 }
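/* Note: the QuickTime variant computes (delta + 0.5) * step / 4 exactly with shifts
 * and adds (step >> 3 plus conditional step, step >> 1 and step >> 2 terms), which
 * matches the shift-and-add form of the IMA reference algorithm. */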
302 
303 static inline int16_t adpcm_ms_expand_nibble(ADPCMChannelStatus *c, int nibble)
304 {
305  int predictor;
306 
307  predictor = (((c->sample1) * (c->coeff1)) + ((c->sample2) * (c->coeff2))) / 64;
308  predictor += ((nibble & 0x08)?(nibble - 0x10):(nibble)) * c->idelta;
309 
310  c->sample2 = c->sample1;
311  c->sample1 = av_clip_int16(predictor);
312  c->idelta = (ff_adpcm_AdaptationTable[(int)nibble] * c->idelta) >> 8;
313  if (c->idelta < 16) c->idelta = 16;
314  if (c->idelta > INT_MAX/768) {
315  av_log(NULL, AV_LOG_WARNING, "idelta overflow\n");
316  c->idelta = INT_MAX/768;
317  }
318 
319  return c->sample1;
320 }
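/* Note: ff_adpcm_AdaptCoeff1/2 store the MS-ADPCM predictor coefficients already
 * divided by 4 (see adpcm_data.c), which is why the prediction above divides by 64
 * instead of the 256 used in the MS-ADPCM specification. */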
321 
322 static inline int16_t adpcm_ima_oki_expand_nibble(ADPCMChannelStatus *c, int nibble)
323 {
324  int step_index, predictor, sign, delta, diff, step;
325 
326  step = ff_adpcm_oki_step_table[c->step_index];
327  step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
328  step_index = av_clip(step_index, 0, 48);
329 
330  sign = nibble & 8;
331  delta = nibble & 7;
332  diff = ((2 * delta + 1) * step) >> 3;
333  predictor = c->predictor;
334  if (sign) predictor -= diff;
335  else predictor += diff;
336 
337  c->predictor = av_clip_intp2(predictor, 11);
338  c->step_index = step_index;
339 
340  return c->predictor * 16;
341 }
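/* Note: OKI/Dialogic ADPCM works on 12-bit samples, hence the 49-entry step table,
 * the clip to +/-2048 (av_clip_intp2(..., 11)) and the * 16 scaling up to the
 * 16-bit output range. */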
342 
343 static inline int16_t adpcm_ct_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
344 {
345  int sign, delta, diff;
346  int new_step;
347 
348  sign = nibble & 8;
349  delta = nibble & 7;
350  /* Perform a direct multiplication instead of the series of jumps proposed by
351  * the reference ADPCM implementation, since modern CPUs can do the
352  * multiplications quickly enough. */
353  diff = ((2 * delta + 1) * c->step) >> 3;
354  /* The predictor update is not so trivial: the predictor is multiplied by 254/256 before updating. */
355  c->predictor = ((c->predictor * 254) >> 8) + (sign ? -diff : diff);
356  c->predictor = av_clip_int16(c->predictor);
357  /* calculate new step and clamp it to range 511..32767 */
358  new_step = (ff_adpcm_AdaptationTable[nibble & 7] * c->step) >> 8;
359  c->step = av_clip(new_step, 511, 32767);
360 
361  return (int16_t)c->predictor;
362 }
363 
364 static inline int16_t adpcm_sbpro_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int size, int shift)
365 {
366  int sign, delta, diff;
367 
368  sign = nibble & (1<<(size-1));
369  delta = nibble & ((1<<(size-1))-1);
370  diff = delta << (7 + c->step + shift);
371 
372  /* clamp result */
373  c->predictor = av_clip(c->predictor + (sign ? -diff : diff), -16384,16256);
374 
375  /* calculate new step */
376  if (delta >= (2*size - 3) && c->step < 3)
377  c->step++;
378  else if (delta == 0 && c->step > 0)
379  c->step--;
380 
381  return (int16_t) c->predictor;
382 }
383 
384 static inline int16_t adpcm_yamaha_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
385 {
386  if(!c->step) {
387  c->predictor = 0;
388  c->step = 127;
389  }
390 
391  c->predictor += (c->step * ff_adpcm_yamaha_difflookup[nibble]) / 8;
392  c->predictor = av_clip_int16(c->predictor);
393  c->step = (c->step * ff_adpcm_yamaha_indexscale[nibble]) >> 8;
394  c->step = av_clip(c->step, 127, 24576);
395  return c->predictor;
396 }
397 
398 static inline int16_t adpcm_mtaf_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
399 {
400  c->predictor += ff_adpcm_mtaf_stepsize[c->step][nibble];
401  c->predictor = av_clip_int16(c->predictor);
402  c->step += ff_adpcm_index_table[nibble];
403  c->step = av_clip_uintp2(c->step, 5);
404  return c->predictor;
405 }
406 
407 static int xa_decode(AVCodecContext *avctx, int16_t *out0, int16_t *out1,
408  const uint8_t *in, ADPCMChannelStatus *left,
409  ADPCMChannelStatus *right, int channels, int sample_offset)
410 {
411  int i, j;
412  int shift,filter,f0,f1;
413  int s_1,s_2;
414  int d,s,t;
415 
416  out0 += sample_offset;
417  if (channels == 1)
418  out1 = out0 + 28;
419  else
420  out1 += sample_offset;
421 
422  for(i=0;i<4;i++) {
423  shift = 12 - (in[4+i*2] & 15);
424  filter = in[4+i*2] >> 4;
425  if (filter >= FF_ARRAY_ELEMS(xa_adpcm_table)) {
426  avpriv_request_sample(avctx, "unknown XA-ADPCM filter %d", filter);
427  filter=0;
428  }
429  if (shift < 0) {
430  avpriv_request_sample(avctx, "unknown XA-ADPCM shift %d", shift);
431  shift = 0;
432  }
433  f0 = xa_adpcm_table[filter][0];
434  f1 = xa_adpcm_table[filter][1];
435 
436  s_1 = left->sample1;
437  s_2 = left->sample2;
438 
439  for(j=0;j<28;j++) {
440  d = in[16+i+j*4];
441 
442  t = sign_extend(d, 4);
443  s = t*(1<<shift) + ((s_1*f0 + s_2*f1+32)>>6);
444  s_2 = s_1;
445  s_1 = av_clip_int16(s);
446  out0[j] = s_1;
447  }
448 
449  if (channels == 2) {
450  left->sample1 = s_1;
451  left->sample2 = s_2;
452  s_1 = right->sample1;
453  s_2 = right->sample2;
454  }
455 
456  shift = 12 - (in[5+i*2] & 15);
457  filter = in[5+i*2] >> 4;
458  if (filter >= FF_ARRAY_ELEMS(xa_adpcm_table) || shift < 0) {
459  avpriv_request_sample(avctx, "unknown XA-ADPCM filter %d", filter);
460  filter=0;
461  }
462  if (shift < 0) {
463  avpriv_request_sample(avctx, "unknown XA-ADPCM shift %d", shift);
464  shift = 0;
465  }
466 
467  f0 = xa_adpcm_table[filter][0];
468  f1 = xa_adpcm_table[filter][1];
469 
470  for(j=0;j<28;j++) {
471  d = in[16+i+j*4];
472 
473  t = sign_extend(d >> 4, 4);
474  s = t*(1<<shift) + ((s_1*f0 + s_2*f1+32)>>6);
475  s_2 = s_1;
476  s_1 = av_clip_int16(s);
477  out1[j] = s_1;
478  }
479 
480  if (channels == 2) {
481  right->sample1 = s_1;
482  right->sample2 = s_2;
483  } else {
484  left->sample1 = s_1;
485  left->sample2 = s_2;
486  }
487 
488  out0 += 28 * (3 - channels);
489  out1 += 28 * (3 - channels);
490  }
491 
492  return 0;
493 }
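/* Note: each 128-byte CD-XA sound group handled above is a 16-byte parameter header
 * followed by 112 data bytes; the outer i-loop walks its 8 sound units (low and high
 * nibbles of four interleaved byte streams), each yielding 28 samples. */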
494 
495 static void adpcm_swf_decode(AVCodecContext *avctx, const uint8_t *buf, int buf_size, int16_t *samples)
496 {
497  ADPCMDecodeContext *c = avctx->priv_data;
498  GetBitContext gb;
499  const int8_t *table;
500  int k0, signmask, nb_bits, count;
501  int size = buf_size*8;
502  int i;
503 
504  init_get_bits(&gb, buf, size);
505 
506  //read bits & initial values
507  nb_bits = get_bits(&gb, 2)+2;
508  table = swf_index_tables[nb_bits-2];
509  k0 = 1 << (nb_bits-2);
510  signmask = 1 << (nb_bits-1);
511 
512  while (get_bits_count(&gb) <= size - 22*avctx->channels) {
513  for (i = 0; i < avctx->channels; i++) {
514  *samples++ = c->status[i].predictor = get_sbits(&gb, 16);
515  c->status[i].step_index = get_bits(&gb, 6);
516  }
517 
518  for (count = 0; get_bits_count(&gb) <= size - nb_bits*avctx->channels && count < 4095; count++) {
519  int i;
520 
521  for (i = 0; i < avctx->channels; i++) {
522  // similar to IMA adpcm
523  int delta = get_bits(&gb, nb_bits);
524  int step = ff_adpcm_step_table[c->status[i].step_index];
525  int vpdiff = 0; // vpdiff = (delta+0.5)*step/4
526  int k = k0;
527 
528  do {
529  if (delta & k)
530  vpdiff += step;
531  step >>= 1;
532  k >>= 1;
533  } while(k);
534  vpdiff += step;
535 
536  if (delta & signmask)
537  c->status[i].predictor -= vpdiff;
538  else
539  c->status[i].predictor += vpdiff;
540 
541  c->status[i].step_index += table[delta & (~signmask)];
542 
543  c->status[i].step_index = av_clip(c->status[i].step_index, 0, 88);
544  c->status[i].predictor = av_clip_int16(c->status[i].predictor);
545 
546  *samples++ = c->status[i].predictor;
547  }
548  }
549  }
550 }
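/* Note: an SWF audio block starts with 2 bits giving the code size (2..5 bits), then
 * a 16-bit seed sample and a 6-bit step index per channel, followed by up to 4095
 * codes per channel; get_nb_samples() sizes blocks the same way
 * (22 * ch header bits plus nbits * ch * 4095 code bits). */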
551 
552 /**
553  * Get the number of samples that will be decoded from the packet.
554  * In one case, this is actually the maximum number of samples possible to
555  * decode with the given buf_size.
556  *
557  * @param[out] coded_samples set to the number of samples as coded in the
558  * packet, or 0 if the codec does not encode the
559  * number of samples in each frame.
560  * @param[out] approx_nb_samples set to non-zero if the number of samples
561  * returned is an approximation.
562  */
563 static int get_nb_samples(AVCodecContext *avctx, GetByteContext *gb,
564  int buf_size, int *coded_samples, int *approx_nb_samples)
565 {
566  ADPCMDecodeContext *s = avctx->priv_data;
567  int nb_samples = 0;
568  int ch = avctx->channels;
569  int has_coded_samples = 0;
570  int header_size;
571 
572  *coded_samples = 0;
573  *approx_nb_samples = 0;
574 
575  if(ch <= 0)
576  return 0;
577 
578  switch (avctx->codec->id) {
579  /* constant, only check buf_size */
581  if (buf_size < 76 * ch)
582  return 0;
583  nb_samples = 128;
584  break;
586  if (buf_size < 34 * ch)
587  return 0;
588  nb_samples = 64;
589  break;
590  /* simple 4-bit adpcm */
598  nb_samples = buf_size * 2 / ch;
599  break;
600  }
601  if (nb_samples)
602  return nb_samples;
603 
604  /* simple 4-bit adpcm, with header */
605  header_size = 0;
606  switch (avctx->codec->id) {
610  case AV_CODEC_ID_ADPCM_IMA_ISS: header_size = 4 * ch; break;
611  case AV_CODEC_ID_ADPCM_IMA_AMV: header_size = 8; break;
612  case AV_CODEC_ID_ADPCM_IMA_SMJPEG: header_size = 4 * ch; break;
613  }
614  if (header_size > 0)
615  return (buf_size - header_size) * 2 / ch;
616 
617  /* more complex formats */
618  switch (avctx->codec->id) {
620  has_coded_samples = 1;
621  *coded_samples = bytestream2_get_le32(gb);
622  *coded_samples -= *coded_samples % 28;
623  nb_samples = (buf_size - 12) / 30 * 28;
624  break;
626  has_coded_samples = 1;
627  *coded_samples = bytestream2_get_le32(gb);
628  nb_samples = (buf_size - (4 + 8 * ch)) * 2 / ch;
629  break;
631  nb_samples = (buf_size - ch) / ch * 2;
632  break;
636  /* maximum number of samples */
637  /* has internal offsets and a per-frame switch to signal raw 16-bit */
638  has_coded_samples = 1;
639  switch (avctx->codec->id) {
641  header_size = 4 + 9 * ch;
642  *coded_samples = bytestream2_get_le32(gb);
643  break;
645  header_size = 4 + 5 * ch;
646  *coded_samples = bytestream2_get_le32(gb);
647  break;
649  header_size = 4 + 5 * ch;
650  *coded_samples = bytestream2_get_be32(gb);
651  break;
652  }
653  *coded_samples -= *coded_samples % 28;
654  nb_samples = (buf_size - header_size) * 2 / ch;
655  nb_samples -= nb_samples % 28;
656  *approx_nb_samples = 1;
657  break;
659  if (avctx->block_align > 0)
660  buf_size = FFMIN(buf_size, avctx->block_align);
661  nb_samples = ((buf_size - 16) * 2 / 3 * 4) / ch;
662  break;
664  if (avctx->block_align > 0)
665  buf_size = FFMIN(buf_size, avctx->block_align);
666  if (buf_size < 4 * ch)
667  return AVERROR_INVALIDDATA;
668  nb_samples = 1 + (buf_size - 4 * ch) * 2 / ch;
669  break;
671  if (avctx->block_align > 0)
672  buf_size = FFMIN(buf_size, avctx->block_align);
673  nb_samples = (buf_size - 4 * ch) * 2 / ch;
674  break;
676  {
677  int bsize = ff_adpcm_ima_block_sizes[avctx->bits_per_coded_sample - 2];
678  int bsamples = ff_adpcm_ima_block_samples[avctx->bits_per_coded_sample - 2];
679  if (avctx->block_align > 0)
680  buf_size = FFMIN(buf_size, avctx->block_align);
681  if (buf_size < 4 * ch)
682  return AVERROR_INVALIDDATA;
683  nb_samples = 1 + (buf_size - 4 * ch) / (bsize * ch) * bsamples;
684  break;
685  }
687  if (avctx->block_align > 0)
688  buf_size = FFMIN(buf_size, avctx->block_align);
689  nb_samples = (buf_size - 6 * ch) * 2 / ch;
690  break;
692  if (avctx->block_align > 0)
693  buf_size = FFMIN(buf_size, avctx->block_align);
694  nb_samples = (buf_size - 16 * (ch / 2)) * 2 / ch;
695  break;
699  {
700  int samples_per_byte;
701  switch (avctx->codec->id) {
702  case AV_CODEC_ID_ADPCM_SBPRO_2: samples_per_byte = 4; break;
703  case AV_CODEC_ID_ADPCM_SBPRO_3: samples_per_byte = 3; break;
704  case AV_CODEC_ID_ADPCM_SBPRO_4: samples_per_byte = 2; break;
705  }
706  if (!s->status[0].step_index) {
707  if (buf_size < ch)
708  return AVERROR_INVALIDDATA;
709  nb_samples++;
710  buf_size -= ch;
711  }
712  nb_samples += buf_size * samples_per_byte / ch;
713  break;
714  }
716  {
717  int buf_bits = buf_size * 8 - 2;
718  int nbits = (bytestream2_get_byte(gb) >> 6) + 2;
719  int block_hdr_size = 22 * ch;
720  int block_size = block_hdr_size + nbits * ch * 4095;
721  int nblocks = buf_bits / block_size;
722  int bits_left = buf_bits - nblocks * block_size;
723  nb_samples = nblocks * 4096;
724  if (bits_left >= block_hdr_size)
725  nb_samples += 1 + (bits_left - block_hdr_size) / (nbits * ch);
726  break;
727  }
730  if (avctx->extradata) {
731  nb_samples = buf_size * 14 / (8 * ch);
732  break;
733  }
734  has_coded_samples = 1;
735  bytestream2_skip(gb, 4); // channel size
736  *coded_samples = (avctx->codec->id == AV_CODEC_ID_ADPCM_THP_LE) ?
737  bytestream2_get_le32(gb) :
738  bytestream2_get_be32(gb);
739  buf_size -= 8 + 36 * ch;
740  buf_size /= ch;
741  nb_samples = buf_size / 8 * 14;
742  if (buf_size % 8 > 1)
743  nb_samples += (buf_size % 8 - 1) * 2;
744  *approx_nb_samples = 1;
745  break;
747  nb_samples = buf_size / (9 * ch) * 16;
748  break;
750  nb_samples = (buf_size / 128) * 224 / ch;
751  break;
754  nb_samples = buf_size / (16 * ch) * 28;
755  break;
756  }
757 
758  /* validate coded sample count */
759  if (has_coded_samples && (*coded_samples <= 0 || *coded_samples > nb_samples))
760  return AVERROR_INVALIDDATA;
761 
762  return nb_samples;
763 }
764 
765 static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
766  int *got_frame_ptr, AVPacket *avpkt)
767 {
768  AVFrame *frame = data;
769  const uint8_t *buf = avpkt->data;
770  int buf_size = avpkt->size;
771  ADPCMDecodeContext *c = avctx->priv_data;
772  ADPCMChannelStatus *cs;
773  int n, m, channel, i;
774  int16_t *samples;
775  int16_t **samples_p;
776  int st; /* stereo */
777  int count1, count2;
778  int nb_samples, coded_samples, approx_nb_samples, ret;
779  GetByteContext gb;
780 
781  bytestream2_init(&gb, buf, buf_size);
782  nb_samples = get_nb_samples(avctx, &gb, buf_size, &coded_samples, &approx_nb_samples);
783  if (nb_samples <= 0) {
784  av_log(avctx, AV_LOG_ERROR, "invalid number of samples in packet\n");
785  return AVERROR_INVALIDDATA;
786  }
787 
788  /* get output buffer */
789  frame->nb_samples = nb_samples;
790  if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
791  return ret;
792  samples = (int16_t *)frame->data[0];
793  samples_p = (int16_t **)frame->extended_data;
794 
795  /* use coded_samples when applicable */
796  /* it is always <= nb_samples, so the output buffer will be large enough */
797  if (coded_samples) {
798  if (!approx_nb_samples && coded_samples != nb_samples)
799  av_log(avctx, AV_LOG_WARNING, "mismatch in coded sample count\n");
800  frame->nb_samples = nb_samples = coded_samples;
801  }
802 
803  st = avctx->channels == 2 ? 1 : 0;
804 
805  switch(avctx->codec->id) {
806  case AV_CODEC_ID_ADPCM_IMA_QT:
807  /* In QuickTime, IMA is encoded by chunks of 34 bytes (=64 samples).
808  Channel data is interleaved per-chunk. */
809  for (channel = 0; channel < avctx->channels; channel++) {
810  int predictor;
811  int step_index;
812  cs = &(c->status[channel]);
813  /* (pppppp) (piiiiiii) */
814 
815  /* Bits 15-7 are the _top_ 9 bits of the 16-bit initial predictor value */
816  predictor = sign_extend(bytestream2_get_be16u(&gb), 16);
817  step_index = predictor & 0x7F;
818  predictor &= ~0x7F;
819 
820  if (cs->step_index == step_index) {
821  int diff = predictor - cs->predictor;
822  if (diff < 0)
823  diff = - diff;
824  if (diff > 0x7f)
825  goto update;
826  } else {
827  update:
828  cs->step_index = step_index;
829  cs->predictor = predictor;
830  }
831 
832  if (cs->step_index > 88u){
833  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
834  channel, cs->step_index);
835  return AVERROR_INVALIDDATA;
836  }
837 
838  samples = samples_p[channel];
839 
840  for (m = 0; m < 64; m += 2) {
841  int byte = bytestream2_get_byteu(&gb);
842  samples[m ] = adpcm_ima_qt_expand_nibble(cs, byte & 0x0F, 3);
843  samples[m + 1] = adpcm_ima_qt_expand_nibble(cs, byte >> 4 , 3);
844  }
845  }
846  break;
847  case AV_CODEC_ID_ADPCM_IMA_WAV:
848  for(i=0; i<avctx->channels; i++){
849  cs = &(c->status[i]);
850  cs->predictor = samples_p[i][0] = sign_extend(bytestream2_get_le16u(&gb), 16);
851 
852  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
853  if (cs->step_index > 88u){
854  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
855  i, cs->step_index);
856  return AVERROR_INVALIDDATA;
857  }
858  }
859 
860  if (avctx->bits_per_coded_sample != 4) {
861  int samples_per_block = ff_adpcm_ima_block_samples[avctx->bits_per_coded_sample - 2];
862  int block_size = ff_adpcm_ima_block_sizes[avctx->bits_per_coded_sample - 2];
863  uint8_t temp[20 + AV_INPUT_BUFFER_PADDING_SIZE];
864  GetBitContext g;
865 
866  for (n = 0; n < (nb_samples - 1) / samples_per_block; n++) {
867  for (i = 0; i < avctx->channels; i++) {
868  int j;
869 
870  cs = &c->status[i];
871  samples = &samples_p[i][1 + n * samples_per_block];
872  for (j = 0; j < block_size; j++) {
873  temp[j] = buf[4 * avctx->channels + block_size * n * avctx->channels +
874  (j % 4) + (j / 4) * (avctx->channels * 4) + i * 4];
875  }
876  ret = init_get_bits8(&g, (const uint8_t *)&temp, block_size);
877  if (ret < 0)
878  return ret;
879  for (m = 0; m < samples_per_block; m++) {
880  samples[m] = adpcm_ima_wav_expand_nibble(cs, &g,
881  avctx->bits_per_coded_sample);
882  }
883  }
884  }
885  bytestream2_skip(&gb, avctx->block_align - avctx->channels * 4);
886  } else {
887  for (n = 0; n < (nb_samples - 1) / 8; n++) {
888  for (i = 0; i < avctx->channels; i++) {
889  cs = &c->status[i];
890  samples = &samples_p[i][1 + n * 8];
891  for (m = 0; m < 8; m += 2) {
892  int v = bytestream2_get_byteu(&gb);
893  samples[m ] = adpcm_ima_expand_nibble(cs, v & 0x0F, 3);
894  samples[m + 1] = adpcm_ima_expand_nibble(cs, v >> 4 , 3);
895  }
896  }
897  }
898  }
899  break;
901  for (i = 0; i < avctx->channels; i++)
902  c->status[i].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
903 
904  for (i = 0; i < avctx->channels; i++) {
905  c->status[i].step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
906  if (c->status[i].step_index > 88u) {
907  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
908  i, c->status[i].step_index);
909  return AVERROR_INVALIDDATA;
910  }
911  }
912 
913  for (i = 0; i < avctx->channels; i++) {
914  samples = (int16_t *)frame->data[i];
915  cs = &c->status[i];
916  for (n = nb_samples >> 1; n > 0; n--) {
917  int v = bytestream2_get_byteu(&gb);
918  *samples++ = adpcm_ima_expand_nibble(cs, v & 0x0F, 4);
919  *samples++ = adpcm_ima_expand_nibble(cs, v >> 4 , 4);
920  }
921  }
922  break;
923  case AV_CODEC_ID_ADPCM_AGM:
924  for (i = 0; i < avctx->channels; i++)
925  c->status[i].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
926  for (i = 0; i < avctx->channels; i++)
927  c->status[i].step = sign_extend(bytestream2_get_le16u(&gb), 16);
928 
929  for (n = 0; n < nb_samples >> (1 - st); n++) {
930  int v = bytestream2_get_byteu(&gb);
931  *samples++ = adpcm_agm_expand_nibble(&c->status[0], v & 0xF);
932  *samples++ = adpcm_agm_expand_nibble(&c->status[st], v >> 4 );
933  }
934  break;
935  case AV_CODEC_ID_ADPCM_MS:
936  {
937  int block_predictor;
938 
939  block_predictor = bytestream2_get_byteu(&gb);
940  if (block_predictor > 6) {
941  av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[0] = %d\n",
942  block_predictor);
943  return AVERROR_INVALIDDATA;
944  }
945  c->status[0].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
946  c->status[0].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
947  if (st) {
948  block_predictor = bytestream2_get_byteu(&gb);
949  if (block_predictor > 6) {
950  av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[1] = %d\n",
951  block_predictor);
952  return AVERROR_INVALIDDATA;
953  }
954  c->status[1].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
955  c->status[1].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
956  }
957  c->status[0].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
958  if (st){
959  c->status[1].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
960  }
961 
962  c->status[0].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
963  if (st) c->status[1].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
964  c->status[0].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
965  if (st) c->status[1].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
966 
967  *samples++ = c->status[0].sample2;
968  if (st) *samples++ = c->status[1].sample2;
969  *samples++ = c->status[0].sample1;
970  if (st) *samples++ = c->status[1].sample1;
971  for(n = (nb_samples - 2) >> (1 - st); n > 0; n--) {
972  int byte = bytestream2_get_byteu(&gb);
973  *samples++ = adpcm_ms_expand_nibble(&c->status[0 ], byte >> 4 );
974  *samples++ = adpcm_ms_expand_nibble(&c->status[st], byte & 0x0F);
975  }
976  break;
977  }
978  case AV_CODEC_ID_ADPCM_MTAF:
979  for (channel = 0; channel < avctx->channels; channel+=2) {
980  bytestream2_skipu(&gb, 4);
981  c->status[channel ].step = bytestream2_get_le16u(&gb) & 0x1f;
982  c->status[channel + 1].step = bytestream2_get_le16u(&gb) & 0x1f;
983  c->status[channel ].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
984  bytestream2_skipu(&gb, 2);
985  c->status[channel + 1].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
986  bytestream2_skipu(&gb, 2);
987  for (n = 0; n < nb_samples; n+=2) {
988  int v = bytestream2_get_byteu(&gb);
989  samples_p[channel][n ] = adpcm_mtaf_expand_nibble(&c->status[channel], v & 0x0F);
990  samples_p[channel][n + 1] = adpcm_mtaf_expand_nibble(&c->status[channel], v >> 4 );
991  }
992  for (n = 0; n < nb_samples; n+=2) {
993  int v = bytestream2_get_byteu(&gb);
994  samples_p[channel + 1][n ] = adpcm_mtaf_expand_nibble(&c->status[channel + 1], v & 0x0F);
995  samples_p[channel + 1][n + 1] = adpcm_mtaf_expand_nibble(&c->status[channel + 1], v >> 4 );
996  }
997  }
998  break;
1000  for (channel = 0; channel < avctx->channels; channel++) {
1001  cs = &c->status[channel];
1002  cs->predictor = *samples++ = sign_extend(bytestream2_get_le16u(&gb), 16);
1003  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1004  if (cs->step_index > 88u){
1005  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1006  channel, cs->step_index);
1007  return AVERROR_INVALIDDATA;
1008  }
1009  }
1010  for (n = (nb_samples - 1) >> (1 - st); n > 0; n--) {
1011  int v = bytestream2_get_byteu(&gb);
1012  *samples++ = adpcm_ima_expand_nibble(&c->status[0 ], v >> 4 , 3);
1013  *samples++ = adpcm_ima_expand_nibble(&c->status[st], v & 0x0F, 3);
1014  }
1015  break;
1016  case AV_CODEC_ID_ADPCM_IMA_DK3:
1017  {
1018  int last_byte = 0;
1019  int nibble;
1020  int decode_top_nibble_next = 0;
1021  int diff_channel;
1022  const int16_t *samples_end = samples + avctx->channels * nb_samples;
1023 
1024  bytestream2_skipu(&gb, 10);
1025  c->status[0].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1026  c->status[1].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1027  c->status[0].step_index = bytestream2_get_byteu(&gb);
1028  c->status[1].step_index = bytestream2_get_byteu(&gb);
1029  if (c->status[0].step_index > 88u || c->status[1].step_index > 88u){
1030  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i/%i\n",
1031  c->status[0].step_index, c->status[1].step_index);
1032  return AVERROR_INVALIDDATA;
1033  }
1034  /* sign extend the predictors */
1035  diff_channel = c->status[1].predictor;
1036 
1037  /* DK3 ADPCM support macro */
1038 #define DK3_GET_NEXT_NIBBLE() \
1039  if (decode_top_nibble_next) { \
1040  nibble = last_byte >> 4; \
1041  decode_top_nibble_next = 0; \
1042  } else { \
1043  last_byte = bytestream2_get_byteu(&gb); \
1044  nibble = last_byte & 0x0F; \
1045  decode_top_nibble_next = 1; \
1046  }
1047 
1048  while (samples < samples_end) {
1049 
1050  /* for this algorithm, c->status[0] is the sum channel and
1051  * c->status[1] is the diff channel */
1052 
1053  /* process the first predictor of the sum channel */
1054  DK3_GET_NEXT_NIBBLE();
1055  adpcm_ima_expand_nibble(&c->status[0], nibble, 3);
1056 
1057  /* process the diff channel predictor */
1058  DK3_GET_NEXT_NIBBLE();
1059  adpcm_ima_expand_nibble(&c->status[1], nibble, 3);
1060 
1061  /* process the first pair of stereo PCM samples */
1062  diff_channel = (diff_channel + c->status[1].predictor) / 2;
1063  *samples++ = c->status[0].predictor + c->status[1].predictor;
1064  *samples++ = c->status[0].predictor - c->status[1].predictor;
1065 
1066  /* process the second predictor of the sum channel */
1067  DK3_GET_NEXT_NIBBLE();
1068  adpcm_ima_expand_nibble(&c->status[0], nibble, 3);
1069 
1070  /* process the second pair of stereo PCM samples */
1071  diff_channel = (diff_channel + c->status[1].predictor) / 2;
1072  *samples++ = c->status[0].predictor + c->status[1].predictor;
1073  *samples++ = c->status[0].predictor - c->status[1].predictor;
1074  }
1075 
1076  if ((bytestream2_tell(&gb) & 1))
1077  bytestream2_skip(&gb, 1);
1078  break;
1079  }
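/* Note: DK3 stores a "sum" stream (status[0]) and a "diff" stream (status[1]) rather
 * than left/right; the loop above reconstructs stereo as sum + diff and sum - diff,
 * producing two stereo pairs per three nibbles (the diff stream is only updated for
 * the first pair of each group). */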
1081  for (channel = 0; channel < avctx->channels; channel++) {
1082  cs = &c->status[channel];
1083  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1084  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1085  if (cs->step_index > 88u){
1086  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1087  channel, cs->step_index);
1088  return AVERROR_INVALIDDATA;
1089  }
1090  }
1091 
1092  for (n = nb_samples >> (1 - st); n > 0; n--) {
1093  int v1, v2;
1094  int v = bytestream2_get_byteu(&gb);
1095  /* nibbles are swapped for mono */
1096  if (st) {
1097  v1 = v >> 4;
1098  v2 = v & 0x0F;
1099  } else {
1100  v2 = v >> 4;
1101  v1 = v & 0x0F;
1102  }
1103  *samples++ = adpcm_ima_expand_nibble(&c->status[0 ], v1, 3);
1104  *samples++ = adpcm_ima_expand_nibble(&c->status[st], v2, 3);
1105  }
1106  break;
1108  for (channel = 0; channel < avctx->channels; channel++) {
1109  cs = &c->status[channel];
1110  samples = samples_p[channel];
1111  bytestream2_skip(&gb, 4);
1112  for (n = 0; n < nb_samples; n += 2) {
1113  int v = bytestream2_get_byteu(&gb);
1114  *samples++ = adpcm_ima_expand_nibble(cs, v >> 4 , 3);
1115  *samples++ = adpcm_ima_expand_nibble(cs, v & 0x0F, 3);
1116  }
1117  }
1118  break;
1120  while (bytestream2_get_bytes_left(&gb) > 0) {
1121  int v = bytestream2_get_byteu(&gb);
1122  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4 , 3);
1123  *samples++ = adpcm_ima_expand_nibble(&c->status[st], v & 0x0F, 3);
1124  }
1125  break;
1126  case AV_CODEC_ID_ADPCM_IMA_OKI:
1127  while (bytestream2_get_bytes_left(&gb) > 0) {
1128  int v = bytestream2_get_byteu(&gb);
1129  *samples++ = adpcm_ima_oki_expand_nibble(&c->status[0], v >> 4 );
1130  *samples++ = adpcm_ima_oki_expand_nibble(&c->status[st], v & 0x0F);
1131  }
1132  break;
1134  for (channel = 0; channel < avctx->channels; channel++) {
1135  cs = &c->status[channel];
1136  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1137  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1138  if (cs->step_index > 88u){
1139  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1140  channel, cs->step_index);
1141  return AVERROR_INVALIDDATA;
1142  }
1143  }
1144  for (n = 0; n < nb_samples / 2; n++) {
1145  int byte[2];
1146 
1147  byte[0] = bytestream2_get_byteu(&gb);
1148  if (st)
1149  byte[1] = bytestream2_get_byteu(&gb);
1150  for(channel = 0; channel < avctx->channels; channel++) {
1151  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], byte[channel] & 0x0F, 3);
1152  }
1153  for(channel = 0; channel < avctx->channels; channel++) {
1154  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], byte[channel] >> 4 , 3);
1155  }
1156  }
1157  break;
1158  case AV_CODEC_ID_ADPCM_IMA_WS:
1159  if (c->vqa_version == 3) {
1160  for (channel = 0; channel < avctx->channels; channel++) {
1161  int16_t *smp = samples_p[channel];
1162 
1163  for (n = nb_samples / 2; n > 0; n--) {
1164  int v = bytestream2_get_byteu(&gb);
1165  *smp++ = adpcm_ima_expand_nibble(&c->status[channel], v >> 4 , 3);
1166  *smp++ = adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3);
1167  }
1168  }
1169  } else {
1170  for (n = nb_samples / 2; n > 0; n--) {
1171  for (channel = 0; channel < avctx->channels; channel++) {
1172  int v = bytestream2_get_byteu(&gb);
1173  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], v >> 4 , 3);
1174  samples[st] = adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3);
1175  }
1176  samples += avctx->channels;
1177  }
1178  }
1179  bytestream2_seek(&gb, 0, SEEK_END);
1180  break;
1181  case AV_CODEC_ID_ADPCM_XA:
1182  {
1183  int16_t *out0 = samples_p[0];
1184  int16_t *out1 = samples_p[1];
1185  int samples_per_block = 28 * (3 - avctx->channels) * 4;
1186  int sample_offset = 0;
1187  int bytes_remaining;
1188  while (bytestream2_get_bytes_left(&gb) >= 128) {
1189  if ((ret = xa_decode(avctx, out0, out1, buf + bytestream2_tell(&gb),
1190  &c->status[0], &c->status[1],
1191  avctx->channels, sample_offset)) < 0)
1192  return ret;
1193  bytestream2_skipu(&gb, 128);
1194  sample_offset += samples_per_block;
1195  }
1196  /* Less than a full block of data left, e.g. when reading from
1197  * 2324 byte per sector XA; the remainder is padding */
1198  bytes_remaining = bytestream2_get_bytes_left(&gb);
1199  if (bytes_remaining > 0) {
1200  bytestream2_skip(&gb, bytes_remaining);
1201  }
1202  break;
1203  }
1205  for (i=0; i<=st; i++) {
1206  c->status[i].step_index = bytestream2_get_le32u(&gb);
1207  if (c->status[i].step_index > 88u) {
1208  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1209  i, c->status[i].step_index);
1210  return AVERROR_INVALIDDATA;
1211  }
1212  }
1213  for (i=0; i<=st; i++) {
1214  c->status[i].predictor = bytestream2_get_le32u(&gb);
1215  if (FFABS((int64_t)c->status[i].predictor) > (1<<16))
1216  return AVERROR_INVALIDDATA;
1217  }
1218 
1219  for (n = nb_samples >> (1 - st); n > 0; n--) {
1220  int byte = bytestream2_get_byteu(&gb);
1221  *samples++ = adpcm_ima_expand_nibble(&c->status[0], byte >> 4, 3);
1222  *samples++ = adpcm_ima_expand_nibble(&c->status[st], byte & 0x0F, 3);
1223  }
1224  break;
1226  for (n = nb_samples >> (1 - st); n > 0; n--) {
1227  int byte = bytestream2_get_byteu(&gb);
1228  *samples++ = adpcm_ima_expand_nibble(&c->status[0], byte >> 4, 6);
1229  *samples++ = adpcm_ima_expand_nibble(&c->status[st], byte & 0x0F, 6);
1230  }
1231  break;
1232  case AV_CODEC_ID_ADPCM_EA:
1233  {
1234  int previous_left_sample, previous_right_sample;
1235  int current_left_sample, current_right_sample;
1236  int next_left_sample, next_right_sample;
1237  int coeff1l, coeff2l, coeff1r, coeff2r;
1238  int shift_left, shift_right;
1239 
1240  /* Each EA ADPCM frame has a 12-byte header followed by 30-byte pieces,
1241  each coding 28 stereo samples. */
1242 
1243  if(avctx->channels != 2)
1244  return AVERROR_INVALIDDATA;
1245 
1246  current_left_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1247  previous_left_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1248  current_right_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1249  previous_right_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1250 
1251  for (count1 = 0; count1 < nb_samples / 28; count1++) {
1252  int byte = bytestream2_get_byteu(&gb);
1253  coeff1l = ea_adpcm_table[ byte >> 4 ];
1254  coeff2l = ea_adpcm_table[(byte >> 4 ) + 4];
1255  coeff1r = ea_adpcm_table[ byte & 0x0F];
1256  coeff2r = ea_adpcm_table[(byte & 0x0F) + 4];
1257 
1258  byte = bytestream2_get_byteu(&gb);
1259  shift_left = 20 - (byte >> 4);
1260  shift_right = 20 - (byte & 0x0F);
1261 
1262  for (count2 = 0; count2 < 28; count2++) {
1263  byte = bytestream2_get_byteu(&gb);
1264  next_left_sample = sign_extend(byte >> 4, 4) * (1 << shift_left);
1265  next_right_sample = sign_extend(byte, 4) * (1 << shift_right);
1266 
1267  next_left_sample = (next_left_sample +
1268  (current_left_sample * coeff1l) +
1269  (previous_left_sample * coeff2l) + 0x80) >> 8;
1270  next_right_sample = (next_right_sample +
1271  (current_right_sample * coeff1r) +
1272  (previous_right_sample * coeff2r) + 0x80) >> 8;
1273 
1274  previous_left_sample = current_left_sample;
1275  current_left_sample = av_clip_int16(next_left_sample);
1276  previous_right_sample = current_right_sample;
1277  current_right_sample = av_clip_int16(next_right_sample);
1278  *samples++ = current_left_sample;
1279  *samples++ = current_right_sample;
1280  }
1281  }
1282 
1283  bytestream2_skip(&gb, 2); // Skip terminating 0x0000
1284 
1285  break;
1286  }
1288  {
1289  int coeff[2][2], shift[2];
1290 
1291  for(channel = 0; channel < avctx->channels; channel++) {
1292  int byte = bytestream2_get_byteu(&gb);
1293  for (i=0; i<2; i++)
1294  coeff[channel][i] = ea_adpcm_table[(byte >> 4) + 4*i];
1295  shift[channel] = 20 - (byte & 0x0F);
1296  }
1297  for (count1 = 0; count1 < nb_samples / 2; count1++) {
1298  int byte[2];
1299 
1300  byte[0] = bytestream2_get_byteu(&gb);
1301  if (st) byte[1] = bytestream2_get_byteu(&gb);
1302  for(i = 4; i >= 0; i-=4) { /* Pairwise samples LL RR (st) or LL LL (mono) */
1303  for(channel = 0; channel < avctx->channels; channel++) {
1304  int sample = sign_extend(byte[channel] >> i, 4) * (1 << shift[channel]);
1305  sample = (sample +
1306  c->status[channel].sample1 * coeff[channel][0] +
1307  c->status[channel].sample2 * coeff[channel][1] + 0x80) >> 8;
1308  c->status[channel].sample2 = c->status[channel].sample1;
1309  c->status[channel].sample1 = av_clip_int16(sample);
1310  *samples++ = c->status[channel].sample1;
1311  }
1312  }
1313  }
1314  bytestream2_seek(&gb, 0, SEEK_END);
1315  break;
1316  }
1317  case AV_CODEC_ID_ADPCM_EA_R1:
1318  case AV_CODEC_ID_ADPCM_EA_R2:
1319  case AV_CODEC_ID_ADPCM_EA_R3: {
1320  /* channel numbering
1321  2chan: 0=fl, 1=fr
1322  4chan: 0=fl, 1=rl, 2=fr, 3=rr
1323  6chan: 0=fl, 1=c, 2=fr, 3=rl, 4=rr, 5=sub */
1324  const int big_endian = avctx->codec->id == AV_CODEC_ID_ADPCM_EA_R3;
1325  int previous_sample, current_sample, next_sample;
1326  int coeff1, coeff2;
1327  int shift;
1328  unsigned int channel;
1329  uint16_t *samplesC;
1330  int count = 0;
1331  int offsets[6];
1332 
1333  for (channel=0; channel<avctx->channels; channel++)
1334  offsets[channel] = (big_endian ? bytestream2_get_be32(&gb) :
1335  bytestream2_get_le32(&gb)) +
1336  (avctx->channels + 1) * 4;
1337 
1338  for (channel=0; channel<avctx->channels; channel++) {
1339  bytestream2_seek(&gb, offsets[channel], SEEK_SET);
1340  samplesC = samples_p[channel];
1341 
1342  if (avctx->codec->id == AV_CODEC_ID_ADPCM_EA_R1) {
1343  current_sample = sign_extend(bytestream2_get_le16(&gb), 16);
1344  previous_sample = sign_extend(bytestream2_get_le16(&gb), 16);
1345  } else {
1346  current_sample = c->status[channel].predictor;
1347  previous_sample = c->status[channel].prev_sample;
1348  }
1349 
1350  for (count1 = 0; count1 < nb_samples / 28; count1++) {
1351  int byte = bytestream2_get_byte(&gb);
1352  if (byte == 0xEE) { /* only seen in R2 and R3 */
1353  current_sample = sign_extend(bytestream2_get_be16(&gb), 16);
1354  previous_sample = sign_extend(bytestream2_get_be16(&gb), 16);
1355 
1356  for (count2=0; count2<28; count2++)
1357  *samplesC++ = sign_extend(bytestream2_get_be16(&gb), 16);
1358  } else {
1359  coeff1 = ea_adpcm_table[ byte >> 4 ];
1360  coeff2 = ea_adpcm_table[(byte >> 4) + 4];
1361  shift = 20 - (byte & 0x0F);
1362 
1363  for (count2=0; count2<28; count2++) {
1364  if (count2 & 1)
1365  next_sample = (unsigned)sign_extend(byte, 4) << shift;
1366  else {
1367  byte = bytestream2_get_byte(&gb);
1368  next_sample = (unsigned)sign_extend(byte >> 4, 4) << shift;
1369  }
1370 
1371  next_sample += (current_sample * coeff1) +
1372  (previous_sample * coeff2);
1373  next_sample = av_clip_int16(next_sample >> 8);
1374 
1375  previous_sample = current_sample;
1376  current_sample = next_sample;
1377  *samplesC++ = current_sample;
1378  }
1379  }
1380  }
1381  if (!count) {
1382  count = count1;
1383  } else if (count != count1) {
1384  av_log(avctx, AV_LOG_WARNING, "per-channel sample count mismatch\n");
1385  count = FFMAX(count, count1);
1386  }
1387 
1388  if (avctx->codec->id != AV_CODEC_ID_ADPCM_EA_R1) {
1389  c->status[channel].predictor = current_sample;
1390  c->status[channel].prev_sample = previous_sample;
1391  }
1392  }
1393 
1394  frame->nb_samples = count * 28;
1395  bytestream2_seek(&gb, 0, SEEK_END);
1396  break;
1397  }
1399  for (channel=0; channel<avctx->channels; channel++) {
1400  int coeff[2][4], shift[4];
1401  int16_t *s = samples_p[channel];
1402  for (n = 0; n < 4; n++, s += 32) {
1403  int val = sign_extend(bytestream2_get_le16u(&gb), 16);
1404  for (i=0; i<2; i++)
1405  coeff[i][n] = ea_adpcm_table[(val&0x0F)+4*i];
1406  s[0] = val & ~0x0F;
1407 
1408  val = sign_extend(bytestream2_get_le16u(&gb), 16);
1409  shift[n] = 20 - (val & 0x0F);
1410  s[1] = val & ~0x0F;
1411  }
1412 
1413  for (m=2; m<32; m+=2) {
1414  s = &samples_p[channel][m];
1415  for (n = 0; n < 4; n++, s += 32) {
1416  int level, pred;
1417  int byte = bytestream2_get_byteu(&gb);
1418 
1419  level = sign_extend(byte >> 4, 4) * (1 << shift[n]);
1420  pred = s[-1] * coeff[0][n] + s[-2] * coeff[1][n];
1421  s[0] = av_clip_int16((level + pred + 0x80) >> 8);
1422 
1423  level = sign_extend(byte, 4) * (1 << shift[n]);
1424  pred = s[0] * coeff[0][n] + s[-1] * coeff[1][n];
1425  s[1] = av_clip_int16((level + pred + 0x80) >> 8);
1426  }
1427  }
1428  }
1429  break;
1431  c->status[0].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1432  c->status[0].step_index = bytestream2_get_byteu(&gb);
1433  bytestream2_skipu(&gb, 5);
1434  if (c->status[0].step_index > 88u) {
1435  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n",
1436  c->status[0].step_index);
1437  return AVERROR_INVALIDDATA;
1438  }
1439 
1440  for (n = nb_samples >> (1 - st); n > 0; n--) {
1441  int v = bytestream2_get_byteu(&gb);
1442 
1443  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4, 3);
1444  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v & 0xf, 3);
1445  }
1446  break;
1448  for (i = 0; i < avctx->channels; i++) {
1449  c->status[i].predictor = sign_extend(bytestream2_get_be16u(&gb), 16);
1450  c->status[i].step_index = bytestream2_get_byteu(&gb);
1451  bytestream2_skipu(&gb, 1);
1452  if (c->status[i].step_index > 88u) {
1453  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n",
1454  c->status[i].step_index);
1455  return AVERROR_INVALIDDATA;
1456  }
1457  }
1458 
1459  for (n = nb_samples >> (1 - st); n > 0; n--) {
1460  int v = bytestream2_get_byteu(&gb);
1461 
1462  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[0 ], v >> 4, 3);
1463  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[st], v & 0xf, 3);
1464  }
1465  break;
1466  case AV_CODEC_ID_ADPCM_CT:
1467  for (n = nb_samples >> (1 - st); n > 0; n--) {
1468  int v = bytestream2_get_byteu(&gb);
1469  *samples++ = adpcm_ct_expand_nibble(&c->status[0 ], v >> 4 );
1470  *samples++ = adpcm_ct_expand_nibble(&c->status[st], v & 0x0F);
1471  }
1472  break;
1473  case AV_CODEC_ID_ADPCM_SBPRO_4:
1474  case AV_CODEC_ID_ADPCM_SBPRO_3:
1475  case AV_CODEC_ID_ADPCM_SBPRO_2:
1476  if (!c->status[0].step_index) {
1477  /* the first byte is a raw sample */
1478  *samples++ = 128 * (bytestream2_get_byteu(&gb) - 0x80);
1479  if (st)
1480  *samples++ = 128 * (bytestream2_get_byteu(&gb) - 0x80);
1481  c->status[0].step_index = 1;
1482  nb_samples--;
1483  }
1484  if (avctx->codec->id == AV_CODEC_ID_ADPCM_SBPRO_4) {
1485  for (n = nb_samples >> (1 - st); n > 0; n--) {
1486  int byte = bytestream2_get_byteu(&gb);
1487  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1488  byte >> 4, 4, 0);
1489  *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
1490  byte & 0x0F, 4, 0);
1491  }
1492  } else if (avctx->codec->id == AV_CODEC_ID_ADPCM_SBPRO_3) {
1493  for (n = (nb_samples<<st) / 3; n > 0; n--) {
1494  int byte = bytestream2_get_byteu(&gb);
1495  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1496  byte >> 5 , 3, 0);
1497  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1498  (byte >> 2) & 0x07, 3, 0);
1499  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1500  byte & 0x03, 2, 0);
1501  }
1502  } else {
1503  for (n = nb_samples >> (2 - st); n > 0; n--) {
1504  int byte = bytestream2_get_byteu(&gb);
1505  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1506  byte >> 6 , 2, 2);
1507  *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
1508  (byte >> 4) & 0x03, 2, 2);
1509  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1510  (byte >> 2) & 0x03, 2, 2);
1511  *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
1512  byte & 0x03, 2, 2);
1513  }
1514  }
1515  break;
1516  case AV_CODEC_ID_ADPCM_SWF:
1517  adpcm_swf_decode(avctx, buf, buf_size, samples);
1518  bytestream2_seek(&gb, 0, SEEK_END);
1519  break;
1520  case AV_CODEC_ID_ADPCM_YAMAHA:
1521  for (n = nb_samples >> (1 - st); n > 0; n--) {
1522  int v = bytestream2_get_byteu(&gb);
1523  *samples++ = adpcm_yamaha_expand_nibble(&c->status[0 ], v & 0x0F);
1524  *samples++ = adpcm_yamaha_expand_nibble(&c->status[st], v >> 4 );
1525  }
1526  break;
1527  case AV_CODEC_ID_ADPCM_AICA:
1528  if (!c->has_status) {
1529  for (channel = 0; channel < avctx->channels; channel++)
1530  c->status[channel].step = 0;
1531  c->has_status = 1;
1532  }
1533  for (channel = 0; channel < avctx->channels; channel++) {
1534  samples = samples_p[channel];
1535  for (n = nb_samples >> 1; n > 0; n--) {
1536  int v = bytestream2_get_byteu(&gb);
1537  *samples++ = adpcm_yamaha_expand_nibble(&c->status[channel], v & 0x0F);
1538  *samples++ = adpcm_yamaha_expand_nibble(&c->status[channel], v >> 4 );
1539  }
1540  }
1541  break;
1542  case AV_CODEC_ID_ADPCM_AFC:
1543  {
1544  int samples_per_block;
1545  int blocks;
1546 
1547  if (avctx->extradata && avctx->extradata_size == 1 && avctx->extradata[0]) {
1548  samples_per_block = avctx->extradata[0] / 16;
1549  blocks = nb_samples / avctx->extradata[0];
1550  } else {
1551  samples_per_block = nb_samples / 16;
1552  blocks = 1;
1553  }
1554 
1555  for (m = 0; m < blocks; m++) {
1556  for (channel = 0; channel < avctx->channels; channel++) {
1557  int prev1 = c->status[channel].sample1;
1558  int prev2 = c->status[channel].sample2;
1559 
1560  samples = samples_p[channel] + m * 16;
1561  /* Read in every sample for this channel. */
1562  for (i = 0; i < samples_per_block; i++) {
1563  int byte = bytestream2_get_byteu(&gb);
1564  int scale = 1 << (byte >> 4);
1565  int index = byte & 0xf;
1566  int factor1 = ff_adpcm_afc_coeffs[0][index];
1567  int factor2 = ff_adpcm_afc_coeffs[1][index];
1568 
1569  /* Decode 16 samples. */
1570  for (n = 0; n < 16; n++) {
1571  int32_t sampledat;
1572 
1573  if (n & 1) {
1574  sampledat = sign_extend(byte, 4);
1575  } else {
1576  byte = bytestream2_get_byteu(&gb);
1577  sampledat = sign_extend(byte >> 4, 4);
1578  }
1579 
1580  sampledat = ((prev1 * factor1 + prev2 * factor2) >> 11) +
1581  sampledat * scale;
1582  *samples = av_clip_int16(sampledat);
1583  prev2 = prev1;
1584  prev1 = *samples++;
1585  }
1586  }
1587 
1588  c->status[channel].sample1 = prev1;
1589  c->status[channel].sample2 = prev2;
1590  }
1591  }
1592  bytestream2_seek(&gb, 0, SEEK_END);
1593  break;
1594  }
1595  case AV_CODEC_ID_ADPCM_THP:
1596  case AV_CODEC_ID_ADPCM_THP_LE:
1597  {
1598  int table[14][16];
1599  int ch;
1600 
1601 #define THP_GET16(g) \
1602  sign_extend( \
1603  avctx->codec->id == AV_CODEC_ID_ADPCM_THP_LE ? \
1604  bytestream2_get_le16u(&(g)) : \
1605  bytestream2_get_be16u(&(g)), 16)
1606 
1607  if (avctx->extradata) {
1608  GetByteContext tb;
1609  if (avctx->extradata_size < 32 * avctx->channels) {
1610  av_log(avctx, AV_LOG_ERROR, "Missing coeff table\n");
1611  return AVERROR_INVALIDDATA;
1612  }
1613 
1614  bytestream2_init(&tb, avctx->extradata, avctx->extradata_size);
1615  for (i = 0; i < avctx->channels; i++)
1616  for (n = 0; n < 16; n++)
1617  table[i][n] = THP_GET16(tb);
1618  } else {
1619  for (i = 0; i < avctx->channels; i++)
1620  for (n = 0; n < 16; n++)
1621  table[i][n] = THP_GET16(gb);
1622 
1623  if (!c->has_status) {
1624  /* Initialize the previous sample. */
1625  for (i = 0; i < avctx->channels; i++) {
1626  c->status[i].sample1 = THP_GET16(gb);
1627  c->status[i].sample2 = THP_GET16(gb);
1628  }
1629  c->has_status = 1;
1630  } else {
1631  bytestream2_skip(&gb, avctx->channels * 4);
1632  }
1633  }
1634 
1635  for (ch = 0; ch < avctx->channels; ch++) {
1636  samples = samples_p[ch];
1637 
1638  /* Read in every sample for this channel. */
1639  for (i = 0; i < (nb_samples + 13) / 14; i++) {
1640  int byte = bytestream2_get_byteu(&gb);
1641  int index = (byte >> 4) & 7;
1642  unsigned int exp = byte & 0x0F;
1643  int64_t factor1 = table[ch][index * 2];
1644  int64_t factor2 = table[ch][index * 2 + 1];
1645 
1646  /* Decode 14 samples. */
1647  for (n = 0; n < 14 && (i * 14 + n < nb_samples); n++) {
1648  int32_t sampledat;
1649 
1650  if (n & 1) {
1651  sampledat = sign_extend(byte, 4);
1652  } else {
1653  byte = bytestream2_get_byteu(&gb);
1654  sampledat = sign_extend(byte >> 4, 4);
1655  }
1656 
1657  sampledat = ((c->status[ch].sample1 * factor1
1658  + c->status[ch].sample2 * factor2) >> 11) + sampledat * (1 << exp);
1659  *samples = av_clip_int16(sampledat);
1660  c->status[ch].sample2 = c->status[ch].sample1;
1661  c->status[ch].sample1 = *samples++;
1662  }
1663  }
1664  }
1665  break;
1666  }
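/* Note: THP packs 14 samples into each 8-byte unit: one header byte holding a 3-bit
 * coefficient-pair index and a 4-bit shift, then 7 bytes of 4-bit residuals; this is
 * why get_nb_samples() computes buf_size / 8 * 14 for this codec. */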
1667  case AV_CODEC_ID_ADPCM_DTK:
1668  for (channel = 0; channel < avctx->channels; channel++) {
1669  samples = samples_p[channel];
1670 
1671  /* Read in every sample for this channel. */
1672  for (i = 0; i < nb_samples / 28; i++) {
1673  int byte, header;
1674  if (channel)
1675  bytestream2_skipu(&gb, 1);
1676  header = bytestream2_get_byteu(&gb);
1677  bytestream2_skipu(&gb, 3 - channel);
1678 
1679  /* Decode 28 samples. */
1680  for (n = 0; n < 28; n++) {
1681  int32_t sampledat, prev;
1682 
1683  switch (header >> 4) {
1684  case 1:
1685  prev = (c->status[channel].sample1 * 0x3c);
1686  break;
1687  case 2:
1688  prev = (c->status[channel].sample1 * 0x73) - (c->status[channel].sample2 * 0x34);
1689  break;
1690  case 3:
1691  prev = (c->status[channel].sample1 * 0x62) - (c->status[channel].sample2 * 0x37);
1692  break;
1693  default:
1694  prev = 0;
1695  }
1696 
1697  prev = av_clip_intp2((prev + 0x20) >> 6, 21);
1698 
1699  byte = bytestream2_get_byteu(&gb);
1700  if (!channel)
1701  sampledat = sign_extend(byte, 4);
1702  else
1703  sampledat = sign_extend(byte >> 4, 4);
1704 
1705  sampledat = ((sampledat * (1 << 12)) >> (header & 0xf)) * (1 << 6) + prev;
1706  *samples++ = av_clip_int16(sampledat >> 6);
1707  c->status[channel].sample2 = c->status[channel].sample1;
1708  c->status[channel].sample1 = sampledat;
1709  }
1710  }
1711  if (!channel)
1712  bytestream2_seek(&gb, 0, SEEK_SET);
1713  }
1714  break;
1715  case AV_CODEC_ID_ADPCM_PSX:
1716  for (channel = 0; channel < avctx->channels; channel++) {
1717  samples = samples_p[channel];
1718 
1719  /* Read in every sample for this channel. */
1720  for (i = 0; i < nb_samples / 28; i++) {
1721  int filter, shift, flag, byte;
1722 
1723  filter = bytestream2_get_byteu(&gb);
1724  shift = filter & 0xf;
1725  filter = filter >> 4;
1726  if (filter >= FF_ARRAY_ELEMS(xa_adpcm_table))
1727  return AVERROR_INVALIDDATA;
1728  flag = bytestream2_get_byteu(&gb);
1729 
1730  /* Decode 28 samples. */
1731  for (n = 0; n < 28; n++) {
1732  int sample = 0, scale;
1733 
1734  if (flag < 0x07) {
1735  if (n & 1) {
1736  scale = sign_extend(byte >> 4, 4);
1737  } else {
1738  byte = bytestream2_get_byteu(&gb);
1739  scale = sign_extend(byte, 4);
1740  }
1741 
1742  scale = scale * (1 << 12);
1743  sample = (int)((scale >> shift) + (c->status[channel].sample1 * xa_adpcm_table[filter][0] + c->status[channel].sample2 * xa_adpcm_table[filter][1]) / 64);
1744  }
1745  *samples++ = av_clip_int16(sample);
1746  c->status[channel].sample2 = c->status[channel].sample1;
1747  c->status[channel].sample1 = sample;
1748  }
1749  }
1750  }
1751  break;
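/* Note: a PSX SPU block is 16 bytes per channel: a filter/shift byte and a flag byte
 * followed by 14 data bytes (28 nibbles, i.e. 28 samples); flag values >= 0x07 leave
 * the samples at zero, and the prediction reuses the CD-XA filter table above.
 * get_nb_samples() accordingly uses buf_size / (16 * ch) * 28. */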
1752 
1753  default:
1754  av_assert0(0); // unsupported codec_id should not happen
1755  }
1756 
1757  if (avpkt->size && bytestream2_tell(&gb) == 0) {
1758  av_log(avctx, AV_LOG_ERROR, "Nothing consumed\n");
1759  return AVERROR_INVALIDDATA;
1760  }
1761 
1762  *got_frame_ptr = 1;
1763 
1764  if (avpkt->size < bytestream2_tell(&gb)) {
1765  av_log(avctx, AV_LOG_ERROR, "Overread of %d < %d\n", avpkt->size, bytestream2_tell(&gb));
1766  return avpkt->size;
1767  }
1768 
1769  return bytestream2_tell(&gb);
1770 }
1771 
1772 static void adpcm_flush(AVCodecContext *avctx)
1773 {
1774  ADPCMDecodeContext *c = avctx->priv_data;
1775  c->has_status = 0;
1776 }
1777 
1778 
1779 static const enum AVSampleFormat sample_fmts_s16[]  = { AV_SAMPLE_FMT_S16,
1780                                                         AV_SAMPLE_FMT_NONE };
1781 static const enum AVSampleFormat sample_fmts_s16p[] = { AV_SAMPLE_FMT_S16P,
1782                                                         AV_SAMPLE_FMT_NONE };
1783 static const enum AVSampleFormat sample_fmts_both[] = { AV_SAMPLE_FMT_S16,
1784                                                         AV_SAMPLE_FMT_S16P,
1785                                                         AV_SAMPLE_FMT_NONE };
1786 
1787 #define ADPCM_DECODER(id_, sample_fmts_, name_, long_name_) \
1788 AVCodec ff_ ## name_ ## _decoder = { \
1789  .name = #name_, \
1790  .long_name = NULL_IF_CONFIG_SMALL(long_name_), \
1791  .type = AVMEDIA_TYPE_AUDIO, \
1792  .id = id_, \
1793  .priv_data_size = sizeof(ADPCMDecodeContext), \
1794  .init = adpcm_decode_init, \
1795  .decode = adpcm_decode_frame, \
1796  .flush = adpcm_flush, \
1797  .capabilities = AV_CODEC_CAP_DR1, \
1798  .sample_fmts = sample_fmts_, \
1799 }
1800 
1801 /* Note: Do not forget to add new entries to the Makefile as well. */
1802 ADPCM_DECODER(AV_CODEC_ID_ADPCM_4XM, sample_fmts_s16p, adpcm_4xm, "ADPCM 4X Movie");
1803 ADPCM_DECODER(AV_CODEC_ID_ADPCM_AFC, sample_fmts_s16p, adpcm_afc, "ADPCM Nintendo Gamecube AFC");
1804 ADPCM_DECODER(AV_CODEC_ID_ADPCM_AGM, sample_fmts_s16, adpcm_agm, "ADPCM AmuseGraphics Movie");
1805 ADPCM_DECODER(AV_CODEC_ID_ADPCM_AICA, sample_fmts_s16p, adpcm_aica, "ADPCM Yamaha AICA");
1806 ADPCM_DECODER(AV_CODEC_ID_ADPCM_CT, sample_fmts_s16, adpcm_ct, "ADPCM Creative Technology");
1807 ADPCM_DECODER(AV_CODEC_ID_ADPCM_DTK, sample_fmts_s16p, adpcm_dtk, "ADPCM Nintendo Gamecube DTK");
1808 ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA, sample_fmts_s16, adpcm_ea, "ADPCM Electronic Arts");
1809 ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_MAXIS_XA, sample_fmts_s16, adpcm_ea_maxis_xa, "ADPCM Electronic Arts Maxis CDROM XA");
1810 ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_R1, sample_fmts_s16p, adpcm_ea_r1, "ADPCM Electronic Arts R1");
1811 ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_R2, sample_fmts_s16p, adpcm_ea_r2, "ADPCM Electronic Arts R2");
1812 ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_R3, sample_fmts_s16p, adpcm_ea_r3, "ADPCM Electronic Arts R3");
1813 ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_XAS, sample_fmts_s16p, adpcm_ea_xas, "ADPCM Electronic Arts XAS");
1814 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_AMV, sample_fmts_s16, adpcm_ima_amv, "ADPCM IMA AMV");
1815 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_APC, sample_fmts_s16, adpcm_ima_apc, "ADPCM IMA CRYO APC");
1816 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_DAT4, sample_fmts_s16, adpcm_ima_dat4, "ADPCM IMA Eurocom DAT4");
1817 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_DK3, sample_fmts_s16, adpcm_ima_dk3, "ADPCM IMA Duck DK3");
1818 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_DK4, sample_fmts_s16, adpcm_ima_dk4, "ADPCM IMA Duck DK4");
1819 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_EA_EACS, sample_fmts_s16, adpcm_ima_ea_eacs, "ADPCM IMA Electronic Arts EACS");
1820 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_EA_SEAD, sample_fmts_s16, adpcm_ima_ea_sead, "ADPCM IMA Electronic Arts SEAD");
1821 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_ISS, sample_fmts_s16, adpcm_ima_iss, "ADPCM IMA Funcom ISS");
1822 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_OKI, sample_fmts_s16, adpcm_ima_oki, "ADPCM IMA Dialogic OKI");
1823 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_QT, sample_fmts_s16p, adpcm_ima_qt, "ADPCM IMA QuickTime");
1824 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_RAD, sample_fmts_s16, adpcm_ima_rad, "ADPCM IMA Radical");
1825 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_SMJPEG, sample_fmts_s16, adpcm_ima_smjpeg, "ADPCM IMA Loki SDL MJPEG");
1826 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_WAV, sample_fmts_s16p, adpcm_ima_wav, "ADPCM IMA WAV");
1827 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_WS, sample_fmts_both, adpcm_ima_ws, "ADPCM IMA Westwood");
1828 ADPCM_DECODER(AV_CODEC_ID_ADPCM_MS, sample_fmts_s16, adpcm_ms, "ADPCM Microsoft");
1829 ADPCM_DECODER(AV_CODEC_ID_ADPCM_MTAF, sample_fmts_s16p, adpcm_mtaf, "ADPCM MTAF");
1830 ADPCM_DECODER(AV_CODEC_ID_ADPCM_PSX, sample_fmts_s16p, adpcm_psx, "ADPCM Playstation");
1831 ADPCM_DECODER(AV_CODEC_ID_ADPCM_SBPRO_2, sample_fmts_s16, adpcm_sbpro_2, "ADPCM Sound Blaster Pro 2-bit");
1832 ADPCM_DECODER(AV_CODEC_ID_ADPCM_SBPRO_3, sample_fmts_s16, adpcm_sbpro_3, "ADPCM Sound Blaster Pro 2.6-bit");
1833 ADPCM_DECODER(AV_CODEC_ID_ADPCM_SBPRO_4, sample_fmts_s16, adpcm_sbpro_4, "ADPCM Sound Blaster Pro 4-bit");
1834 ADPCM_DECODER(AV_CODEC_ID_ADPCM_SWF, sample_fmts_s16, adpcm_swf, "ADPCM Shockwave Flash");
1835 ADPCM_DECODER(AV_CODEC_ID_ADPCM_THP_LE, sample_fmts_s16p, adpcm_thp_le, "ADPCM Nintendo THP (little-endian)");
1836 ADPCM_DECODER(AV_CODEC_ID_ADPCM_THP, sample_fmts_s16p, adpcm_thp, "ADPCM Nintendo THP");
1837 ADPCM_DECODER(AV_CODEC_ID_ADPCM_XA, sample_fmts_s16p, adpcm_xa, "ADPCM CDROM XA");
1838 ADPCM_DECODER(AV_CODEC_ID_ADPCM_YAMAHA, sample_fmts_s16, adpcm_yamaha, "ADPCM Yamaha");