FFmpeg
adpcm.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2001-2003 The FFmpeg project
3  *
4  * first version by Francois Revol (revol@free.fr)
5  * fringe ADPCM codecs (e.g., DK3, DK4, Westwood)
6  * by Mike Melanson (melanson@pcisys.net)
7  * CD-ROM XA ADPCM codec by BERO
8  * EA ADPCM decoder by Robin Kay (komadori@myrealbox.com)
9  * EA ADPCM R1/R2/R3 decoder by Peter Ross (pross@xvid.org)
10  * EA IMA EACS decoder by Peter Ross (pross@xvid.org)
11  * EA IMA SEAD decoder by Peter Ross (pross@xvid.org)
12  * EA ADPCM XAS decoder by Peter Ross (pross@xvid.org)
13  * MAXIS EA ADPCM decoder by Robert Marston (rmarston@gmail.com)
14  * THP ADPCM decoder by Marco Gerards (mgerards@xs4all.nl)
15  *
16  * This file is part of FFmpeg.
17  *
18  * FFmpeg is free software; you can redistribute it and/or
19  * modify it under the terms of the GNU Lesser General Public
20  * License as published by the Free Software Foundation; either
21  * version 2.1 of the License, or (at your option) any later version.
22  *
23  * FFmpeg is distributed in the hope that it will be useful,
24  * but WITHOUT ANY WARRANTY; without even the implied warranty of
25  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
26  * Lesser General Public License for more details.
27  *
28  * You should have received a copy of the GNU Lesser General Public
29  * License along with FFmpeg; if not, write to the Free Software
30  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
31  */
32 #include "avcodec.h"
33 #include "get_bits.h"
34 #include "bytestream.h"
35 #include "adpcm.h"
36 #include "adpcm_data.h"
37 #include "internal.h"
38 
39 /**
40  * @file
41  * ADPCM decoders
42  * Features and limitations:
43  *
44  * Reference documents:
45  * http://wiki.multimedia.cx/index.php?title=Category:ADPCM_Audio_Codecs
46  * http://www.pcisys.net/~melanson/codecs/simpleaudio.html [dead]
47  * http://www.geocities.com/SiliconValley/8682/aud3.txt [dead]
48  * http://openquicktime.sourceforge.net/
49  * XAnim sources (xa_codec.c) http://xanim.polter.net/
50  * http://www.cs.ucla.edu/~leec/mediabench/applications.html [dead]
51  * SoX source code http://sox.sourceforge.net/
52  *
53  * CD-ROM XA:
54  * http://ku-www.ss.titech.ac.jp/~yatsushi/xaadpcm.html [dead]
55  * vagpack & depack http://homepages.compuserve.de/bITmASTER32/psx-index.html [dead]
56  * readstr http://www.geocities.co.jp/Playtown/2004/
57  */
58 
59 /* These are for CD-ROM XA ADPCM */
/* Prediction filter coefficients for CD-ROM XA ADPCM.
 * Each row is a {K0, K1} pair in 6-bit fixed point (applied as
 * (s_1*K0 + s_2*K1 + 32) >> 6 in xa_decode()). */
static const int8_t xa_adpcm_table[5][2] = {
    {   0,   0 },   /* filter 0: no prediction */
    {  60,   0 },   /* filter 1: first-order   */
    { 115, -52 },   /* filter 2: second-order  */
    {  98, -55 },   /* filter 3: second-order  */
    { 122, -60 },   /* filter 4: second-order  */
};
67 
/* Coefficient/shift table shared by the EA ADPCM variants.
 * Laid out as five groups of four entries; decoders index it flat. */
static const int16_t ea_adpcm_table[] = {
      0,  240,  460,  392,   /* predictor coefficient 1 */
      0,    0, -208, -220,   /* predictor coefficient 2 */
      0,    1,    3,    4,   /* exponent deltas (low)   */
      7,    8,   10,   11,   /* exponent deltas (high)  */
      0,   -1,   -3,   -4,   /* exponent deltas (neg)   */
};
75 
// padded to zero where table size is less than 16
/* IMA step-index adjustment tables for SWF ADPCM, one row per code
 * width (2..5 bits). A row holds 2^(bits-1) entries; unused trailing
 * slots stay zero from static initialization. */
static const int8_t swf_index_tables[4][16] = {
    /* 2-bit */ { -1, 2 },
    /* 3-bit */ { -1, -1, 2, 4 },
    /* 4-bit */ { -1, -1, -1, -1, 2, 4, 6, 8 },
    /* 5-bit */ { -1, -1, -1, -1, -1, -1, -1, -1,
                   1,  2,  4,  6,  8, 10, 13, 16 },
};
83 
84 /* end of tables */
85 
86 typedef struct ADPCMDecodeContext {
88  int vqa_version; /**< VQA version. Used for ADPCM_IMA_WS */
91 
93 {
94  ADPCMDecodeContext *c = avctx->priv_data;
95  unsigned int min_channels = 1;
96  unsigned int max_channels = 2;
97 
98  switch(avctx->codec->id) {
101  min_channels = 2;
102  break;
109  max_channels = 6;
110  break;
112  min_channels = 2;
113  max_channels = 8;
114  if (avctx->channels & 1) {
115  avpriv_request_sample(avctx, "channel count %d\n", avctx->channels);
116  return AVERROR_PATCHWELCOME;
117  }
118  break;
120  max_channels = 8;
121  break;
125  max_channels = 14;
126  break;
127  }
128  if (avctx->channels < min_channels || avctx->channels > max_channels) {
129  av_log(avctx, AV_LOG_ERROR, "Invalid number of channels\n");
130  return AVERROR(EINVAL);
131  }
132 
133  switch(avctx->codec->id) {
135  c->status[0].step = c->status[1].step = 511;
136  break;
138  if (avctx->bits_per_coded_sample < 2 || avctx->bits_per_coded_sample > 5)
139  return AVERROR_INVALIDDATA;
140  break;
142  if (avctx->extradata && avctx->extradata_size >= 8) {
143  c->status[0].predictor = AV_RL32(avctx->extradata);
144  c->status[1].predictor = AV_RL32(avctx->extradata + 4);
145  }
146  break;
148  if (avctx->extradata && avctx->extradata_size >= 2)
149  c->vqa_version = AV_RL16(avctx->extradata);
150  break;
151  default:
152  break;
153  }
154 
155  switch(avctx->codec->id) {
173  break;
175  avctx->sample_fmt = c->vqa_version == 3 ? AV_SAMPLE_FMT_S16P :
177  break;
179  avctx->sample_fmt = avctx->channels > 2 ? AV_SAMPLE_FMT_S16P :
181  break;
182  default:
183  avctx->sample_fmt = AV_SAMPLE_FMT_S16;
184  }
185 
186  return 0;
187 }
188 
189 static inline int16_t adpcm_agm_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
190 {
191  int delta, pred, step, add;
192 
193  pred = c->predictor;
194  delta = nibble & 7;
195  step = c->step;
196  add = (delta * 2 + 1) * step;
197  if (add < 0)
198  add = add + 7;
199 
200  if ((nibble & 8) == 0)
201  pred = av_clip(pred + (add >> 3), -32767, 32767);
202  else
203  pred = av_clip(pred - (add >> 3), -32767, 32767);
204 
205  switch (delta) {
206  case 7:
207  step *= 0x99;
208  break;
209  case 6:
210  c->step = av_clip(c->step * 2, 127, 24576);
211  c->predictor = pred;
212  return pred;
213  case 5:
214  step *= 0x66;
215  break;
216  case 4:
217  step *= 0x4d;
218  break;
219  default:
220  step *= 0x39;
221  break;
222  }
223 
224  if (step < 0)
225  step += 0x3f;
226 
227  c->step = step >> 6;
228  c->step = av_clip(c->step, 127, 24576);
229  c->predictor = pred;
230  return pred;
231 }
232 
233 static inline int16_t adpcm_ima_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int shift)
234 {
235  int step_index;
236  int predictor;
237  int sign, delta, diff, step;
238 
239  step = ff_adpcm_step_table[c->step_index];
240  step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
241  step_index = av_clip(step_index, 0, 88);
242 
243  sign = nibble & 8;
244  delta = nibble & 7;
245  /* perform direct multiplication instead of series of jumps proposed by
246  * the reference ADPCM implementation since modern CPUs can do the mults
247  * quickly enough */
248  diff = ((2 * delta + 1) * step) >> shift;
249  predictor = c->predictor;
250  if (sign) predictor -= diff;
251  else predictor += diff;
252 
253  c->predictor = av_clip_int16(predictor);
254  c->step_index = step_index;
255 
256  return (int16_t)c->predictor;
257 }
258 
260 {
261  int nibble, step_index, predictor, sign, delta, diff, step, shift;
262 
263  shift = bps - 1;
264  nibble = get_bits_le(gb, bps),
265  step = ff_adpcm_step_table[c->step_index];
266  step_index = c->step_index + ff_adpcm_index_tables[bps - 2][nibble];
267  step_index = av_clip(step_index, 0, 88);
268 
269  sign = nibble & (1 << shift);
270  delta = av_mod_uintp2(nibble, shift);
271  diff = ((2 * delta + 1) * step) >> shift;
272  predictor = c->predictor;
273  if (sign) predictor -= diff;
274  else predictor += diff;
275 
276  c->predictor = av_clip_int16(predictor);
277  c->step_index = step_index;
278 
279  return (int16_t)c->predictor;
280 }
281 
282 static inline int adpcm_ima_qt_expand_nibble(ADPCMChannelStatus *c, int nibble, int shift)
283 {
284  int step_index;
285  int predictor;
286  int diff, step;
287 
288  step = ff_adpcm_step_table[c->step_index];
289  step_index = c->step_index + ff_adpcm_index_table[nibble];
290  step_index = av_clip(step_index, 0, 88);
291 
292  diff = step >> 3;
293  if (nibble & 4) diff += step;
294  if (nibble & 2) diff += step >> 1;
295  if (nibble & 1) diff += step >> 2;
296 
297  if (nibble & 8)
298  predictor = c->predictor - diff;
299  else
300  predictor = c->predictor + diff;
301 
302  c->predictor = av_clip_int16(predictor);
303  c->step_index = step_index;
304 
305  return c->predictor;
306 }
307 
308 static inline int16_t adpcm_ms_expand_nibble(ADPCMChannelStatus *c, int nibble)
309 {
310  int predictor;
311 
312  predictor = (((c->sample1) * (c->coeff1)) + ((c->sample2) * (c->coeff2))) / 64;
313  predictor += ((nibble & 0x08)?(nibble - 0x10):(nibble)) * c->idelta;
314 
315  c->sample2 = c->sample1;
316  c->sample1 = av_clip_int16(predictor);
317  c->idelta = (ff_adpcm_AdaptationTable[(int)nibble] * c->idelta) >> 8;
318  if (c->idelta < 16) c->idelta = 16;
319  if (c->idelta > INT_MAX/768) {
320  av_log(NULL, AV_LOG_WARNING, "idelta overflow\n");
321  c->idelta = INT_MAX/768;
322  }
323 
324  return c->sample1;
325 }
326 
327 static inline int16_t adpcm_ima_oki_expand_nibble(ADPCMChannelStatus *c, int nibble)
328 {
329  int step_index, predictor, sign, delta, diff, step;
330 
332  step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
333  step_index = av_clip(step_index, 0, 48);
334 
335  sign = nibble & 8;
336  delta = nibble & 7;
337  diff = ((2 * delta + 1) * step) >> 3;
338  predictor = c->predictor;
339  if (sign) predictor -= diff;
340  else predictor += diff;
341 
342  c->predictor = av_clip_intp2(predictor, 11);
343  c->step_index = step_index;
344 
345  return c->predictor * 16;
346 }
347 
348 static inline int16_t adpcm_ct_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
349 {
350  int sign, delta, diff;
351  int new_step;
352 
353  sign = nibble & 8;
354  delta = nibble & 7;
355  /* perform direct multiplication instead of series of jumps proposed by
356  * the reference ADPCM implementation since modern CPUs can do the mults
357  * quickly enough */
358  diff = ((2 * delta + 1) * c->step) >> 3;
359  /* predictor update is not so trivial: predictor is multiplied on 254/256 before updating */
360  c->predictor = ((c->predictor * 254) >> 8) + (sign ? -diff : diff);
361  c->predictor = av_clip_int16(c->predictor);
362  /* calculate new step and clamp it to range 511..32767 */
363  new_step = (ff_adpcm_AdaptationTable[nibble & 7] * c->step) >> 8;
364  c->step = av_clip(new_step, 511, 32767);
365 
366  return (int16_t)c->predictor;
367 }
368 
369 static inline int16_t adpcm_sbpro_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int size, int shift)
370 {
371  int sign, delta, diff;
372 
373  sign = nibble & (1<<(size-1));
374  delta = nibble & ((1<<(size-1))-1);
375  diff = delta << (7 + c->step + shift);
376 
377  /* clamp result */
378  c->predictor = av_clip(c->predictor + (sign ? -diff : diff), -16384,16256);
379 
380  /* calculate new step */
381  if (delta >= (2*size - 3) && c->step < 3)
382  c->step++;
383  else if (delta == 0 && c->step > 0)
384  c->step--;
385 
386  return (int16_t) c->predictor;
387 }
388 
390 {
391  if(!c->step) {
392  c->predictor = 0;
393  c->step = 127;
394  }
395 
396  c->predictor += (c->step * ff_adpcm_yamaha_difflookup[nibble]) / 8;
397  c->predictor = av_clip_int16(c->predictor);
398  c->step = (c->step * ff_adpcm_yamaha_indexscale[nibble]) >> 8;
399  c->step = av_clip(c->step, 127, 24576);
400  return c->predictor;
401 }
402 
403 static inline int16_t adpcm_mtaf_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
404 {
405  c->predictor += ff_adpcm_mtaf_stepsize[c->step][nibble];
406  c->predictor = av_clip_int16(c->predictor);
407  c->step += ff_adpcm_index_table[nibble];
408  c->step = av_clip_uintp2(c->step, 5);
409  return c->predictor;
410 }
411 
412 static int xa_decode(AVCodecContext *avctx, int16_t *out0, int16_t *out1,
414  ADPCMChannelStatus *right, int channels, int sample_offset)
415 {
416  int i, j;
417  int shift,filter,f0,f1;
418  int s_1,s_2;
419  int d,s,t;
420 
421  out0 += sample_offset;
422  if (channels == 1)
423  out1 = out0 + 28;
424  else
425  out1 += sample_offset;
426 
427  for(i=0;i<4;i++) {
428  shift = 12 - (in[4+i*2] & 15);
429  filter = in[4+i*2] >> 4;
430  if (filter >= FF_ARRAY_ELEMS(xa_adpcm_table)) {
431  avpriv_request_sample(avctx, "unknown XA-ADPCM filter %d", filter);
432  filter=0;
433  }
434  f0 = xa_adpcm_table[filter][0];
435  f1 = xa_adpcm_table[filter][1];
436 
437  s_1 = left->sample1;
438  s_2 = left->sample2;
439 
440  for(j=0;j<28;j++) {
441  d = in[16+i+j*4];
442 
443  t = sign_extend(d, 4);
444  s = ( t<<shift ) + ((s_1*f0 + s_2*f1+32)>>6);
445  s_2 = s_1;
446  s_1 = av_clip_int16(s);
447  out0[j] = s_1;
448  }
449 
450  if (channels == 2) {
451  left->sample1 = s_1;
452  left->sample2 = s_2;
453  s_1 = right->sample1;
454  s_2 = right->sample2;
455  }
456 
457  shift = 12 - (in[5+i*2] & 15);
458  filter = in[5+i*2] >> 4;
459  if (filter >= FF_ARRAY_ELEMS(xa_adpcm_table)) {
460  avpriv_request_sample(avctx, "unknown XA-ADPCM filter %d", filter);
461  filter=0;
462  }
463 
464  f0 = xa_adpcm_table[filter][0];
465  f1 = xa_adpcm_table[filter][1];
466 
467  for(j=0;j<28;j++) {
468  d = in[16+i+j*4];
469 
470  t = sign_extend(d >> 4, 4);
471  s = ( t<<shift ) + ((s_1*f0 + s_2*f1+32)>>6);
472  s_2 = s_1;
473  s_1 = av_clip_int16(s);
474  out1[j] = s_1;
475  }
476 
477  if (channels == 2) {
478  right->sample1 = s_1;
479  right->sample2 = s_2;
480  } else {
481  left->sample1 = s_1;
482  left->sample2 = s_2;
483  }
484 
485  out0 += 28 * (3 - channels);
486  out1 += 28 * (3 - channels);
487  }
488 
489  return 0;
490 }
491 
492 static void adpcm_swf_decode(AVCodecContext *avctx, const uint8_t *buf, int buf_size, int16_t *samples)
493 {
494  ADPCMDecodeContext *c = avctx->priv_data;
495  GetBitContext gb;
496  const int8_t *table;
497  int k0, signmask, nb_bits, count;
498  int size = buf_size*8;
499  int i;
500 
501  init_get_bits(&gb, buf, size);
502 
503  //read bits & initial values
504  nb_bits = get_bits(&gb, 2)+2;
505  table = swf_index_tables[nb_bits-2];
506  k0 = 1 << (nb_bits-2);
507  signmask = 1 << (nb_bits-1);
508 
509  while (get_bits_count(&gb) <= size - 22*avctx->channels) {
510  for (i = 0; i < avctx->channels; i++) {
511  *samples++ = c->status[i].predictor = get_sbits(&gb, 16);
512  c->status[i].step_index = get_bits(&gb, 6);
513  }
514 
515  for (count = 0; get_bits_count(&gb) <= size - nb_bits*avctx->channels && count < 4095; count++) {
516  int i;
517 
518  for (i = 0; i < avctx->channels; i++) {
519  // similar to IMA adpcm
520  int delta = get_bits(&gb, nb_bits);
522  int vpdiff = 0; // vpdiff = (delta+0.5)*step/4
523  int k = k0;
524 
525  do {
526  if (delta & k)
527  vpdiff += step;
528  step >>= 1;
529  k >>= 1;
530  } while(k);
531  vpdiff += step;
532 
533  if (delta & signmask)
534  c->status[i].predictor -= vpdiff;
535  else
536  c->status[i].predictor += vpdiff;
537 
538  c->status[i].step_index += table[delta & (~signmask)];
539 
540  c->status[i].step_index = av_clip(c->status[i].step_index, 0, 88);
541  c->status[i].predictor = av_clip_int16(c->status[i].predictor);
542 
543  *samples++ = c->status[i].predictor;
544  }
545  }
546  }
547 }
548 
549 /**
550  * Get the number of samples that will be decoded from the packet.
551  * In one case, this is actually the maximum number of samples possible to
552  * decode with the given buf_size.
553  *
554  * @param[out] coded_samples set to the number of samples as coded in the
555  * packet, or 0 if the codec does not encode the
556  * number of samples in each frame.
557  * @param[out] approx_nb_samples set to non-zero if the number of samples
558  * returned is an approximation.
559  */
561  int buf_size, int *coded_samples, int *approx_nb_samples)
562 {
563  ADPCMDecodeContext *s = avctx->priv_data;
564  int nb_samples = 0;
565  int ch = avctx->channels;
566  int has_coded_samples = 0;
567  int header_size;
568 
569  *coded_samples = 0;
570  *approx_nb_samples = 0;
571 
572  if(ch <= 0)
573  return 0;
574 
575  switch (avctx->codec->id) {
576  /* constant, only check buf_size */
578  if (buf_size < 76 * ch)
579  return 0;
580  nb_samples = 128;
581  break;
583  if (buf_size < 34 * ch)
584  return 0;
585  nb_samples = 64;
586  break;
587  /* simple 4-bit adpcm */
595  nb_samples = buf_size * 2 / ch;
596  break;
597  }
598  if (nb_samples)
599  return nb_samples;
600 
601  /* simple 4-bit adpcm, with header */
602  header_size = 0;
603  switch (avctx->codec->id) {
607  case AV_CODEC_ID_ADPCM_IMA_ISS: header_size = 4 * ch; break;
608  case AV_CODEC_ID_ADPCM_IMA_AMV: header_size = 8; break;
609  case AV_CODEC_ID_ADPCM_IMA_SMJPEG: header_size = 4 * ch; break;
610  }
611  if (header_size > 0)
612  return (buf_size - header_size) * 2 / ch;
613 
614  /* more complex formats */
615  switch (avctx->codec->id) {
617  has_coded_samples = 1;
618  *coded_samples = bytestream2_get_le32(gb);
619  *coded_samples -= *coded_samples % 28;
620  nb_samples = (buf_size - 12) / 30 * 28;
621  break;
623  has_coded_samples = 1;
624  *coded_samples = bytestream2_get_le32(gb);
625  nb_samples = (buf_size - (4 + 8 * ch)) * 2 / ch;
626  break;
628  nb_samples = (buf_size - ch) / ch * 2;
629  break;
633  /* maximum number of samples */
634  /* has internal offsets and a per-frame switch to signal raw 16-bit */
635  has_coded_samples = 1;
636  switch (avctx->codec->id) {
638  header_size = 4 + 9 * ch;
639  *coded_samples = bytestream2_get_le32(gb);
640  break;
642  header_size = 4 + 5 * ch;
643  *coded_samples = bytestream2_get_le32(gb);
644  break;
646  header_size = 4 + 5 * ch;
647  *coded_samples = bytestream2_get_be32(gb);
648  break;
649  }
650  *coded_samples -= *coded_samples % 28;
651  nb_samples = (buf_size - header_size) * 2 / ch;
652  nb_samples -= nb_samples % 28;
653  *approx_nb_samples = 1;
654  break;
656  if (avctx->block_align > 0)
657  buf_size = FFMIN(buf_size, avctx->block_align);
658  nb_samples = ((buf_size - 16) * 2 / 3 * 4) / ch;
659  break;
661  if (avctx->block_align > 0)
662  buf_size = FFMIN(buf_size, avctx->block_align);
663  if (buf_size < 4 * ch)
664  return AVERROR_INVALIDDATA;
665  nb_samples = 1 + (buf_size - 4 * ch) * 2 / ch;
666  break;
668  if (avctx->block_align > 0)
669  buf_size = FFMIN(buf_size, avctx->block_align);
670  nb_samples = (buf_size - 4 * ch) * 2 / ch;
671  break;
673  {
674  int bsize = ff_adpcm_ima_block_sizes[avctx->bits_per_coded_sample - 2];
675  int bsamples = ff_adpcm_ima_block_samples[avctx->bits_per_coded_sample - 2];
676  if (avctx->block_align > 0)
677  buf_size = FFMIN(buf_size, avctx->block_align);
678  if (buf_size < 4 * ch)
679  return AVERROR_INVALIDDATA;
680  nb_samples = 1 + (buf_size - 4 * ch) / (bsize * ch) * bsamples;
681  break;
682  }
684  if (avctx->block_align > 0)
685  buf_size = FFMIN(buf_size, avctx->block_align);
686  nb_samples = (buf_size - 6 * ch) * 2 / ch;
687  break;
689  if (avctx->block_align > 0)
690  buf_size = FFMIN(buf_size, avctx->block_align);
691  nb_samples = (buf_size - 16 * (ch / 2)) * 2 / ch;
692  break;
696  {
697  int samples_per_byte;
698  switch (avctx->codec->id) {
699  case AV_CODEC_ID_ADPCM_SBPRO_2: samples_per_byte = 4; break;
700  case AV_CODEC_ID_ADPCM_SBPRO_3: samples_per_byte = 3; break;
701  case AV_CODEC_ID_ADPCM_SBPRO_4: samples_per_byte = 2; break;
702  }
703  if (!s->status[0].step_index) {
704  if (buf_size < ch)
705  return AVERROR_INVALIDDATA;
706  nb_samples++;
707  buf_size -= ch;
708  }
709  nb_samples += buf_size * samples_per_byte / ch;
710  break;
711  }
713  {
714  int buf_bits = buf_size * 8 - 2;
715  int nbits = (bytestream2_get_byte(gb) >> 6) + 2;
716  int block_hdr_size = 22 * ch;
717  int block_size = block_hdr_size + nbits * ch * 4095;
718  int nblocks = buf_bits / block_size;
719  int bits_left = buf_bits - nblocks * block_size;
720  nb_samples = nblocks * 4096;
721  if (bits_left >= block_hdr_size)
722  nb_samples += 1 + (bits_left - block_hdr_size) / (nbits * ch);
723  break;
724  }
727  if (avctx->extradata) {
728  nb_samples = buf_size * 14 / (8 * ch);
729  break;
730  }
731  has_coded_samples = 1;
732  bytestream2_skip(gb, 4); // channel size
733  *coded_samples = (avctx->codec->id == AV_CODEC_ID_ADPCM_THP_LE) ?
734  bytestream2_get_le32(gb) :
735  bytestream2_get_be32(gb);
736  buf_size -= 8 + 36 * ch;
737  buf_size /= ch;
738  nb_samples = buf_size / 8 * 14;
739  if (buf_size % 8 > 1)
740  nb_samples += (buf_size % 8 - 1) * 2;
741  *approx_nb_samples = 1;
742  break;
744  nb_samples = buf_size / (9 * ch) * 16;
745  break;
747  nb_samples = (buf_size / 128) * 224 / ch;
748  break;
751  nb_samples = buf_size / (16 * ch) * 28;
752  break;
753  }
754 
755  /* validate coded sample count */
756  if (has_coded_samples && (*coded_samples <= 0 || *coded_samples > nb_samples))
757  return AVERROR_INVALIDDATA;
758 
759  return nb_samples;
760 }
761 
762 static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
763  int *got_frame_ptr, AVPacket *avpkt)
764 {
765  AVFrame *frame = data;
766  const uint8_t *buf = avpkt->data;
767  int buf_size = avpkt->size;
768  ADPCMDecodeContext *c = avctx->priv_data;
769  ADPCMChannelStatus *cs;
770  int n, m, channel, i;
771  int16_t *samples;
772  int16_t **samples_p;
773  int st; /* stereo */
774  int count1, count2;
775  int nb_samples, coded_samples, approx_nb_samples, ret;
776  GetByteContext gb;
777 
778  bytestream2_init(&gb, buf, buf_size);
779  nb_samples = get_nb_samples(avctx, &gb, buf_size, &coded_samples, &approx_nb_samples);
780  if (nb_samples <= 0) {
781  av_log(avctx, AV_LOG_ERROR, "invalid number of samples in packet\n");
782  return AVERROR_INVALIDDATA;
783  }
784 
785  /* get output buffer */
786  frame->nb_samples = nb_samples;
787  if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
788  return ret;
789  samples = (int16_t *)frame->data[0];
790  samples_p = (int16_t **)frame->extended_data;
791 
792  /* use coded_samples when applicable */
793  /* it is always <= nb_samples, so the output buffer will be large enough */
794  if (coded_samples) {
795  if (!approx_nb_samples && coded_samples != nb_samples)
796  av_log(avctx, AV_LOG_WARNING, "mismatch in coded sample count\n");
797  frame->nb_samples = nb_samples = coded_samples;
798  }
799 
800  st = avctx->channels == 2 ? 1 : 0;
801 
802  switch(avctx->codec->id) {
804  /* In QuickTime, IMA is encoded by chunks of 34 bytes (=64 samples).
805  Channel data is interleaved per-chunk. */
806  for (channel = 0; channel < avctx->channels; channel++) {
807  int predictor;
808  int step_index;
809  cs = &(c->status[channel]);
810  /* (pppppp) (piiiiiii) */
811 
812  /* Bits 15-7 are the _top_ 9 bits of the 16-bit initial predictor value */
813  predictor = sign_extend(bytestream2_get_be16u(&gb), 16);
814  step_index = predictor & 0x7F;
815  predictor &= ~0x7F;
816 
817  if (cs->step_index == step_index) {
818  int diff = predictor - cs->predictor;
819  if (diff < 0)
820  diff = - diff;
821  if (diff > 0x7f)
822  goto update;
823  } else {
824  update:
825  cs->step_index = step_index;
826  cs->predictor = predictor;
827  }
828 
829  if (cs->step_index > 88u){
830  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
831  channel, cs->step_index);
832  return AVERROR_INVALIDDATA;
833  }
834 
835  samples = samples_p[channel];
836 
837  for (m = 0; m < 64; m += 2) {
838  int byte = bytestream2_get_byteu(&gb);
839  samples[m ] = adpcm_ima_qt_expand_nibble(cs, byte & 0x0F, 3);
840  samples[m + 1] = adpcm_ima_qt_expand_nibble(cs, byte >> 4 , 3);
841  }
842  }
843  break;
845  for(i=0; i<avctx->channels; i++){
846  cs = &(c->status[i]);
847  cs->predictor = samples_p[i][0] = sign_extend(bytestream2_get_le16u(&gb), 16);
848 
849  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
850  if (cs->step_index > 88u){
851  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
852  i, cs->step_index);
853  return AVERROR_INVALIDDATA;
854  }
855  }
856 
857  if (avctx->bits_per_coded_sample != 4) {
858  int samples_per_block = ff_adpcm_ima_block_samples[avctx->bits_per_coded_sample - 2];
859  int block_size = ff_adpcm_ima_block_sizes[avctx->bits_per_coded_sample - 2];
862 
863  for (n = 0; n < (nb_samples - 1) / samples_per_block; n++) {
864  for (i = 0; i < avctx->channels; i++) {
865  int j;
866 
867  cs = &c->status[i];
868  samples = &samples_p[i][1 + n * samples_per_block];
869  for (j = 0; j < block_size; j++) {
870  temp[j] = buf[4 * avctx->channels + block_size * n * avctx->channels +
871  (j % 4) + (j / 4) * (avctx->channels * 4) + i * 4];
872  }
873  ret = init_get_bits8(&g, (const uint8_t *)&temp, block_size);
874  if (ret < 0)
875  return ret;
876  for (m = 0; m < samples_per_block; m++) {
877  samples[m] = adpcm_ima_wav_expand_nibble(cs, &g,
878  avctx->bits_per_coded_sample);
879  }
880  }
881  }
882  bytestream2_skip(&gb, avctx->block_align - avctx->channels * 4);
883  } else {
884  for (n = 0; n < (nb_samples - 1) / 8; n++) {
885  for (i = 0; i < avctx->channels; i++) {
886  cs = &c->status[i];
887  samples = &samples_p[i][1 + n * 8];
888  for (m = 0; m < 8; m += 2) {
889  int v = bytestream2_get_byteu(&gb);
890  samples[m ] = adpcm_ima_expand_nibble(cs, v & 0x0F, 3);
891  samples[m + 1] = adpcm_ima_expand_nibble(cs, v >> 4 , 3);
892  }
893  }
894  }
895  }
896  break;
898  for (i = 0; i < avctx->channels; i++)
899  c->status[i].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
900 
901  for (i = 0; i < avctx->channels; i++) {
902  c->status[i].step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
903  if (c->status[i].step_index > 88u) {
904  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
905  i, c->status[i].step_index);
906  return AVERROR_INVALIDDATA;
907  }
908  }
909 
910  for (i = 0; i < avctx->channels; i++) {
911  samples = (int16_t *)frame->data[i];
912  cs = &c->status[i];
913  for (n = nb_samples >> 1; n > 0; n--) {
914  int v = bytestream2_get_byteu(&gb);
915  *samples++ = adpcm_ima_expand_nibble(cs, v & 0x0F, 4);
916  *samples++ = adpcm_ima_expand_nibble(cs, v >> 4 , 4);
917  }
918  }
919  break;
921  for (i = 0; i < avctx->channels; i++)
922  c->status[i].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
923  for (i = 0; i < avctx->channels; i++)
924  c->status[i].step = sign_extend(bytestream2_get_le16u(&gb), 16);
925 
926  for (n = 0; n < nb_samples >> (1 - st); n++) {
927  int v = bytestream2_get_byteu(&gb);
928  *samples++ = adpcm_agm_expand_nibble(&c->status[0], v & 0xF);
929  *samples++ = adpcm_agm_expand_nibble(&c->status[st], v >> 4 );
930  }
931  break;
933  {
934  int block_predictor;
935 
936  if (avctx->channels > 2) {
937  for (channel = 0; channel < avctx->channels; channel++) {
938  samples = samples_p[channel];
939  block_predictor = bytestream2_get_byteu(&gb);
940  if (block_predictor > 6) {
941  av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[%d] = %d\n",
942  channel, block_predictor);
943  return AVERROR_INVALIDDATA;
944  }
945  c->status[channel].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
946  c->status[channel].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
947  c->status[channel].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
948  c->status[channel].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
949  c->status[channel].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
950  *samples++ = c->status[channel].sample2;
951  *samples++ = c->status[channel].sample1;
952  for(n = (nb_samples - 2) >> 1; n > 0; n--) {
953  int byte = bytestream2_get_byteu(&gb);
954  *samples++ = adpcm_ms_expand_nibble(&c->status[channel], byte >> 4 );
955  *samples++ = adpcm_ms_expand_nibble(&c->status[channel], byte & 0x0F);
956  }
957  }
958  } else {
959  block_predictor = bytestream2_get_byteu(&gb);
960  if (block_predictor > 6) {
961  av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[0] = %d\n",
962  block_predictor);
963  return AVERROR_INVALIDDATA;
964  }
965  c->status[0].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
966  c->status[0].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
967  if (st) {
968  block_predictor = bytestream2_get_byteu(&gb);
969  if (block_predictor > 6) {
970  av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[1] = %d\n",
971  block_predictor);
972  return AVERROR_INVALIDDATA;
973  }
974  c->status[1].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
975  c->status[1].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
976  }
977  c->status[0].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
978  if (st){
979  c->status[1].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
980  }
981 
982  c->status[0].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
983  if (st) c->status[1].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
984  c->status[0].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
985  if (st) c->status[1].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
986 
987  *samples++ = c->status[0].sample2;
988  if (st) *samples++ = c->status[1].sample2;
989  *samples++ = c->status[0].sample1;
990  if (st) *samples++ = c->status[1].sample1;
991  for(n = (nb_samples - 2) >> (1 - st); n > 0; n--) {
992  int byte = bytestream2_get_byteu(&gb);
993  *samples++ = adpcm_ms_expand_nibble(&c->status[0 ], byte >> 4 );
994  *samples++ = adpcm_ms_expand_nibble(&c->status[st], byte & 0x0F);
995  }
996  }
997  break;
998  }
1000  for (channel = 0; channel < avctx->channels; channel+=2) {
1001  bytestream2_skipu(&gb, 4);
1002  c->status[channel ].step = bytestream2_get_le16u(&gb) & 0x1f;
1003  c->status[channel + 1].step = bytestream2_get_le16u(&gb) & 0x1f;
1004  c->status[channel ].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1005  bytestream2_skipu(&gb, 2);
1006  c->status[channel + 1].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1007  bytestream2_skipu(&gb, 2);
1008  for (n = 0; n < nb_samples; n+=2) {
1009  int v = bytestream2_get_byteu(&gb);
1010  samples_p[channel][n ] = adpcm_mtaf_expand_nibble(&c->status[channel], v & 0x0F);
1011  samples_p[channel][n + 1] = adpcm_mtaf_expand_nibble(&c->status[channel], v >> 4 );
1012  }
1013  for (n = 0; n < nb_samples; n+=2) {
1014  int v = bytestream2_get_byteu(&gb);
1015  samples_p[channel + 1][n ] = adpcm_mtaf_expand_nibble(&c->status[channel + 1], v & 0x0F);
1016  samples_p[channel + 1][n + 1] = adpcm_mtaf_expand_nibble(&c->status[channel + 1], v >> 4 );
1017  }
1018  }
1019  break;
1021  for (channel = 0; channel < avctx->channels; channel++) {
1022  cs = &c->status[channel];
1023  cs->predictor = *samples++ = sign_extend(bytestream2_get_le16u(&gb), 16);
1024  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1025  if (cs->step_index > 88u){
1026  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1027  channel, cs->step_index);
1028  return AVERROR_INVALIDDATA;
1029  }
1030  }
1031  for (n = (nb_samples - 1) >> (1 - st); n > 0; n--) {
1032  int v = bytestream2_get_byteu(&gb);
1033  *samples++ = adpcm_ima_expand_nibble(&c->status[0 ], v >> 4 , 3);
1034  *samples++ = adpcm_ima_expand_nibble(&c->status[st], v & 0x0F, 3);
1035  }
1036  break;
1038  {
1039  int last_byte = 0;
1040  int nibble;
1041  int decode_top_nibble_next = 0;
1042  int diff_channel;
1043  const int16_t *samples_end = samples + avctx->channels * nb_samples;
1044 
1045  bytestream2_skipu(&gb, 10);
1046  c->status[0].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1047  c->status[1].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1048  c->status[0].step_index = bytestream2_get_byteu(&gb);
1049  c->status[1].step_index = bytestream2_get_byteu(&gb);
1050  if (c->status[0].step_index > 88u || c->status[1].step_index > 88u){
1051  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i/%i\n",
1052  c->status[0].step_index, c->status[1].step_index);
1053  return AVERROR_INVALIDDATA;
1054  }
1055  /* sign extend the predictors */
1056  diff_channel = c->status[1].predictor;
1057 
1058  /* DK3 ADPCM support macro */
1059 #define DK3_GET_NEXT_NIBBLE() \
1060  if (decode_top_nibble_next) { \
1061  nibble = last_byte >> 4; \
1062  decode_top_nibble_next = 0; \
1063  } else { \
1064  last_byte = bytestream2_get_byteu(&gb); \
1065  nibble = last_byte & 0x0F; \
1066  decode_top_nibble_next = 1; \
1067  }
1068 
1069  while (samples < samples_end) {
1070 
1071  /* for this algorithm, c->status[0] is the sum channel and
1072  * c->status[1] is the diff channel */
1073 
1074  /* process the first predictor of the sum channel */
1076  adpcm_ima_expand_nibble(&c->status[0], nibble, 3);
1077 
1078  /* process the diff channel predictor */
1080  adpcm_ima_expand_nibble(&c->status[1], nibble, 3);
1081 
1082  /* process the first pair of stereo PCM samples */
1083  diff_channel = (diff_channel + c->status[1].predictor) / 2;
1084  *samples++ = c->status[0].predictor + c->status[1].predictor;
1085  *samples++ = c->status[0].predictor - c->status[1].predictor;
1086 
1087  /* process the second predictor of the sum channel */
1089  adpcm_ima_expand_nibble(&c->status[0], nibble, 3);
1090 
1091  /* process the second pair of stereo PCM samples */
1092  diff_channel = (diff_channel + c->status[1].predictor) / 2;
1093  *samples++ = c->status[0].predictor + c->status[1].predictor;
1094  *samples++ = c->status[0].predictor - c->status[1].predictor;
1095  }
1096 
1097  if ((bytestream2_tell(&gb) & 1))
1098  bytestream2_skip(&gb, 1);
1099  break;
1100  }
1102  for (channel = 0; channel < avctx->channels; channel++) {
1103  cs = &c->status[channel];
1104  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1105  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1106  if (cs->step_index > 88u){
1107  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1108  channel, cs->step_index);
1109  return AVERROR_INVALIDDATA;
1110  }
1111  }
1112 
1113  for (n = nb_samples >> (1 - st); n > 0; n--) {
1114  int v1, v2;
1115  int v = bytestream2_get_byteu(&gb);
1116  /* nibbles are swapped for mono */
1117  if (st) {
1118  v1 = v >> 4;
1119  v2 = v & 0x0F;
1120  } else {
1121  v2 = v >> 4;
1122  v1 = v & 0x0F;
1123  }
1124  *samples++ = adpcm_ima_expand_nibble(&c->status[0 ], v1, 3);
1125  *samples++ = adpcm_ima_expand_nibble(&c->status[st], v2, 3);
1126  }
1127  break;
1129  for (channel = 0; channel < avctx->channels; channel++) {
1130  cs = &c->status[channel];
1131  samples = samples_p[channel];
1132  bytestream2_skip(&gb, 4);
1133  for (n = 0; n < nb_samples; n += 2) {
1134  int v = bytestream2_get_byteu(&gb);
1135  *samples++ = adpcm_ima_expand_nibble(cs, v >> 4 , 3);
1136  *samples++ = adpcm_ima_expand_nibble(cs, v & 0x0F, 3);
1137  }
1138  }
1139  break;
1141  while (bytestream2_get_bytes_left(&gb) > 0) {
1142  int v = bytestream2_get_byteu(&gb);
1143  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4 , 3);
1144  *samples++ = adpcm_ima_expand_nibble(&c->status[st], v & 0x0F, 3);
1145  }
1146  break;
1148  while (bytestream2_get_bytes_left(&gb) > 0) {
1149  int v = bytestream2_get_byteu(&gb);
1150  *samples++ = adpcm_ima_oki_expand_nibble(&c->status[0], v >> 4 );
1151  *samples++ = adpcm_ima_oki_expand_nibble(&c->status[st], v & 0x0F);
1152  }
1153  break;
1155  for (channel = 0; channel < avctx->channels; channel++) {
1156  cs = &c->status[channel];
1157  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1158  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1159  if (cs->step_index > 88u){
1160  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1161  channel, cs->step_index);
1162  return AVERROR_INVALIDDATA;
1163  }
1164  }
1165  for (n = 0; n < nb_samples / 2; n++) {
1166  int byte[2];
1167 
1168  byte[0] = bytestream2_get_byteu(&gb);
1169  if (st)
1170  byte[1] = bytestream2_get_byteu(&gb);
1171  for(channel = 0; channel < avctx->channels; channel++) {
1172  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], byte[channel] & 0x0F, 3);
1173  }
1174  for(channel = 0; channel < avctx->channels; channel++) {
1175  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], byte[channel] >> 4 , 3);
1176  }
1177  }
1178  break;
1180  if (c->vqa_version == 3) {
1181  for (channel = 0; channel < avctx->channels; channel++) {
1182  int16_t *smp = samples_p[channel];
1183 
1184  for (n = nb_samples / 2; n > 0; n--) {
1185  int v = bytestream2_get_byteu(&gb);
1186  *smp++ = adpcm_ima_expand_nibble(&c->status[channel], v >> 4 , 3);
1187  *smp++ = adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3);
1188  }
1189  }
1190  } else {
1191  for (n = nb_samples / 2; n > 0; n--) {
1192  for (channel = 0; channel < avctx->channels; channel++) {
1193  int v = bytestream2_get_byteu(&gb);
1194  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], v >> 4 , 3);
1195  samples[st] = adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3);
1196  }
1197  samples += avctx->channels;
1198  }
1199  }
1200  bytestream2_seek(&gb, 0, SEEK_END);
1201  break;
1202  case AV_CODEC_ID_ADPCM_XA:
1203  {
1204  int16_t *out0 = samples_p[0];
1205  int16_t *out1 = samples_p[1];
1206  int samples_per_block = 28 * (3 - avctx->channels) * 4;
1207  int sample_offset = 0;
1208  int bytes_remaining;
1209  while (bytestream2_get_bytes_left(&gb) >= 128) {
1210  if ((ret = xa_decode(avctx, out0, out1, buf + bytestream2_tell(&gb),
1211  &c->status[0], &c->status[1],
1212  avctx->channels, sample_offset)) < 0)
1213  return ret;
1214  bytestream2_skipu(&gb, 128);
1215  sample_offset += samples_per_block;
1216  }
1217  /* Less than a full block of data left, e.g. when reading from
1218  * 2324 byte per sector XA; the remainder is padding */
1219  bytes_remaining = bytestream2_get_bytes_left(&gb);
1220  if (bytes_remaining > 0) {
1221  bytestream2_skip(&gb, bytes_remaining);
1222  }
1223  break;
1224  }
1226  for (i=0; i<=st; i++) {
1227  c->status[i].step_index = bytestream2_get_le32u(&gb);
1228  if (c->status[i].step_index > 88u) {
1229  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1230  i, c->status[i].step_index);
1231  return AVERROR_INVALIDDATA;
1232  }
1233  }
1234  for (i=0; i<=st; i++) {
1235  c->status[i].predictor = bytestream2_get_le32u(&gb);
1236  if (FFABS(c->status[i].predictor) > (1<<16))
1237  return AVERROR_INVALIDDATA;
1238  }
1239 
1240  for (n = nb_samples >> (1 - st); n > 0; n--) {
1241  int byte = bytestream2_get_byteu(&gb);
1242  *samples++ = adpcm_ima_expand_nibble(&c->status[0], byte >> 4, 3);
1243  *samples++ = adpcm_ima_expand_nibble(&c->status[st], byte & 0x0F, 3);
1244  }
1245  break;
1247  for (n = nb_samples >> (1 - st); n > 0; n--) {
1248  int byte = bytestream2_get_byteu(&gb);
1249  *samples++ = adpcm_ima_expand_nibble(&c->status[0], byte >> 4, 6);
1250  *samples++ = adpcm_ima_expand_nibble(&c->status[st], byte & 0x0F, 6);
1251  }
1252  break;
1253  case AV_CODEC_ID_ADPCM_EA:
1254  {
1255  int previous_left_sample, previous_right_sample;
1256  int current_left_sample, current_right_sample;
1257  int next_left_sample, next_right_sample;
1258  int coeff1l, coeff2l, coeff1r, coeff2r;
1259  int shift_left, shift_right;
1260 
1261  /* Each EA ADPCM frame has a 12-byte header followed by 30-byte pieces,
1262  each coding 28 stereo samples. */
1263 
1264  if(avctx->channels != 2)
1265  return AVERROR_INVALIDDATA;
1266 
1267  current_left_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1268  previous_left_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1269  current_right_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1270  previous_right_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1271 
1272  for (count1 = 0; count1 < nb_samples / 28; count1++) {
1273  int byte = bytestream2_get_byteu(&gb);
1274  coeff1l = ea_adpcm_table[ byte >> 4 ];
1275  coeff2l = ea_adpcm_table[(byte >> 4 ) + 4];
1276  coeff1r = ea_adpcm_table[ byte & 0x0F];
1277  coeff2r = ea_adpcm_table[(byte & 0x0F) + 4];
1278 
1279  byte = bytestream2_get_byteu(&gb);
1280  shift_left = 20 - (byte >> 4);
1281  shift_right = 20 - (byte & 0x0F);
1282 
1283  for (count2 = 0; count2 < 28; count2++) {
1284  byte = bytestream2_get_byteu(&gb);
1285  next_left_sample = sign_extend(byte >> 4, 4) << shift_left;
1286  next_right_sample = sign_extend(byte, 4) << shift_right;
1287 
1288  next_left_sample = (next_left_sample +
1289  (current_left_sample * coeff1l) +
1290  (previous_left_sample * coeff2l) + 0x80) >> 8;
1291  next_right_sample = (next_right_sample +
1292  (current_right_sample * coeff1r) +
1293  (previous_right_sample * coeff2r) + 0x80) >> 8;
1294 
1295  previous_left_sample = current_left_sample;
1296  current_left_sample = av_clip_int16(next_left_sample);
1297  previous_right_sample = current_right_sample;
1298  current_right_sample = av_clip_int16(next_right_sample);
1299  *samples++ = current_left_sample;
1300  *samples++ = current_right_sample;
1301  }
1302  }
1303 
1304  bytestream2_skip(&gb, 2); // Skip terminating 0x0000
1305 
1306  break;
1307  }
1309  {
1310  int coeff[2][2], shift[2];
1311 
1312  for(channel = 0; channel < avctx->channels; channel++) {
1313  int byte = bytestream2_get_byteu(&gb);
1314  for (i=0; i<2; i++)
1315  coeff[channel][i] = ea_adpcm_table[(byte >> 4) + 4*i];
1316  shift[channel] = 20 - (byte & 0x0F);
1317  }
1318  for (count1 = 0; count1 < nb_samples / 2; count1++) {
1319  int byte[2];
1320 
1321  byte[0] = bytestream2_get_byteu(&gb);
1322  if (st) byte[1] = bytestream2_get_byteu(&gb);
1323  for(i = 4; i >= 0; i-=4) { /* Pairwise samples LL RR (st) or LL LL (mono) */
1324  for(channel = 0; channel < avctx->channels; channel++) {
1325  int sample = sign_extend(byte[channel] >> i, 4) << shift[channel];
1326  sample = (sample +
1327  c->status[channel].sample1 * coeff[channel][0] +
1328  c->status[channel].sample2 * coeff[channel][1] + 0x80) >> 8;
1330  c->status[channel].sample1 = av_clip_int16(sample);
1331  *samples++ = c->status[channel].sample1;
1332  }
1333  }
1334  }
1335  bytestream2_seek(&gb, 0, SEEK_END);
1336  break;
1337  }
1340  case AV_CODEC_ID_ADPCM_EA_R3: {
1341  /* channel numbering
1342  2chan: 0=fl, 1=fr
1343  4chan: 0=fl, 1=rl, 2=fr, 3=rr
1344  6chan: 0=fl, 1=c, 2=fr, 3=rl, 4=rr, 5=sub */
1345  const int big_endian = avctx->codec->id == AV_CODEC_ID_ADPCM_EA_R3;
1346  int previous_sample, current_sample, next_sample;
1347  int coeff1, coeff2;
1348  int shift;
1349  unsigned int channel;
1350  uint16_t *samplesC;
1351  int count = 0;
1352  int offsets[6];
1353 
1354  for (channel=0; channel<avctx->channels; channel++)
1355  offsets[channel] = (big_endian ? bytestream2_get_be32(&gb) :
1356  bytestream2_get_le32(&gb)) +
1357  (avctx->channels + 1) * 4;
1358 
1359  for (channel=0; channel<avctx->channels; channel++) {
1360  bytestream2_seek(&gb, offsets[channel], SEEK_SET);
1361  samplesC = samples_p[channel];
1362 
1363  if (avctx->codec->id == AV_CODEC_ID_ADPCM_EA_R1) {
1364  current_sample = sign_extend(bytestream2_get_le16(&gb), 16);
1365  previous_sample = sign_extend(bytestream2_get_le16(&gb), 16);
1366  } else {
1367  current_sample = c->status[channel].predictor;
1368  previous_sample = c->status[channel].prev_sample;
1369  }
1370 
1371  for (count1 = 0; count1 < nb_samples / 28; count1++) {
1372  int byte = bytestream2_get_byte(&gb);
1373  if (byte == 0xEE) { /* only seen in R2 and R3 */
1374  current_sample = sign_extend(bytestream2_get_be16(&gb), 16);
1375  previous_sample = sign_extend(bytestream2_get_be16(&gb), 16);
1376 
1377  for (count2=0; count2<28; count2++)
1378  *samplesC++ = sign_extend(bytestream2_get_be16(&gb), 16);
1379  } else {
1380  coeff1 = ea_adpcm_table[ byte >> 4 ];
1381  coeff2 = ea_adpcm_table[(byte >> 4) + 4];
1382  shift = 20 - (byte & 0x0F);
1383 
1384  for (count2=0; count2<28; count2++) {
1385  if (count2 & 1)
1386  next_sample = (unsigned)sign_extend(byte, 4) << shift;
1387  else {
1388  byte = bytestream2_get_byte(&gb);
1389  next_sample = (unsigned)sign_extend(byte >> 4, 4) << shift;
1390  }
1391 
1392  next_sample += (current_sample * coeff1) +
1393  (previous_sample * coeff2);
1394  next_sample = av_clip_int16(next_sample >> 8);
1395 
1396  previous_sample = current_sample;
1397  current_sample = next_sample;
1398  *samplesC++ = current_sample;
1399  }
1400  }
1401  }
1402  if (!count) {
1403  count = count1;
1404  } else if (count != count1) {
1405  av_log(avctx, AV_LOG_WARNING, "per-channel sample count mismatch\n");
1406  count = FFMAX(count, count1);
1407  }
1408 
1409  if (avctx->codec->id != AV_CODEC_ID_ADPCM_EA_R1) {
1410  c->status[channel].predictor = current_sample;
1411  c->status[channel].prev_sample = previous_sample;
1412  }
1413  }
1414 
1415  frame->nb_samples = count * 28;
1416  bytestream2_seek(&gb, 0, SEEK_END);
1417  break;
1418  }
1420  for (channel=0; channel<avctx->channels; channel++) {
1421  int coeff[2][4], shift[4];
1422  int16_t *s = samples_p[channel];
1423  for (n = 0; n < 4; n++, s += 32) {
1424  int val = sign_extend(bytestream2_get_le16u(&gb), 16);
1425  for (i=0; i<2; i++)
1426  coeff[i][n] = ea_adpcm_table[(val&0x0F)+4*i];
1427  s[0] = val & ~0x0F;
1428 
1429  val = sign_extend(bytestream2_get_le16u(&gb), 16);
1430  shift[n] = 20 - (val & 0x0F);
1431  s[1] = val & ~0x0F;
1432  }
1433 
1434  for (m=2; m<32; m+=2) {
1435  s = &samples_p[channel][m];
1436  for (n = 0; n < 4; n++, s += 32) {
1437  int level, pred;
1438  int byte = bytestream2_get_byteu(&gb);
1439 
1440  level = sign_extend(byte >> 4, 4) << shift[n];
1441  pred = s[-1] * coeff[0][n] + s[-2] * coeff[1][n];
1442  s[0] = av_clip_int16((level + pred + 0x80) >> 8);
1443 
1444  level = sign_extend(byte, 4) << shift[n];
1445  pred = s[0] * coeff[0][n] + s[-1] * coeff[1][n];
1446  s[1] = av_clip_int16((level + pred + 0x80) >> 8);
1447  }
1448  }
1449  }
1450  break;
1452  c->status[0].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1453  c->status[0].step_index = bytestream2_get_byteu(&gb);
1454  bytestream2_skipu(&gb, 5);
1455  if (c->status[0].step_index > 88u) {
1456  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n",
1457  c->status[0].step_index);
1458  return AVERROR_INVALIDDATA;
1459  }
1460 
1461  for (n = nb_samples >> (1 - st); n > 0; n--) {
1462  int v = bytestream2_get_byteu(&gb);
1463 
1464  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4, 3);
1465  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v & 0xf, 3);
1466  }
1467  break;
1469  for (i = 0; i < avctx->channels; i++) {
1470  c->status[i].predictor = sign_extend(bytestream2_get_be16u(&gb), 16);
1471  c->status[i].step_index = bytestream2_get_byteu(&gb);
1472  bytestream2_skipu(&gb, 1);
1473  if (c->status[i].step_index > 88u) {
1474  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n",
1475  c->status[i].step_index);
1476  return AVERROR_INVALIDDATA;
1477  }
1478  }
1479 
1480  for (n = nb_samples >> (1 - st); n > 0; n--) {
1481  int v = bytestream2_get_byteu(&gb);
1482 
1483  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[0 ], v >> 4, 3);
1484  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[st], v & 0xf, 3);
1485  }
1486  break;
1487  case AV_CODEC_ID_ADPCM_CT:
1488  for (n = nb_samples >> (1 - st); n > 0; n--) {
1489  int v = bytestream2_get_byteu(&gb);
1490  *samples++ = adpcm_ct_expand_nibble(&c->status[0 ], v >> 4 );
1491  *samples++ = adpcm_ct_expand_nibble(&c->status[st], v & 0x0F);
1492  }
1493  break;
1497  if (!c->status[0].step_index) {
1498  /* the first byte is a raw sample */
1499  *samples++ = 128 * (bytestream2_get_byteu(&gb) - 0x80);
1500  if (st)
1501  *samples++ = 128 * (bytestream2_get_byteu(&gb) - 0x80);
1502  c->status[0].step_index = 1;
1503  nb_samples--;
1504  }
1505  if (avctx->codec->id == AV_CODEC_ID_ADPCM_SBPRO_4) {
1506  for (n = nb_samples >> (1 - st); n > 0; n--) {
1507  int byte = bytestream2_get_byteu(&gb);
1508  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1509  byte >> 4, 4, 0);
1510  *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
1511  byte & 0x0F, 4, 0);
1512  }
1513  } else if (avctx->codec->id == AV_CODEC_ID_ADPCM_SBPRO_3) {
1514  for (n = (nb_samples<<st) / 3; n > 0; n--) {
1515  int byte = bytestream2_get_byteu(&gb);
1516  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1517  byte >> 5 , 3, 0);
1518  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1519  (byte >> 2) & 0x07, 3, 0);
1520  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1521  byte & 0x03, 2, 0);
1522  }
1523  } else {
1524  for (n = nb_samples >> (2 - st); n > 0; n--) {
1525  int byte = bytestream2_get_byteu(&gb);
1526  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1527  byte >> 6 , 2, 2);
1528  *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
1529  (byte >> 4) & 0x03, 2, 2);
1530  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1531  (byte >> 2) & 0x03, 2, 2);
1532  *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
1533  byte & 0x03, 2, 2);
1534  }
1535  }
1536  break;
1537  case AV_CODEC_ID_ADPCM_SWF:
1538  adpcm_swf_decode(avctx, buf, buf_size, samples);
1539  bytestream2_seek(&gb, 0, SEEK_END);
1540  break;
1542  for (n = nb_samples >> (1 - st); n > 0; n--) {
1543  int v = bytestream2_get_byteu(&gb);
1544  *samples++ = adpcm_yamaha_expand_nibble(&c->status[0 ], v & 0x0F);
1545  *samples++ = adpcm_yamaha_expand_nibble(&c->status[st], v >> 4 );
1546  }
1547  break;
1549  if (!c->has_status) {
1550  for (channel = 0; channel < avctx->channels; channel++)
1551  c->status[channel].step = 0;
1552  c->has_status = 1;
1553  }
1554  for (channel = 0; channel < avctx->channels; channel++) {
1555  samples = samples_p[channel];
1556  for (n = nb_samples >> 1; n > 0; n--) {
1557  int v = bytestream2_get_byteu(&gb);
1558  *samples++ = adpcm_yamaha_expand_nibble(&c->status[channel], v & 0x0F);
1559  *samples++ = adpcm_yamaha_expand_nibble(&c->status[channel], v >> 4 );
1560  }
1561  }
1562  break;
1563  case AV_CODEC_ID_ADPCM_AFC:
1564  {
1565  int samples_per_block;
1566  int blocks;
1567 
1568  if (avctx->extradata && avctx->extradata_size == 1 && avctx->extradata[0]) {
1569  samples_per_block = avctx->extradata[0] / 16;
1570  blocks = nb_samples / avctx->extradata[0];
1571  } else {
1572  samples_per_block = nb_samples / 16;
1573  blocks = 1;
1574  }
1575 
1576  for (m = 0; m < blocks; m++) {
1577  for (channel = 0; channel < avctx->channels; channel++) {
1578  int prev1 = c->status[channel].sample1;
1579  int prev2 = c->status[channel].sample2;
1580 
1581  samples = samples_p[channel] + m * 16;
1582  /* Read in every sample for this channel. */
1583  for (i = 0; i < samples_per_block; i++) {
1584  int byte = bytestream2_get_byteu(&gb);
1585  int scale = 1 << (byte >> 4);
1586  int index = byte & 0xf;
1587  int factor1 = ff_adpcm_afc_coeffs[0][index];
1588  int factor2 = ff_adpcm_afc_coeffs[1][index];
1589 
1590  /* Decode 16 samples. */
1591  for (n = 0; n < 16; n++) {
1592  int32_t sampledat;
1593 
1594  if (n & 1) {
1595  sampledat = sign_extend(byte, 4);
1596  } else {
1597  byte = bytestream2_get_byteu(&gb);
1598  sampledat = sign_extend(byte >> 4, 4);
1599  }
1600 
1601  sampledat = ((prev1 * factor1 + prev2 * factor2) +
1602  ((sampledat * scale) << 11)) >> 11;
1603  *samples = av_clip_int16(sampledat);
1604  prev2 = prev1;
1605  prev1 = *samples++;
1606  }
1607  }
1608 
1609  c->status[channel].sample1 = prev1;
1610  c->status[channel].sample2 = prev2;
1611  }
1612  }
1613  bytestream2_seek(&gb, 0, SEEK_END);
1614  break;
1615  }
1616  case AV_CODEC_ID_ADPCM_THP:
1618  {
1619  int table[14][16];
1620  int ch;
1621 
1622 #define THP_GET16(g) \
1623  sign_extend( \
1624  avctx->codec->id == AV_CODEC_ID_ADPCM_THP_LE ? \
1625  bytestream2_get_le16u(&(g)) : \
1626  bytestream2_get_be16u(&(g)), 16)
1627 
1628  if (avctx->extradata) {
1630  if (avctx->extradata_size < 32 * avctx->channels) {
1631  av_log(avctx, AV_LOG_ERROR, "Missing coeff table\n");
1632  return AVERROR_INVALIDDATA;
1633  }
1634 
1635  bytestream2_init(&tb, avctx->extradata, avctx->extradata_size);
1636  for (i = 0; i < avctx->channels; i++)
1637  for (n = 0; n < 16; n++)
1638  table[i][n] = THP_GET16(tb);
1639  } else {
1640  for (i = 0; i < avctx->channels; i++)
1641  for (n = 0; n < 16; n++)
1642  table[i][n] = THP_GET16(gb);
1643 
1644  if (!c->has_status) {
1645  /* Initialize the previous sample. */
1646  for (i = 0; i < avctx->channels; i++) {
1647  c->status[i].sample1 = THP_GET16(gb);
1648  c->status[i].sample2 = THP_GET16(gb);
1649  }
1650  c->has_status = 1;
1651  } else {
1652  bytestream2_skip(&gb, avctx->channels * 4);
1653  }
1654  }
1655 
1656  for (ch = 0; ch < avctx->channels; ch++) {
1657  samples = samples_p[ch];
1658 
1659  /* Read in every sample for this channel. */
1660  for (i = 0; i < (nb_samples + 13) / 14; i++) {
1661  int byte = bytestream2_get_byteu(&gb);
1662  int index = (byte >> 4) & 7;
1663  unsigned int exp = byte & 0x0F;
1664  int factor1 = table[ch][index * 2];
1665  int factor2 = table[ch][index * 2 + 1];
1666 
1667  /* Decode 14 samples. */
1668  for (n = 0; n < 14 && (i * 14 + n < nb_samples); n++) {
1669  int32_t sampledat;
1670 
1671  if (n & 1) {
1672  sampledat = sign_extend(byte, 4);
1673  } else {
1674  byte = bytestream2_get_byteu(&gb);
1675  sampledat = sign_extend(byte >> 4, 4);
1676  }
1677 
1678  sampledat = ((c->status[ch].sample1 * factor1
1679  + c->status[ch].sample2 * factor2) >> 11) + (sampledat << exp);
1680  *samples = av_clip_int16(sampledat);
1681  c->status[ch].sample2 = c->status[ch].sample1;
1682  c->status[ch].sample1 = *samples++;
1683  }
1684  }
1685  }
1686  break;
1687  }
1688  case AV_CODEC_ID_ADPCM_DTK:
1689  for (channel = 0; channel < avctx->channels; channel++) {
1690  samples = samples_p[channel];
1691 
1692  /* Read in every sample for this channel. */
1693  for (i = 0; i < nb_samples / 28; i++) {
1694  int byte, header;
1695  if (channel)
1696  bytestream2_skipu(&gb, 1);
1697  header = bytestream2_get_byteu(&gb);
1698  bytestream2_skipu(&gb, 3 - channel);
1699 
1700  /* Decode 28 samples. */
1701  for (n = 0; n < 28; n++) {
1702  int32_t sampledat, prev;
1703 
1704  switch (header >> 4) {
1705  case 1:
1706  prev = (c->status[channel].sample1 * 0x3c);
1707  break;
1708  case 2:
1709  prev = (c->status[channel].sample1 * 0x73) - (c->status[channel].sample2 * 0x34);
1710  break;
1711  case 3:
1712  prev = (c->status[channel].sample1 * 0x62) - (c->status[channel].sample2 * 0x37);
1713  break;
1714  default:
1715  prev = 0;
1716  }
1717 
1718  prev = av_clip_intp2((prev + 0x20) >> 6, 21);
1719 
1720  byte = bytestream2_get_byteu(&gb);
1721  if (!channel)
1722  sampledat = sign_extend(byte, 4);
1723  else
1724  sampledat = sign_extend(byte >> 4, 4);
1725 
1726  sampledat = ((sampledat * (1 << 12)) >> (header & 0xf)) * (1 << 6) + prev;
1727  *samples++ = av_clip_int16(sampledat >> 6);
1729  c->status[channel].sample1 = sampledat;
1730  }
1731  }
1732  if (!channel)
1733  bytestream2_seek(&gb, 0, SEEK_SET);
1734  }
1735  break;
1736  case AV_CODEC_ID_ADPCM_PSX:
1737  for (channel = 0; channel < avctx->channels; channel++) {
1738  samples = samples_p[channel];
1739 
1740  /* Read in every sample for this channel. */
1741  for (i = 0; i < nb_samples / 28; i++) {
1742  int filter, shift, flag, byte;
1743 
1744  filter = bytestream2_get_byteu(&gb);
1745  shift = filter & 0xf;
1746  filter = filter >> 4;
1747  if (filter >= FF_ARRAY_ELEMS(xa_adpcm_table))
1748  return AVERROR_INVALIDDATA;
1749  flag = bytestream2_get_byteu(&gb);
1750 
1751  /* Decode 28 samples. */
1752  for (n = 0; n < 28; n++) {
1753  int sample = 0, scale;
1754 
1755  if (flag < 0x07) {
1756  if (n & 1) {
1757  scale = sign_extend(byte >> 4, 4);
1758  } else {
1759  byte = bytestream2_get_byteu(&gb);
1760  scale = sign_extend(byte, 4);
1761  }
1762 
1763  scale = scale << 12;
1764  sample = (int)((scale >> shift) + (c->status[channel].sample1 * xa_adpcm_table[filter][0] + c->status[channel].sample2 * xa_adpcm_table[filter][1]) / 64);
1765  }
1766  *samples++ = av_clip_int16(sample);
1768  c->status[channel].sample1 = sample;
1769  }
1770  }
1771  }
1772  break;
1773 
1774  default:
1775  av_assert0(0); // unsupported codec_id should not happen
1776  }
1777 
1778  if (avpkt->size && bytestream2_tell(&gb) == 0) {
1779  av_log(avctx, AV_LOG_ERROR, "Nothing consumed\n");
1780  return AVERROR_INVALIDDATA;
1781  }
1782 
1783  *got_frame_ptr = 1;
1784 
1785  if (avpkt->size < bytestream2_tell(&gb)) {
1786  av_log(avctx, AV_LOG_ERROR, "Overread of %d < %d\n", avpkt->size, bytestream2_tell(&gb));
1787  return avpkt->size;
1788  }
1789 
1790  return bytestream2_tell(&gb);
1791 }
1792 
1793 static void adpcm_flush(AVCodecContext *avctx)
1794 {
1795  ADPCMDecodeContext *c = avctx->priv_data;
1796  c->has_status = 0;
1797 }
1798 
1799 
1807 
/**
 * Declare one ADPCM decoder AVCodec table entry.
 *
 * Expands to a global `AVCodec ff_<name_>_decoder` definition wired to the
 * shared init/decode/flush callbacks in this file.
 *
 * @param id_          AVCodecID of the variant (switch key in the frame decoder)
 * @param sample_fmts_ terminated AVSampleFormat list the variant outputs
 *                     (interleaved s16, planar s16p, or both)
 * @param name_        short codec name; also forms the symbol ff_<name_>_decoder
 * @param long_name_   human-readable description (dropped in small builds)
 */
#define ADPCM_DECODER(id_, sample_fmts_, name_, long_name_) \
AVCodec ff_ ## name_ ## _decoder = { \
 .name = #name_, \
 .long_name = NULL_IF_CONFIG_SMALL(long_name_), \
 .type = AVMEDIA_TYPE_AUDIO, \
 .id = id_, \
 .priv_data_size = sizeof(ADPCMDecodeContext), \
 .init = adpcm_decode_init, \
 .decode = adpcm_decode_frame, \
 .flush = adpcm_flush, \
 .capabilities = AV_CODEC_CAP_DR1, \
 .sample_fmts = sample_fmts_, \
}
1821 
/* Note: Do not forget to add new entries to the Makefile as well. */
/* Registration of every ADPCM variant handled by this file.  All entries
 * share the same init/decode/flush implementation; the per-variant behavior
 * is selected on avctx->codec->id inside adpcm_decode_frame().  The second
 * argument picks the output layout: interleaved s16, planar s16p, or both. */
ADPCM_DECODER(AV_CODEC_ID_ADPCM_4XM, sample_fmts_s16p, adpcm_4xm, "ADPCM 4X Movie");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_AFC, sample_fmts_s16p, adpcm_afc, "ADPCM Nintendo Gamecube AFC");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_AGM, sample_fmts_s16, adpcm_agm, "ADPCM AmuseGraphics Movie");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_AICA, sample_fmts_s16p, adpcm_aica, "ADPCM Yamaha AICA");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_CT, sample_fmts_s16, adpcm_ct, "ADPCM Creative Technology");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_DTK, sample_fmts_s16p, adpcm_dtk, "ADPCM Nintendo Gamecube DTK");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA, sample_fmts_s16, adpcm_ea, "ADPCM Electronic Arts");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_MAXIS_XA, sample_fmts_s16, adpcm_ea_maxis_xa, "ADPCM Electronic Arts Maxis CDROM XA");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_R1, sample_fmts_s16p, adpcm_ea_r1, "ADPCM Electronic Arts R1");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_R2, sample_fmts_s16p, adpcm_ea_r2, "ADPCM Electronic Arts R2");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_R3, sample_fmts_s16p, adpcm_ea_r3, "ADPCM Electronic Arts R3");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_XAS, sample_fmts_s16p, adpcm_ea_xas, "ADPCM Electronic Arts XAS");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_AMV, sample_fmts_s16, adpcm_ima_amv, "ADPCM IMA AMV");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_APC, sample_fmts_s16, adpcm_ima_apc, "ADPCM IMA CRYO APC");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_DAT4, sample_fmts_s16, adpcm_ima_dat4, "ADPCM IMA Eurocom DAT4");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_DK3, sample_fmts_s16, adpcm_ima_dk3, "ADPCM IMA Duck DK3");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_DK4, sample_fmts_s16, adpcm_ima_dk4, "ADPCM IMA Duck DK4");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_EA_EACS, sample_fmts_s16, adpcm_ima_ea_eacs, "ADPCM IMA Electronic Arts EACS");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_EA_SEAD, sample_fmts_s16, adpcm_ima_ea_sead, "ADPCM IMA Electronic Arts SEAD");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_ISS, sample_fmts_s16, adpcm_ima_iss, "ADPCM IMA Funcom ISS");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_OKI, sample_fmts_s16, adpcm_ima_oki, "ADPCM IMA Dialogic OKI");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_QT, sample_fmts_s16p, adpcm_ima_qt, "ADPCM IMA QuickTime");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_RAD, sample_fmts_s16, adpcm_ima_rad, "ADPCM IMA Radical");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_SMJPEG, sample_fmts_s16, adpcm_ima_smjpeg, "ADPCM IMA Loki SDL MJPEG");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_WAV, sample_fmts_s16p, adpcm_ima_wav, "ADPCM IMA WAV");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_WS, sample_fmts_both, adpcm_ima_ws, "ADPCM IMA Westwood");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_MS, sample_fmts_both, adpcm_ms, "ADPCM Microsoft");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_MTAF, sample_fmts_s16p, adpcm_mtaf, "ADPCM MTAF");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_PSX, sample_fmts_s16p, adpcm_psx, "ADPCM Playstation");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_SBPRO_2, sample_fmts_s16, adpcm_sbpro_2, "ADPCM Sound Blaster Pro 2-bit");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_SBPRO_3, sample_fmts_s16, adpcm_sbpro_3, "ADPCM Sound Blaster Pro 2.6-bit");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_SBPRO_4, sample_fmts_s16, adpcm_sbpro_4, "ADPCM Sound Blaster Pro 4-bit");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_SWF, sample_fmts_s16, adpcm_swf, "ADPCM Shockwave Flash");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_THP_LE, sample_fmts_s16p, adpcm_thp_le, "ADPCM Nintendo THP (little-endian)");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_THP, sample_fmts_s16p, adpcm_thp, "ADPCM Nintendo THP");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_XA, sample_fmts_s16p, adpcm_xa, "ADPCM CDROM XA");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_YAMAHA, sample_fmts_s16, adpcm_yamaha, "ADPCM Yamaha");
#define NULL
Definition: coverity.c:32
const struct AVCodec * codec
Definition: avcodec.h:1577
const char const char void * val
Definition: avisynth_c.h:863
static const int16_t ea_adpcm_table[]
Definition: adpcm.c:68
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
static int shift(int a, int b)
Definition: sonic.c:82
This structure describes decoded (raw) audio or video data.
Definition: frame.h:295
static int16_t adpcm_mtaf_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
Definition: adpcm.c:403
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:100
#define THP_GET16(g)
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:379
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
else temp
Definition: vf_mcdeint.c:256
const char * g
Definition: vf_curves.c:115
#define avpriv_request_sample(...)
channels
Definition: aptx.c:30
int size
Definition: avcodec.h:1481
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:133
static enum AVSampleFormat sample_fmts_s16[]
Definition: adpcm.c:1800
#define sample
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_RL16
Definition: bytestream.h:87
int block_align
number of bytes per packet if constant and known or 0 Used by some WAV based audio codecs...
Definition: avcodec.h:2265
static int get_sbits(GetBitContext *s, int n)
Definition: get_bits.h:359
static int16_t adpcm_ms_expand_nibble(ADPCMChannelStatus *c, int nibble)
Definition: adpcm.c:308
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
const uint8_t ff_adpcm_AdaptCoeff1[]
Divided by 4 to fit in 8-bit integers.
Definition: adpcm_data.c:90
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:2236
uint8_t
#define av_cold
Definition: attributes.h:82
static av_cold int adpcm_decode_init(AVCodecContext *avctx)
Definition: adpcm.c:92
float delta
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
static void adpcm_flush(AVCodecContext *avctx)
Definition: adpcm.c:1793
static void adpcm_swf_decode(AVCodecContext *avctx, const uint8_t *buf, int buf_size, int16_t *samples)
Definition: adpcm.c:492
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:1669
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:252
ADPCM tables.
uint8_t * data
Definition: avcodec.h:1480
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:219
static int get_nb_samples(AVCodecContext *avctx, GetByteContext *gb, int buf_size, int *coded_samples, int *approx_nb_samples)
Get the number of samples that will be decoded from the packet.
Definition: adpcm.c:560
static av_always_inline void bytestream2_skipu(GetByteContext *g, unsigned int size)
Definition: bytestream.h:170
bitstream reader API header.
ptrdiff_t size
Definition: opengl_enc.c:100
static const uint8_t header[24]
Definition: sdr2.c:67
int bits_per_coded_sample
bits per sample/pixel from the demuxer (needed for huffyuv).
Definition: avcodec.h:2792
#define av_log(a,...)
static const uint16_t table[]
Definition: prosumer.c:206
enum AVCodecID id
Definition: avcodec.h:3506
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:259
static const int8_t xa_adpcm_table[5][2]
Definition: adpcm.c:60
const uint16_t ff_adpcm_afc_coeffs[2][16]
Definition: adpcm_data.c:109
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce then the filter should push the output frames on the output link immediately As an exception to the previous rule if the input frame is enough to produce several output frames then the filter needs output only at least one per link The additional frames can be left buffered in the filter
ADPCM encoder/decoder common header.
static av_always_inline void update(SilenceDetectContext *s, AVFrame *insamples, int is_silence, int current_sample, int64_t nb_samples_notify, AVRational time_base)
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
Definition: bytestream.h:164
const int8_t *const ff_adpcm_index_tables[4]
Definition: adpcm_data.c:50
const int16_t ff_adpcm_step_table[89]
This is the step table.
Definition: adpcm_data.c:61
static int adpcm_ima_qt_expand_nibble(ADPCMChannelStatus *c, int nibble, int shift)
Definition: adpcm.c:282
static av_always_inline unsigned int bytestream2_get_bytes_left(GetByteContext *g)
Definition: bytestream.h:154
GLsizei count
Definition: opengl_enc.c:108
static int16_t adpcm_ima_oki_expand_nibble(ADPCMChannelStatus *c, int nibble)
Definition: adpcm.c:327
#define FFMAX(a, b)
Definition: common.h:94
static int16_t adpcm_ima_wav_expand_nibble(ADPCMChannelStatus *c, GetBitContext *gb, int bps)
Definition: adpcm.c:259
int8_t exp
Definition: eval.c:72
const int8_t ff_adpcm_index_table[16]
Definition: adpcm_data.c:40
static const int8_t swf_index_tables[4][16]
Definition: adpcm.c:77
const int16_t ff_adpcm_mtaf_stepsize[32][16]
Definition: adpcm_data.c:114
static int xa_decode(AVCodecContext *avctx, int16_t *out0, int16_t *out1, const uint8_t *in, ADPCMChannelStatus *left, ADPCMChannelStatus *right, int channels, int sample_offset)
Definition: adpcm.c:412
#define FFMIN(a, b)
Definition: common.h:96
const int8_t ff_adpcm_AdaptCoeff2[]
Divided by 4 to fit in 8-bit integers.
Definition: adpcm_data.c:95
int vqa_version
VQA version.
Definition: adpcm.c:88
int32_t
static const uint8_t ff_adpcm_ima_block_sizes[4]
Definition: adpcm_data.h:31
static enum AVSampleFormat sample_fmts_s16p[]
Definition: adpcm.c:1802
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:72
#define s(width, name)
Definition: cbs_vp9.c:257
int n
Definition: avisynth_c.h:760
const int16_t ff_adpcm_oki_step_table[49]
Definition: adpcm_data.c:73
#define FF_ARRAY_ELEMS(a)
static const float pred[4]
Definition: siprdata.h:259
static const uint8_t ff_adpcm_ima_block_samples[4]
Definition: adpcm_data.h:32
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
static av_always_inline int bytestream2_tell(GetByteContext *g)
Definition: bytestream.h:188
const int16_t ff_adpcm_AdaptationTable[]
Definition: adpcm_data.c:84
Libavcodec external API header.
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:58
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_WB32 unsigned int_TMPL AV_WB24 unsigned int_TMPL AV_WB16 unsigned int_TMPL byte
Definition: bytestream.h:87
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:677
main external API structure.
Definition: avcodec.h:1568
static int16_t adpcm_yamaha_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
Definition: adpcm.c:389
#define DK3_GET_NEXT_NIBBLE()
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1968
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2]...the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so...,+,-,+,-,+,+,-,+,-,+,...hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32-hcoeff[1]-hcoeff[2]-...a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2}an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||.........intra?||||:Block01:yes no||||:Block02:.................||||:Block03::y DC::ref index:||||:Block04::cb DC::motion x:||||.........:cr DC::motion 
y:||||.................|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------------------------------|||Y subbands||Cb subbands||Cr subbands||||------||------||------|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||------||------||------||||------||------||------|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||------||------||------||||------||------||------|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||------||------||------||||------||------||------|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------------------------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction------------|\Dequantization-------------------\||Reference frames|\IDWT|--------------|Motion\|||Frame 0||Frame 1||Compensation.OBMC v-------|--------------|--------------.\------> Frame n output Frame Frame<----------------------------------/|...|-------------------Range Coder:============Binary Range Coder:-------------------The implemented range coder is an adapted version based upon"Range encoding: an algorithm for removing redundancy from a digitised message."by G.N.N.Martin.The symbols encoded by the Snow range coder are bits(0|1).The associated probabilities are not fix but change depending on the symbol mix seen so far.bit seen|new state---------+-----------------------------------------------0|256-state_transition_table[256-old_state];1|state_transition_table[old_state];state_transition_table={0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 
106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:-------------------------FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector Prediction:=========================1.the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
Definition: snow.txt:206
static int16_t adpcm_ima_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int shift)
Definition: adpcm.c:233
void * buf
Definition: avisynth_c.h:766
int extradata_size
Definition: avcodec.h:1670
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(const int16_t *) pi >> 8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(const int32_t *) pi >> 24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31))))#define SET_CONV_FUNC_GROUP(ofmt, ifmt) static void set_generic_function(AudioConvert *ac){}void ff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, int sample_rate, 
int apply_map){AudioConvert *ac;int in_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) return NULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt) > 2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);return NULL;}return ac;}in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}else if(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;else ac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);return ac;}int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){int use_generic=1;int len=in->nb_samples;int p;if(ac->dc){av_log(ac->avr, AV_LOG_TRACE,"%d samples - audio_convert: %s to %s (dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));return ff_convert_dither(ac-> in
int index
Definition: gxfenc.c:89
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:659
static int16_t adpcm_agm_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
Definition: adpcm.c:189
ADPCMChannelStatus status[14]
Definition: adpcm.c:87
uint8_t pi<< 24) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_U8,(uint64_t)((*(const uint8_t *) pi-0x80U))<< 56) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16,(*(const int16_t *) pi >>8)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S16,(uint64_t)(*(const int16_t *) pi)<< 48) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16,*(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16,*(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32,(*(const int32_t *) pi >>24)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S32,(uint64_t)(*(const int32_t *) pi)<< 32) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32,*(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32,*(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S64,(*(const int64_t *) pi >>56)+0x80) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S64,*(const int64_t *) pi *(1.0f/(UINT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S64,*(const int64_t *) pi *(1.0/(UINT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_FLT, llrintf(*(const float *) pi *(UINT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, 
av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_DBL, llrint(*(const double *) pi *(UINT64_C(1)<< 63)))#define FMT_PAIR_FUNC(out, in) static conv_func_type *const fmt_pair_to_conv_functions[AV_SAMPLE_FMT_NB *AV_SAMPLE_FMT_NB]={FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_DBL), 
FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S64),};static void cpy1(uint8_t **dst, const uint8_t **src, int len){memcpy(*dst,*src, len);}static void cpy2(uint8_t **dst, const uint8_t **src, int len){memcpy(*dst,*src, 2 *len);}static void cpy4(uint8_t **dst, const uint8_t **src, int len){memcpy(*dst,*src, 4 *len);}static void cpy8(uint8_t **dst, const uint8_t **src, int len){memcpy(*dst,*src, 8 *len);}AudioConvert *swri_audio_convert_alloc(enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, const int *ch_map, int flags){AudioConvert *ctx;conv_func_type *f=fmt_pair_to_conv_functions[av_get_packed_sample_fmt(out_fmt)+AV_SAMPLE_FMT_NB *av_get_packed_sample_fmt(in_fmt)];if(!f) return NULL;ctx=av_mallocz(sizeof(*ctx));if(!ctx) return NULL;if(channels==1){in_fmt=av_get_planar_sample_fmt(in_fmt);out_fmt=av_get_planar_sample_fmt(out_fmt);}ctx->channels=channels;ctx->conv_f=f;ctx->ch_map=ch_map;if(in_fmt==AV_SAMPLE_FMT_U8||in_fmt==AV_SAMPLE_FMT_U8P) memset(ctx->silence, 0x80, sizeof(ctx->silence));if(out_fmt==in_fmt &&!ch_map){switch(av_get_bytes_per_sample(in_fmt)){case 1:ctx->simd_f=cpy1;break;case 2:ctx->simd_f=cpy2;break;case 4:ctx->simd_f=cpy4;break;case 8:ctx->simd_f=cpy8;break;}}if(HAVE_X86ASM &&1) swri_audio_convert_init_x86(ctx, out_fmt, in_fmt, channels);if(ARCH_ARM) swri_audio_convert_init_arm(ctx, out_fmt, in_fmt, channels);if(ARCH_AARCH64) swri_audio_convert_init_aarch64(ctx, out_fmt, in_fmt, channels);return ctx;}void swri_audio_convert_free(AudioConvert **ctx){av_freep(ctx);}int swri_audio_convert(AudioConvert *ctx, AudioData *out, AudioData *in, int len){int ch;int off=0;const int os=(out->planar?1:out->ch_count)*out->bps;unsigned 
misaligned=0;av_assert0(ctx->channels==out->ch_count);if(ctx->in_simd_align_mask){int planes=in->planar?in->ch_count:1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) in->ch[ch];misaligned|=m &ctx->in_simd_align_mask;}if(ctx->out_simd_align_mask){int planes=out->planar?out->ch_count:1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) out->ch[ch];misaligned|=m &ctx->out_simd_align_mask;}if(ctx->simd_f &&!ctx->ch_map &&!misaligned){off=len &~15;av_assert1(off >=0);av_assert1(off<=len);av_assert2(ctx->channels==SWR_CH_MAX||!in->ch[ctx->channels]);if(off >0){if(out->planar==in->planar){int planes=out->planar?out->ch_count:1;for(ch=0;ch< planes;ch++){ctx->simd_f(out-> ch ch
Definition: audioconvert.c:56
static av_const int sign_extend(int val, unsigned bits)
Definition: mathops.h:130
static int16_t adpcm_sbpro_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int size, int shift)
Definition: adpcm.c:369
static unsigned int get_bits_le(GetBitContext *s, int n)
Definition: get_bits.h:420
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:309
uint8_t level
Definition: svq3.c:207
int
const int8_t ff_adpcm_yamaha_difflookup[]
Definition: adpcm_data.c:104
common internal api header.
const int16_t ff_adpcm_yamaha_indexscale[]
Definition: adpcm_data.c:99
static int adpcm_decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr, AVPacket *avpkt)
Definition: adpcm.c:762
signed 16 bits
Definition: samplefmt.h:61
#define flag(name)
Definition: cbs_av1.c:553
channel
Use these values when setting the channel map with ebur128_set_channel().
Definition: ebur128.h:39
unsigned bps
Definition: movenc.c:1532
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
Definition: avcodec.h:793
void * priv_data
Definition: avcodec.h:1595
static av_always_inline int diff(const uint32_t a, const uint32_t b)
#define xf(width, name, var, range_min, range_max, subs,...)
Definition: cbs_av1.c:664
int channels
number of audio channels
Definition: avcodec.h:2229
static const double coeff[2][5]
Definition: vf_owdenoise.c:72
static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, int whence)
Definition: bytestream.h:208
static int16_t adpcm_ct_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
Definition: adpcm.c:348
static enum AVSampleFormat sample_fmts_both[]
Definition: adpcm.c:1804
Filter the word “frame” indicates either a video frame or a group of audio samples
int16_t step_index
Definition: adpcm.h:35
signed 16 bits, planar
Definition: samplefmt.h:67
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Frame references ownership and permissions
uint8_t ** extended_data
pointers to the data planes/channels.
Definition: frame.h:342
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:87
This structure stores compressed data.
Definition: avcodec.h:1457
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:361
for(j=16;j >0;--j)
#define tb
Definition: regdef.h:68
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about which is also called distortion Distortion can be quantified by almost any quality measurement one chooses the sum of squared differences is used but more complex methods that consider psychovisual effects can be used as well It makes no difference in this discussion First step
#define ADPCM_DECODER(id_, sample_fmts_, name_, long_name_)
Definition: adpcm.c:1808